-rw-r--r--  Makefile.am | 2
-rw-r--r--  Makefile.in | 1069
-rw-r--r--  configure.ac | 11
-rw-r--r--  contrib/ffmpeg/COPYING | 504
-rw-r--r--  contrib/ffmpeg/CREDITS | 47
-rw-r--r--  contrib/ffmpeg/Changelog | 389
-rw-r--r--  contrib/ffmpeg/Doxyfile | 1038
-rw-r--r--  contrib/ffmpeg/INSTALL | 14
-rw-r--r--  contrib/ffmpeg/MAINTAINERS | 243
-rw-r--r--  contrib/ffmpeg/Makefile | 241
-rw-r--r--  contrib/ffmpeg/README | 19
-rw-r--r--  contrib/ffmpeg/berrno.h | 44
-rwxr-xr-x  contrib/ffmpeg/build_avopt | 9
-rwxr-xr-x  contrib/ffmpeg/clean-diff | 11
-rw-r--r--  contrib/ffmpeg/cmdutils.c | 141
-rw-r--r--  contrib/ffmpeg/cmdutils.h | 34
-rw-r--r--  contrib/ffmpeg/common.mak | 100
-rwxr-xr-x  contrib/ffmpeg/configure | 2186
-rw-r--r--  contrib/ffmpeg/cws2fws.c | 127
-rw-r--r--  contrib/ffmpeg/doc/Makefile | 20
-rw-r--r--  contrib/ffmpeg/doc/TODO | 82
-rw-r--r--  contrib/ffmpeg/doc/faq.texi | 312
-rw-r--r--  contrib/ffmpeg/doc/ffmpeg-doc.texi | 1607
-rw-r--r--  contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt | 172
-rw-r--r--  contrib/ffmpeg/doc/ffplay-doc.texi | 104
-rw-r--r--  contrib/ffmpeg/doc/ffserver-doc.texi | 224
-rw-r--r--  contrib/ffmpeg/doc/ffserver.conf | 349
-rw-r--r--  contrib/ffmpeg/doc/hooks.texi | 113
-rw-r--r--  contrib/ffmpeg/doc/optimization.txt | 158
-rw-r--r--  contrib/ffmpeg/doc/soc.txt | 24
-rwxr-xr-x  contrib/ffmpeg/doc/texi2pod.pl | 427
-rw-r--r--  contrib/ffmpeg/ffinstall.nsi | 75
-rw-r--r--  contrib/ffmpeg/ffmpeg.c | 3973
-rw-r--r--  contrib/ffmpeg/ffplay.c | 2488
-rw-r--r--  contrib/ffmpeg/ffserver.c | 4602
-rw-r--r--  contrib/ffmpeg/ffserver.h | 8
-rw-r--r--  contrib/ffmpeg/libavcodec/4xm.c (renamed from src/libffmpeg/libavcodec/4xm.c) | 14
-rw-r--r--  contrib/ffmpeg/libavcodec/8bps.c (renamed from src/libffmpeg/libavcodec/8bps.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/Makefile | 456
-rw-r--r--  contrib/ffmpeg/libavcodec/a52dec.c | 257
-rw-r--r--  contrib/ffmpeg/libavcodec/aasc.c (renamed from src/libffmpeg/libavcodec/aasc.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/ac3.h | 65
-rw-r--r--  contrib/ffmpeg/libavcodec/ac3dec.c | 184
-rw-r--r--  contrib/ffmpeg/libavcodec/ac3enc.c | 1557
-rw-r--r--  contrib/ffmpeg/libavcodec/ac3tab.h | 205
-rw-r--r--  contrib/ffmpeg/libavcodec/adpcm.c (renamed from src/libffmpeg/libavcodec/adpcm.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/adx.c (renamed from src/libffmpeg/libavcodec/adx.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/alac.c (renamed from src/libffmpeg/libavcodec/alac.c) | 16
-rw-r--r--  contrib/ffmpeg/libavcodec/allcodecs.c | 289
-rw-r--r--  contrib/ffmpeg/libavcodec/alpha/asm.h (renamed from src/libffmpeg/libavcodec/alpha/asm.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/alpha/dsputil_alpha.c (renamed from src/libffmpeg/libavcodec/alpha/dsputil_alpha.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/alpha/dsputil_alpha_asm.S (renamed from src/libffmpeg/libavcodec/alpha/dsputil_alpha_asm.S) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/alpha/motion_est_alpha.c (renamed from src/libffmpeg/libavcodec/alpha/motion_est_alpha.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/alpha/motion_est_mvi_asm.S (renamed from src/libffmpeg/libavcodec/alpha/motion_est_mvi_asm.S) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/alpha/mpegvideo_alpha.c (renamed from src/libffmpeg/libavcodec/alpha/mpegvideo_alpha.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/alpha/regdef.h (renamed from src/libffmpeg/libavcodec/alpha/regdef.h) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/alpha/simple_idct_alpha.c (renamed from src/libffmpeg/libavcodec/alpha/simple_idct_alpha.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/amr.c | 715
-rw-r--r--  contrib/ffmpeg/libavcodec/apiexample.c | 457
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/dsputil_arm.c (renamed from src/libffmpeg/libavcodec/armv4l/dsputil_arm.c) | 52
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/dsputil_arm_s.S | 696
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt.c | 188
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt_rnd.h | 1114
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/jrevdct_arm.S (renamed from src/libffmpeg/libavcodec/armv4l/jrevdct_arm.S) | 0
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/mathops.h | 49
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/mpegvideo_arm.c (renamed from src/libffmpeg/libavcodec/armv4l/mpegvideo_arm.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/mpegvideo_iwmmxt.c | 119
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/simple_idct_arm.S (renamed from src/libffmpeg/libavcodec/armv4l/simple_idct_arm.S) | 20
-rw-r--r--  contrib/ffmpeg/libavcodec/armv4l/simple_idct_armv5te.S | 718
-rw-r--r--  contrib/ffmpeg/libavcodec/asv1.c (renamed from src/libffmpeg/libavcodec/asv1.c) | 16
-rw-r--r--  contrib/ffmpeg/libavcodec/audioconvert.c | 79
-rw-r--r--  contrib/ffmpeg/libavcodec/avcodec.h (renamed from src/libffmpeg/libavcodec/avcodec.h) | 101
-rw-r--r--  contrib/ffmpeg/libavcodec/avs.c (renamed from src/libffmpeg/libavcodec/avs.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/beosthread.c | 182
-rw-r--r--  contrib/ffmpeg/libavcodec/bfin/dsputil_bfin.c | 55
-rw-r--r--[-rwxr-xr-x]  contrib/ffmpeg/libavcodec/bitstream.c (renamed from src/libffmpeg/libavcodec/bitstream.c) | 51
-rw-r--r--  contrib/ffmpeg/libavcodec/bitstream.h (renamed from src/libffmpeg/libavcodec/bitstream.h) | 161
-rw-r--r--  contrib/ffmpeg/libavcodec/bitstream_filter.c | 284
-rw-r--r--  contrib/ffmpeg/libavcodec/bmp.c | 254
-rw-r--r--  contrib/ffmpeg/libavcodec/bytestream.h | 89
-rw-r--r--  contrib/ffmpeg/libavcodec/cabac.c (renamed from src/libffmpeg/libavcodec/cabac.c) | 91
-rw-r--r--  contrib/ffmpeg/libavcodec/cabac.h | 859
-rw-r--r--  contrib/ffmpeg/libavcodec/cavs.c (renamed from src/libffmpeg/libavcodec/cavs.c) | 140
-rw-r--r--  contrib/ffmpeg/libavcodec/cavsdata.h (renamed from src/libffmpeg/libavcodec/cavsdata.h) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/cavsdsp.c | 546
-rw-r--r--  contrib/ffmpeg/libavcodec/cinepak.c (renamed from src/libffmpeg/libavcodec/cinepak.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/cljr.c (renamed from src/libffmpeg/libavcodec/cljr.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/cook.c (renamed from src/libffmpeg/libavcodec/cook.c) | 11
-rw-r--r--  contrib/ffmpeg/libavcodec/cookdata.h (renamed from src/libffmpeg/libavcodec/cookdata.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/cscd.c (renamed from src/libffmpeg/libavcodec/cscd.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/cyuv.c (renamed from src/libffmpeg/libavcodec/cyuv.c) | 16
-rw-r--r--  contrib/ffmpeg/libavcodec/dct-test.c | 535
-rw-r--r--  contrib/ffmpeg/libavcodec/dpcm.c (renamed from src/libffmpeg/libavcodec/dpcm.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/dsicinav.c | 362
-rw-r--r--  contrib/ffmpeg/libavcodec/dsputil.c (renamed from src/libffmpeg/libavcodec/dsputil.c) | 284
-rw-r--r--  contrib/ffmpeg/libavcodec/dsputil.h (renamed from src/libffmpeg/libavcodec/dsputil.h) | 137
-rw-r--r--  contrib/ffmpeg/libavcodec/dtsdec.c | 320
-rw-r--r--  contrib/ffmpeg/libavcodec/dv.c (renamed from src/libffmpeg/libavcodec/dv.c) | 162
-rw-r--r--  contrib/ffmpeg/libavcodec/dvbsub.c | 445
-rw-r--r--  contrib/ffmpeg/libavcodec/dvbsubdec.c | 1633
-rw-r--r--  contrib/ffmpeg/libavcodec/dvdata.h (renamed from src/libffmpeg/libavcodec/dvdata.h) | 67
-rw-r--r--  contrib/ffmpeg/libavcodec/dvdsubdec.c | 477
-rw-r--r--  contrib/ffmpeg/libavcodec/dvdsubenc.c | 247
-rw-r--r--  contrib/ffmpeg/libavcodec/error_resilience.c (renamed from src/libffmpeg/libavcodec/error_resilience.c) | 30
-rw-r--r--  contrib/ffmpeg/libavcodec/eval.c | 466
-rw-r--r--  contrib/ffmpeg/libavcodec/eval.h | 84
-rw-r--r--  contrib/ffmpeg/libavcodec/faac.c | 133
-rw-r--r--  contrib/ffmpeg/libavcodec/faad.c | 334
-rw-r--r--  contrib/ffmpeg/libavcodec/faandct.c (renamed from src/libffmpeg/libavcodec/faandct.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/faandct.h (renamed from src/libffmpeg/libavcodec/faandct.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/fdctref.c (renamed from src/libffmpeg/libavcodec/fdctref.c) | 0
-rw-r--r--  contrib/ffmpeg/libavcodec/fft-test.c | 297
-rw-r--r--  contrib/ffmpeg/libavcodec/fft.c (renamed from src/libffmpeg/libavcodec/fft.c) | 64
-rw-r--r--  contrib/ffmpeg/libavcodec/ffv1.c (renamed from src/libffmpeg/libavcodec/ffv1.c) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/flac.c (renamed from src/libffmpeg/libavcodec/flac.c) | 114
-rw-r--r--  contrib/ffmpeg/libavcodec/flacenc.c | 1371
-rw-r--r--  contrib/ffmpeg/libavcodec/flashsv.c (renamed from src/libffmpeg/libavcodec/flashsv.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/flicvideo.c (renamed from src/libffmpeg/libavcodec/flicvideo.c) | 52
-rw-r--r--  contrib/ffmpeg/libavcodec/fraps.c (renamed from src/libffmpeg/libavcodec/fraps.c) | 206
-rw-r--r--  contrib/ffmpeg/libavcodec/g726.c (renamed from src/libffmpeg/libavcodec/g726.c) | 161
-rw-r--r--  contrib/ffmpeg/libavcodec/gif.c | 350
-rw-r--r--  contrib/ffmpeg/libavcodec/gifdec.c | 339
-rw-r--r--  contrib/ffmpeg/libavcodec/golomb.c (renamed from src/libffmpeg/libavcodec/golomb.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/golomb.h (renamed from src/libffmpeg/libavcodec/golomb.h) | 14
-rw-r--r--  contrib/ffmpeg/libavcodec/h261.c (renamed from src/libffmpeg/libavcodec/h261.c) | 29
-rw-r--r--[-rwxr-xr-x]  contrib/ffmpeg/libavcodec/h261data.h (renamed from src/libffmpeg/libavcodec/h261data.h) | 39
-rw-r--r--  contrib/ffmpeg/libavcodec/h263.c (renamed from src/libffmpeg/libavcodec/h263.c) | 65
-rw-r--r--  contrib/ffmpeg/libavcodec/h263data.h (renamed from src/libffmpeg/libavcodec/h263data.h) | 31
-rw-r--r--  contrib/ffmpeg/libavcodec/h263dec.c (renamed from src/libffmpeg/libavcodec/h263dec.c) | 37
-rw-r--r--  contrib/ffmpeg/libavcodec/h264.c (renamed from src/libffmpeg/libavcodec/h264.c) | 471
-rw-r--r--  contrib/ffmpeg/libavcodec/h264data.h (renamed from src/libffmpeg/libavcodec/h264data.h) | 56
-rw-r--r--[-rwxr-xr-x]  contrib/ffmpeg/libavcodec/h264idct.c (renamed from src/libffmpeg/libavcodec/h264idct.c) | 18
-rw-r--r--  contrib/ffmpeg/libavcodec/huffyuv.c (renamed from src/libffmpeg/libavcodec/huffyuv.c) | 28
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/cavsdsp_mmx.c | 518
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/cputest.c (renamed from src/libffmpeg/libavcodec/i386/cputest.c) | 25
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c (renamed from src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/dsputil_mmx.c (renamed from src/libffmpeg/libavcodec/i386/dsputil_mmx.c) | 450
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/dsputil_mmx_avg.h (renamed from src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h) | 60
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/dsputil_mmx_rnd.h (renamed from src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h) | 26
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/fdct_mmx.c (renamed from src/libffmpeg/libavcodec/i386/fdct_mmx.c) | 158
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/fft_3dn.c | 125
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/fft_3dn2.c | 210
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/fft_sse.c | 247
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/h264dsp_mmx.c (renamed from src/libffmpeg/libavcodec/i386/h264dsp_mmx.c) | 230
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/idct_mmx.c (renamed from src/libffmpeg/libavcodec/i386/idct_mmx.c) | 3
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/idct_mmx_xvid.c (renamed from src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c) | 37
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/mathops.h | 41
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/mmx.h (renamed from src/libffmpeg/libavcodec/i386/mmx.h) | 22
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/motion_est_mmx.c (renamed from src/libffmpeg/libavcodec/i386/motion_est_mmx.c) | 22
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/mpegvideo_mmx.c (renamed from src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c) | 23
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/mpegvideo_mmx_template.c (renamed from src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c) | 16
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/simple_idct_mmx.c (renamed from src/libffmpeg/libavcodec/i386/simple_idct_mmx.c) | 154
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/snowdsp_mmx.c | 921
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/vp3dsp_mmx.c (renamed from src/libffmpeg/libavcodec/i386/vp3dsp_mmx.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/i386/vp3dsp_sse2.c (renamed from src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/idcinvideo.c (renamed from src/libffmpeg/libavcodec/idcinvideo.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/imc.c | 813
-rw-r--r--  contrib/ffmpeg/libavcodec/imcdata.h | 164
-rw-r--r--  contrib/ffmpeg/libavcodec/imgconvert.c (renamed from src/libffmpeg/libavcodec/imgconvert.c) | 296
-rw-r--r--  contrib/ffmpeg/libavcodec/imgconvert_template.h (renamed from src/libffmpeg/libavcodec/imgconvert_template.h) | 44
-rw-r--r--  contrib/ffmpeg/libavcodec/imgresample.c (renamed from src/libffmpeg/libavcodec/imgresample.c) | 57
-rw-r--r--  contrib/ffmpeg/libavcodec/indeo2.c (renamed from src/libffmpeg/libavcodec/indeo2.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/indeo2data.h (renamed from src/libffmpeg/libavcodec/indeo2data.h) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/indeo3.c (renamed from src/libffmpeg/libavcodec/indeo3.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/indeo3data.h (renamed from src/libffmpeg/libavcodec/indeo3data.h) | 20
-rw-r--r--  contrib/ffmpeg/libavcodec/interplayvideo.c (renamed from src/libffmpeg/libavcodec/interplayvideo.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/jfdctfst.c (renamed from src/libffmpeg/libavcodec/jfdctfst.c) | 37
-rw-r--r--  contrib/ffmpeg/libavcodec/jfdctint.c (renamed from src/libffmpeg/libavcodec/jfdctint.c) | 37
-rw-r--r--  contrib/ffmpeg/libavcodec/jpeg_ls.c (renamed from src/libffmpeg/libavcodec/jpeg_ls.c) | 276
-rw-r--r--  contrib/ffmpeg/libavcodec/jrevdct.c (renamed from src/libffmpeg/libavcodec/jrevdct.c) | 37
-rw-r--r--  contrib/ffmpeg/libavcodec/kmvc.c (renamed from src/libffmpeg/libavcodec/kmvc.c) | 16
-rw-r--r--  contrib/ffmpeg/libavcodec/lcl.c (renamed from src/libffmpeg/libavcodec/lcl.c) | 38
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/a52.h | 73
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/a52_internal.h | 162
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/a52_util.h | 32
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/bit_allocate.c | 260
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/bitstream.c | 91
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/bitstream.h | 77
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/crc.c | 73
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/downmix.c | 679
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/imdct.c | 411
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/mm_accel.h | 42
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/parse.c | 939
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/resample.c | 63
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/resample_c.c | 203
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/resample_mmx.c | 537
-rw-r--r--  contrib/ffmpeg/libavcodec/liba52/tables.h | 246
-rw-r--r--  contrib/ffmpeg/libavcodec/libgsm.c | 97
-rw-r--r--  contrib/ffmpeg/libavcodec/loco.c (renamed from src/libffmpeg/libavcodec/loco.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/lzo.c (renamed from src/libffmpeg/libavcodec/lzo.c) | 14
-rw-r--r--  contrib/ffmpeg/libavcodec/lzo.h | 35
-rw-r--r--  contrib/ffmpeg/libavcodec/lzw.c | 262
-rw-r--r--  contrib/ffmpeg/libavcodec/lzw.h | 49
-rw-r--r--  contrib/ffmpeg/libavcodec/mace.c (renamed from src/libffmpeg/libavcodec/mace.c) | 18
-rw-r--r--  contrib/ffmpeg/libavcodec/mathops.h | 69
-rw-r--r--  contrib/ffmpeg/libavcodec/mdct.c (renamed from src/libffmpeg/libavcodec/mdct.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/mdec.c (renamed from src/libffmpeg/libavcodec/mdec.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/mjpeg.c (renamed from src/libffmpeg/libavcodec/mjpeg.c) | 243
-rw-r--r--  contrib/ffmpeg/libavcodec/mlib/dsputil_mlib.c (renamed from src/libffmpeg/libavcodec/mlib/dsputil_mlib.c) | 18
-rw-r--r--  contrib/ffmpeg/libavcodec/mmvideo.c (renamed from src/libffmpeg/libavcodec/mmvideo.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/motion_est.c (renamed from src/libffmpeg/libavcodec/motion_est.c) | 66
-rw-r--r--  contrib/ffmpeg/libavcodec/motion_est_template.c (renamed from src/libffmpeg/libavcodec/motion_est_template.c) | 110
-rw-r--r--  contrib/ffmpeg/libavcodec/motion_test.c | 159
-rw-r--r--  contrib/ffmpeg/libavcodec/mp3lameaudio.c | 221
-rw-r--r--  contrib/ffmpeg/libavcodec/mpeg12.c (renamed from src/libffmpeg/libavcodec/mpeg12.c) | 392
-rw-r--r--  contrib/ffmpeg/libavcodec/mpeg12data.h (renamed from src/libffmpeg/libavcodec/mpeg12data.h) | 32
-rw-r--r--  contrib/ffmpeg/libavcodec/mpeg4data.h (renamed from src/libffmpeg/libavcodec/mpeg4data.h) | 22
-rw-r--r--  contrib/ffmpeg/libavcodec/mpegaudio.c | 801
-rw-r--r--  contrib/ffmpeg/libavcodec/mpegaudio.h (renamed from src/libffmpeg/libavcodec/mpegaudio.h) | 22
-rw-r--r--  contrib/ffmpeg/libavcodec/mpegaudiodec.c (renamed from src/libffmpeg/libavcodec/mpegaudiodec.c) | 1001
-rw-r--r--  contrib/ffmpeg/libavcodec/mpegaudiodectab.h (renamed from src/libffmpeg/libavcodec/mpegaudiodectab.h) | 93
-rw-r--r--  contrib/ffmpeg/libavcodec/mpegaudiotab.h (renamed from src/libffmpeg/libavcodec/mpegaudiotab.h) | 17
-rw-r--r--  contrib/ffmpeg/libavcodec/mpegvideo.c (renamed from src/libffmpeg/libavcodec/mpegvideo.c) | 280
-rw-r--r--  contrib/ffmpeg/libavcodec/mpegvideo.h (renamed from src/libffmpeg/libavcodec/mpegvideo.h) | 121
-rw-r--r--  contrib/ffmpeg/libavcodec/msmpeg4.c (renamed from src/libffmpeg/libavcodec/msmpeg4.c) | 26
-rw-r--r--  contrib/ffmpeg/libavcodec/msmpeg4data.h (renamed from src/libffmpeg/libavcodec/msmpeg4data.h) | 24
-rw-r--r--  contrib/ffmpeg/libavcodec/msrle.c (renamed from src/libffmpeg/libavcodec/msrle.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/msvideo1.c (renamed from src/libffmpeg/libavcodec/msvideo1.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/nuv.c (renamed from src/libffmpeg/libavcodec/nuv.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/oggvorbis.c | 381
-rw-r--r--  contrib/ffmpeg/libavcodec/opt.c | 381
-rw-r--r--  contrib/ffmpeg/libavcodec/opt.h (renamed from src/libffmpeg/libavcodec/opt.h) | 22
-rw-r--r--  contrib/ffmpeg/libavcodec/os2thread.c | 147
-rw-r--r--  contrib/ffmpeg/libavcodec/parser.c (renamed from src/libffmpeg/libavcodec/parser.c) | 321
-rw-r--r--  contrib/ffmpeg/libavcodec/parser.h | 63
-rw-r--r--  contrib/ffmpeg/libavcodec/pcm.c (renamed from src/libffmpeg/libavcodec/pcm.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/png.c | 968
-rw-r--r--  contrib/ffmpeg/libavcodec/pnm.c | 606
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/dsputil_altivec.c (renamed from src/libffmpeg/libavcodec/ppc/dsputil_altivec.c) | 612
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/dsputil_altivec.h | 106
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/dsputil_ppc.c (renamed from src/libffmpeg/libavcodec/ppc/dsputil_ppc.c) | 130
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/dsputil_ppc.h (renamed from src/libffmpeg/libavcodec/ppc/dsputil_ppc.h) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/fdct_altivec.c (renamed from src/libffmpeg/libavcodec/ppc/fdct_altivec.c) | 15
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/fft_altivec.c (renamed from src/libffmpeg/libavcodec/ppc/fft_altivec.c) | 95
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/float_altivec.c | 194
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/gcc_fixes.h (renamed from src/libffmpeg/libavcodec/ppc/gcc_fixes.h) | 16
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/gmc_altivec.c (renamed from src/libffmpeg/libavcodec/ppc/gmc_altivec.c) | 38
-rw-r--r--[-rwxr-xr-x]  contrib/ffmpeg/libavcodec/ppc/h264_altivec.c (renamed from src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c) | 211
-rw-r--r--[-rwxr-xr-x]  contrib/ffmpeg/libavcodec/ppc/h264_template_altivec.c (renamed from src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/idct_altivec.c (renamed from src/libffmpeg/libavcodec/ppc/idct_altivec.c) | 24
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/mathops.h | 33
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/mpegvideo_altivec.c (renamed from src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c) | 58
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/mpegvideo_ppc.c (renamed from src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c) | 14
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/snow_altivec.c | 788
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/types_altivec.h | 41
-rw-r--r--  contrib/ffmpeg/libavcodec/ppc/vc1dsp_altivec.c | 338
-rw-r--r--  contrib/ffmpeg/libavcodec/ps2/dsputil_mmi.c | 163
-rw-r--r--  contrib/ffmpeg/libavcodec/ps2/idct_mmi.c | 363
-rw-r--r--  contrib/ffmpeg/libavcodec/ps2/mmi.h | 172
-rw-r--r--  contrib/ffmpeg/libavcodec/ps2/mpegvideo_mmi.c | 89
-rw-r--r--  contrib/ffmpeg/libavcodec/pthread.c | 170
-rw-r--r--  contrib/ffmpeg/libavcodec/qdm2.c (renamed from src/libffmpeg/libavcodec/qdm2.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/qdm2data.h (renamed from src/libffmpeg/libavcodec/qdm2data.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/qdrw.c (renamed from src/libffmpeg/libavcodec/qdrw.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/qpeg.c (renamed from src/libffmpeg/libavcodec/qpeg.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/qtrle.c (renamed from src/libffmpeg/libavcodec/qtrle.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/ra144.c (renamed from src/libffmpeg/libavcodec/ra144.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/ra144.h (renamed from src/libffmpeg/libavcodec/ra144.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/ra288.c (renamed from src/libffmpeg/libavcodec/ra288.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/ra288.h (renamed from src/libffmpeg/libavcodec/ra288.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/rangecoder.c (renamed from src/libffmpeg/libavcodec/rangecoder.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/rangecoder.h (renamed from src/libffmpeg/libavcodec/rangecoder.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/ratecontrol.c (renamed from src/libffmpeg/libavcodec/ratecontrol.c) | 214
-rw-r--r--  contrib/ffmpeg/libavcodec/ratecontrol.h | 103
-rw-r--r--  contrib/ffmpeg/libavcodec/raw.c (renamed from src/libffmpeg/libavcodec/raw.c) | 31
-rw-r--r--  contrib/ffmpeg/libavcodec/resample.c | 249
-rw-r--r--  contrib/ffmpeg/libavcodec/resample2.c (renamed from src/libffmpeg/libavcodec/resample2.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/roqvideo.c (renamed from src/libffmpeg/libavcodec/roqvideo.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/rpza.c (renamed from src/libffmpeg/libavcodec/rpza.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/rtjpeg.c (renamed from src/libffmpeg/libavcodec/rtjpeg.c) | 39
-rw-r--r--  contrib/ffmpeg/libavcodec/rtjpeg.h | 39
-rw-r--r--  contrib/ffmpeg/libavcodec/rv10.c (renamed from src/libffmpeg/libavcodec/rv10.c) | 53
-rw-r--r--  contrib/ffmpeg/libavcodec/sh4/dsputil_align.c | 430
-rw-r--r--  contrib/ffmpeg/libavcodec/sh4/dsputil_sh4.c | 120
-rw-r--r--  contrib/ffmpeg/libavcodec/sh4/idct_sh4.c | 366
-rw-r--r--  contrib/ffmpeg/libavcodec/sh4/qpel.c | 1600
-rw-r--r--  contrib/ffmpeg/libavcodec/shorten.c (renamed from src/libffmpeg/libavcodec/shorten.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/simple_idct.c (renamed from src/libffmpeg/libavcodec/simple_idct.c) | 20
-rw-r--r--  contrib/ffmpeg/libavcodec/simple_idct.h (renamed from src/libffmpeg/libavcodec/simple_idct.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/smacker.c (renamed from src/libffmpeg/libavcodec/smacker.c) | 24
-rw-r--r--  contrib/ffmpeg/libavcodec/smc.c (renamed from src/libffmpeg/libavcodec/smc.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/snow.c (renamed from src/libffmpeg/libavcodec/snow.c) | 288
-rw-r--r--  contrib/ffmpeg/libavcodec/snow.h (renamed from src/libffmpeg/libavcodec/snow.h) | 16
-rw-r--r--  contrib/ffmpeg/libavcodec/sonic.c | 981
-rw-r--r--  contrib/ffmpeg/libavcodec/sp5x.h (renamed from src/libffmpeg/libavcodec/sp5x.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/sparc/dsputil_vis.c (renamed from src/libffmpeg/libavcodec/sparc/dsputil_vis.c) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/sparc/vis.h (renamed from src/libffmpeg/libavcodec/sparc/vis.h) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/svq1.c (renamed from src/libffmpeg/libavcodec/svq1.c) | 24
-rw-r--r--  contrib/ffmpeg/libavcodec/svq1_cb.h (renamed from src/libffmpeg/libavcodec/svq1_cb.h) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/svq1_vlc.h (renamed from src/libffmpeg/libavcodec/svq1_vlc.h) | 20
-rw-r--r--  contrib/ffmpeg/libavcodec/svq3.c (renamed from src/libffmpeg/libavcodec/svq3.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/targa.c | 254
-rw-r--r--  contrib/ffmpeg/libavcodec/tiertexseqv.c | 232
-rw-r--r--  contrib/ffmpeg/libavcodec/tiff.c | 531
-rw-r--r--  contrib/ffmpeg/libavcodec/truemotion1.c (renamed from src/libffmpeg/libavcodec/truemotion1.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/truemotion1data.h (renamed from src/libffmpeg/libavcodec/truemotion1data.h) | 16
-rw-r--r--  contrib/ffmpeg/libavcodec/truemotion2.c (renamed from src/libffmpeg/libavcodec/truemotion2.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/truespeech.c (renamed from src/libffmpeg/libavcodec/truespeech.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/truespeech_data.h (renamed from src/libffmpeg/libavcodec/truespeech_data.h) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/tscc.c (renamed from src/libffmpeg/libavcodec/tscc.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/tta.c (renamed from src/libffmpeg/libavcodec/tta.c) | 39
-rw-r--r--[-rwxr-xr-x]  contrib/ffmpeg/libavcodec/ulti.c (renamed from src/libffmpeg/libavcodec/ulti.c) | 10
-rw-r--r--[-rwxr-xr-x]  contrib/ffmpeg/libavcodec/ulti_cb.h (renamed from src/libffmpeg/libavcodec/ulti_cb.h) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/utils.c (renamed from src/libffmpeg/libavcodec/utils.c) | 226
-rw-r--r--  contrib/ffmpeg/libavcodec/vc1.c (renamed from src/libffmpeg/libavcodec/vc1.c) | 1538
-rw-r--r--  contrib/ffmpeg/libavcodec/vc1acdata.h (renamed from src/libffmpeg/libavcodec/vc1acdata.h) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/vc1data.h (renamed from src/libffmpeg/libavcodec/vc1data.h) | 43
-rw-r--r--  contrib/ffmpeg/libavcodec/vc1dsp.c (renamed from src/libffmpeg/libavcodec/vc1dsp.c) | 44
-rw-r--r--  contrib/ffmpeg/libavcodec/vcr1.c (renamed from src/libffmpeg/libavcodec/vcr1.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/vmdav.c (renamed from src/libffmpeg/libavcodec/vmdav.c) | 49
-rw-r--r--  contrib/ffmpeg/libavcodec/vmnc.c | 525
-rw-r--r--  contrib/ffmpeg/libavcodec/vorbis.c (renamed from src/libffmpeg/libavcodec/vorbis.c) | 646
-rw-r--r--  contrib/ffmpeg/libavcodec/vorbis.h | 43
-rw-r--r--  contrib/ffmpeg/libavcodec/vorbis_data.c (renamed from src/libffmpeg/libavcodec/vorbis.h) | 147
-rw-r--r--  contrib/ffmpeg/libavcodec/vorbis_enc.c | 1087
-rw-r--r--  contrib/ffmpeg/libavcodec/vorbis_enc_data.h | 498
-rw-r--r--  contrib/ffmpeg/libavcodec/vp3.c (renamed from src/libffmpeg/libavcodec/vp3.c) | 32
-rw-r--r--  contrib/ffmpeg/libavcodec/vp3data.h (renamed from src/libffmpeg/libavcodec/vp3data.h) | 20
-rw-r--r--  contrib/ffmpeg/libavcodec/vp3dsp.c (renamed from src/libffmpeg/libavcodec/vp3dsp.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/vp5.c | 290
-rw-r--r--  contrib/ffmpeg/libavcodec/vp56.c | 665
-rw-r--r--  contrib/ffmpeg/libavcodec/vp56.h | 248
-rw-r--r--  contrib/ffmpeg/libavcodec/vp56data.c | 66
-rw-r--r--  contrib/ffmpeg/libavcodec/vp56data.h | 248
-rw-r--r--  contrib/ffmpeg/libavcodec/vp5data.h | 173
-rw-r--r--  contrib/ffmpeg/libavcodec/vp6.c | 522
-rw-r--r--  contrib/ffmpeg/libavcodec/vp6data.h | 292
-rw-r--r--  contrib/ffmpeg/libavcodec/vqavideo.c (renamed from src/libffmpeg/libavcodec/vqavideo.c) | 33
-rw-r--r--  contrib/ffmpeg/libavcodec/w32thread.c | 136
-rw-r--r--  contrib/ffmpeg/libavcodec/wavpack.c | 556
-rw-r--r--  contrib/ffmpeg/libavcodec/wmadata.h (renamed from src/libffmpeg/libavcodec/wmadata.h) | 21
-rw-r--r--  contrib/ffmpeg/libavcodec/wmadec.c (renamed from src/libffmpeg/libavcodec/wmadec.c) | 42
-rw-r--r--  contrib/ffmpeg/libavcodec/wmv2.c (renamed from src/libffmpeg/libavcodec/wmv2.c) | 13
-rw-r--r--  contrib/ffmpeg/libavcodec/wnv1.c (renamed from src/libffmpeg/libavcodec/wnv1.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/ws-snd1.c (renamed from src/libffmpeg/libavcodec/ws-snd1.c) | 15
-rw-r--r--  contrib/ffmpeg/libavcodec/x264.c | 299
-rw-r--r--  contrib/ffmpeg/libavcodec/xan.c (renamed from src/libffmpeg/libavcodec/xan.c) | 10
-rw-r--r--  contrib/ffmpeg/libavcodec/xl.c (renamed from src/libffmpeg/libavcodec/xl.c) | 12
-rw-r--r--  contrib/ffmpeg/libavcodec/xvid_internal.h | 32
-rw-r--r--  contrib/ffmpeg/libavcodec/xvid_rc.c | 148
-rw-r--r--  contrib/ffmpeg/libavcodec/xvidff.c | 768
-rw-r--r--  contrib/ffmpeg/libavcodec/xvmcvideo.c | 318
-rw-r--r--  contrib/ffmpeg/libavcodec/zmbv.c (renamed from src/libffmpeg/libavcodec/zmbv.c) | 18
-rw-r--r--  contrib/ffmpeg/libavformat/4xm.c | 331
-rw-r--r--  contrib/ffmpeg/libavformat/Makefile | 197
-rw-r--r--  contrib/ffmpeg/libavformat/adtsenc.c | 123
-rw-r--r--  contrib/ffmpeg/libavformat/aiff.c | 436
-rw-r--r--  contrib/ffmpeg/libavformat/allformats.c | 182
-rw-r--r--  contrib/ffmpeg/libavformat/allformats.h | 176
-rw-r--r--  contrib/ffmpeg/libavformat/amr.c | 201
-rw-r--r--  contrib/ffmpeg/libavformat/asf-enc.c | 866
-rw-r--r--  contrib/ffmpeg/libavformat/asf.c | 951
-rw-r--r--  contrib/ffmpeg/libavformat/asf.h | 285
-rw-r--r--  contrib/ffmpeg/libavformat/au.c | 209
-rw-r--r--  contrib/ffmpeg/libavformat/audio.c | 352
-rw-r--r--  contrib/ffmpeg/libavformat/avformat.h | 539
-rw-r--r--  contrib/ffmpeg/libavformat/avi.h | 39
-rw-r--r--  contrib/ffmpeg/libavformat/avidec.c | 989
-rw-r--r--  contrib/ffmpeg/libavformat/avienc.c | 580
-rw-r--r--  contrib/ffmpeg/libavformat/avio.c | 192
-rw-r--r--  contrib/ffmpeg/libavformat/avio.h | 201
-rw-r--r--  contrib/ffmpeg/libavformat/aviobuf.c | 790
-rw-r--r--  contrib/ffmpeg/libavformat/avisynth.c | 222
-rw-r--r--  contrib/ffmpeg/libavformat/avs.c | 227
-rw-r--r--  contrib/ffmpeg/libavformat/barpainet.h | 45
-rw-r--r--  contrib/ffmpeg/libavformat/base64.c | 231
-rw-r--r--  contrib/ffmpeg/libavformat/base64.h | 24
-rw-r--r--  contrib/ffmpeg/libavformat/beosaudio.cpp | 465
-rw-r--r--  contrib/ffmpeg/libavformat/crc.c | 98
-rw-r--r--  contrib/ffmpeg/libavformat/cutils.c | 275
-rw-r--r--  contrib/ffmpeg/libavformat/daud.c | 58
-rw-r--r--  contrib/ffmpeg/libavformat/dc1394.c | 193
-rw-r--r--  contrib/ffmpeg/libavformat/dsicin.c | 224
-rw-r--r--  contrib/ffmpeg/libavformat/dv.c | 451
-rw-r--r--  contrib/ffmpeg/libavformat/dv.h | 37
-rw-r--r--  contrib/ffmpeg/libavformat/dv1394.c | 240
-rw-r--r--  contrib/ffmpeg/libavformat/dv1394.h | 357
-rw-r--r--  contrib/ffmpeg/libavformat/dvenc.c | 407
-rw-r--r--  contrib/ffmpeg/libavformat/electronicarts.c | 291
-rw-r--r--  contrib/ffmpeg/libavformat/ffm.c | 792
-rw-r--r--  contrib/ffmpeg/libavformat/file.c | 140
-rw-r--r--  contrib/ffmpeg/libavformat/flic.c | 221
-rw-r--r--  contrib/ffmpeg/libavformat/flvdec.c | 259
-rw-r--r--  contrib/ffmpeg/libavformat/flvenc.c | 284
-rw-r--r--  contrib/ffmpeg/libavformat/framehook.c | 121
-rw-r--r--  contrib/ffmpeg/libavformat/framehook.h | 50
-rw-r--r--  contrib/ffmpeg/libavformat/gif.c | 419
-rw-r--r--  contrib/ffmpeg/libavformat/gifdec.c | 593
-rw-r--r--  contrib/ffmpeg/libavformat/grab.c | 860
-rw-r--r--  contrib/ffmpeg/libavformat/grab_bktr.c | 330
-rw-r--r--  contrib/ffmpeg/libavformat/gxf.c | 525
-rw-r--r--  contrib/ffmpeg/libavformat/gxf.h | 34
-rw-r--r--  contrib/ffmpeg/libavformat/gxfenc.c | 829
-rw-r--r--  contrib/ffmpeg/libavformat/http.c | 289
-rw-r--r--  contrib/ffmpeg/libavformat/idcin.c | 301
-rw-r--r--  contrib/ffmpeg/libavformat/idroq.c | 291
-rw-r--r--  contrib/ffmpeg/libavformat/img.c | 400
-rw-r--r--  contrib/ffmpeg/libavformat/img2.c | 425
-rw-r--r--  contrib/ffmpeg/libavformat/ipmovie.c | 625
-rw-r--r--  contrib/ffmpeg/libavformat/isom.c | 131
-rw-r--r--  contrib/ffmpeg/libavformat/isom.h | 38
-rw-r--r--  contrib/ffmpeg/libavformat/jpeg.c | 240
-rw-r--r--  contrib/ffmpeg/libavformat/libnut.c | 283
-rw-r--r--  contrib/ffmpeg/libavformat/matroska.c | 2767
-rw-r--r--  contrib/ffmpeg/libavformat/mm.c | 212
-rw-r--r--  contrib/ffmpeg/libavformat/mmf.c | 331
-rw-r--r--  contrib/ffmpeg/libavformat/mov.c | 1798
-rw-r--r--  contrib/ffmpeg/libavformat/movenc.c | 1724
-rw-r--r--  contrib/ffmpeg/libavformat/mp3.c | 430
-rw-r--r--  contrib/ffmpeg/libavformat/mpeg.c | 1824
-rw-r--r--  contrib/ffmpeg/libavformat/mpegts.c | 1527
-rw-r--r--  contrib/ffmpeg/libavformat/mpegts.h | 63
-rw-r--r--  contrib/ffmpeg/libavformat/mpegtsenc.c | 676
-rw-r--r--  contrib/ffmpeg/libavformat/mpjpeg.c | 67
-rw-r--r--  contrib/ffmpeg/libavformat/mtv.c | 187
-rw-r--r--  contrib/ffmpeg/libavformat/mxf.c | 1082
-rw-r--r--  contrib/ffmpeg/libavformat/nsvdec.c | 763
-rw-r--r--  contrib/ffmpeg/libavformat/nut.c | 1457
-rw-r--r--  contrib/ffmpeg/libavformat/nut.h | 97
-rw-r--r--  contrib/ffmpeg/libavformat/nutdec.c | 889
-rw-r--r--  contrib/ffmpeg/libavformat/nuv.c | 241
-rw-r--r--  contrib/ffmpeg/libavformat/ogg.c | 283
-rw-r--r--  contrib/ffmpeg/libavformat/ogg2.c | 697
-rw-r--r--  contrib/ffmpeg/libavformat/ogg2.h | 85
-rw-r--r--  contrib/ffmpeg/libavformat/oggparseflac.c | 82
-rw-r--r--  contrib/ffmpeg/libavformat/oggparseogm.c | 166
-rw-r--r--  contrib/ffmpeg/libavformat/oggparsetheora.c | 129
-rw-r--r--  contrib/ffmpeg/libavformat/oggparsevorbis.c | 205
-rw-r--r--  contrib/ffmpeg/libavformat/os_support.c | 96
-rw-r--r--  contrib/ffmpeg/libavformat/os_support.h | 53
-rw-r--r--  contrib/ffmpeg/libavformat/png.c | 889
-rw-r--r--  contrib/ffmpeg/libavformat/pnm.c | 478
-rw-r--r--  contrib/ffmpeg/libavformat/psxstr.c | 364
-rw-r--r--  contrib/ffmpeg/libavformat/qtpalette.h | 295
-rw-r--r--  contrib/ffmpeg/libavformat/raw.c | 843
-rw-r--r--  contrib/ffmpeg/libavformat/riff.c | 468
-rw-r--r--  contrib/ffmpeg/libavformat/riff.h | 51
-rw-r--r--  contrib/ffmpeg/libavformat/rm.c | 1146
-rw-r--r--  contrib/ffmpeg/libavformat/rtp.c | 1099
-rw-r--r--  contrib/ffmpeg/libavformat/rtp.h | 118
-rw-r--r--  contrib/ffmpeg/libavformat/rtp_h264.c | 419
-rw-r--r--  contrib/ffmpeg/libavformat/rtp_h264.h | 26
-rw-r--r--  contrib/ffmpeg/libavformat/rtp_internal.h | 110
-rw-r--r--  contrib/ffmpeg/libavformat/rtpproto.c | 303
-rw-r--r--  contrib/ffmpeg/libavformat/rtsp.c | 1493
-rw-r--r--  contrib/ffmpeg/libavformat/rtsp.h | 98
-rw-r--r--  contrib/ffmpeg/libavformat/rtspcodes.h | 31
-rw-r--r--  contrib/ffmpeg/libavformat/segafilm.c | 310
-rw-r--r--  contrib/ffmpeg/libavformat/sgi.c | 460
-rw-r--r--  contrib/ffmpeg/libavformat/sierravmd.c | 302
-rw-r--r--  contrib/ffmpeg/libavformat/smacker.c | 345
-rw-r--r--  contrib/ffmpeg/libavformat/sol.c | 160
-rw-r--r--  contrib/ffmpeg/libavformat/swf.c | 944
-rw-r--r--  contrib/ffmpeg/libavformat/tcp.c | 232
-rw-r--r--  contrib/ffmpeg/libavformat/tiertexseq.c | 310
-rw-r--r--  contrib/ffmpeg/libavformat/tta.c | 152
-rw-r--r--  contrib/ffmpeg/libavformat/udp.c | 512
-rw-r--r--  contrib/ffmpeg/libavformat/utils.c | 3108
-rw-r--r--  contrib/ffmpeg/libavformat/v4l2.c | 541
-rw-r--r--  contrib/ffmpeg/libavformat/voc.c | 36
-rw-r--r--  contrib/ffmpeg/libavformat/voc.h | 51
-rw-r--r--  contrib/ffmpeg/libavformat/vocdec.c | 155
-rw-r--r--  contrib/ffmpeg/libavformat/vocenc.c | 104
-rw-r--r--  contrib/ffmpeg/libavformat/wav.c | 253
-rw-r--r--  contrib/ffmpeg/libavformat/wc3movie.c | 394
-rw-r--r--  contrib/ffmpeg/libavformat/westwood.c | 414
-rw-r--r--  contrib/ffmpeg/libavformat/wv.c | 202
-rw-r--r--  contrib/ffmpeg/libavformat/yuv.c | 161
-rw-r--r--  contrib/ffmpeg/libavformat/yuv4mpeg.c | 408
-rw-r--r--  contrib/ffmpeg/libavutil/Makefile | 29
-rw-r--r--  contrib/ffmpeg/libavutil/adler32.c (renamed from src/libffmpeg/libavutil/adler32.c) | 20
-rw-r--r--  contrib/ffmpeg/libavutil/adler32.h | 27
-rw-r--r--  contrib/ffmpeg/libavutil/avutil.h | 137
-rw-r--r--  contrib/ffmpeg/libavutil/bswap.h (renamed from src/libffmpeg/libavutil/bswap.h) | 24
-rw-r--r--  contrib/ffmpeg/libavutil/common.h (renamed from src/libffmpeg/libavutil/common.h) | 152
-rw-r--r--  contrib/ffmpeg/libavutil/crc.c (renamed from src/libffmpeg/libavutil/crc.c) | 20
-rw-r--r--  contrib/ffmpeg/libavutil/crc.h | 35
-rw-r--r--  contrib/ffmpeg/libavutil/fifo.c | 137
-rw-r--r--  contrib/ffmpeg/libavutil/fifo.h | 25
-rw-r--r--  contrib/ffmpeg/libavutil/integer.c (renamed from src/libffmpeg/libavutil/integer.c) | 14
-rw-r--r--  contrib/ffmpeg/libavutil/integer.h (renamed from src/libffmpeg/libavutil/integer.h) | 10
-rw-r--r--  contrib/ffmpeg/libavutil/internal.h (renamed from src/libffmpeg/libavutil/internal.h) | 136
-rw-r--r--  contrib/ffmpeg/libavutil/intfloat_readwrite.c | 97
-rw-r--r--  contrib/ffmpeg/libavutil/intfloat_readwrite.h | 39
-rw-r--r--  contrib/ffmpeg/libavutil/lls.c (renamed from src/libffmpeg/libavutil/lls.c) | 10
-rw-r--r--  contrib/ffmpeg/libavutil/lls.h (renamed from src/libffmpeg/libavutil/lls.h) | 10
-rw-r--r--  contrib/ffmpeg/libavutil/log.c (renamed from src/libffmpeg/libavutil/log.c) | 10
-rw-r--r--  contrib/ffmpeg/libavutil/log.h (renamed from src/libffmpeg/libavutil/log.h) | 22
-rw-r--r--  contrib/ffmpeg/libavutil/mathematics.c (renamed from src/libffmpeg/libavutil/mathematics.c) | 12
-rw-r--r--  contrib/ffmpeg/libavutil/mathematics.h (renamed from src/libffmpeg/libavutil/mathematics.h) | 20
-rw-r--r--  contrib/ffmpeg/libavutil/md5.c (renamed from src/libffmpeg/libavutil/md5.c) | 20
-rw-r--r--  contrib/ffmpeg/libavutil/md5.h | 34
-rw-r--r--  contrib/ffmpeg/libavutil/mem.c (renamed from src/libffmpeg/libavcodec/mem.c) | 74
-rw-r--r--  contrib/ffmpeg/libavutil/rational.c (renamed from src/libffmpeg/libavutil/rational.c) | 27
-rw-r--r--  contrib/ffmpeg/libavutil/rational.h (renamed from src/libffmpeg/libavutil/rational.h) | 10
-rw-r--r--  contrib/ffmpeg/libavutil/softfloat.c | 72
-rw-r--r--  contrib/ffmpeg/libavutil/softfloat.h | 122
-rw-r--r--  contrib/ffmpeg/libavutil/tree.c | 151
-rw-r--r--  contrib/ffmpeg/libavutil/tree.h | 52
-rw-r--r--  contrib/ffmpeg/libavutil/x86_cpu.h | 60
-rw-r--r--  contrib/ffmpeg/libpostproc/Makefile | 26
-rw-r--r--  contrib/ffmpeg/libpostproc/mangle.h | 47
-rw-r--r--  contrib/ffmpeg/libpostproc/postprocess.c (renamed from src/libffmpeg/libavcodec/libpostproc/postprocess.c) | 183
-rw-r--r--  contrib/ffmpeg/libpostproc/postprocess.h (renamed from src/libffmpeg/libavcodec/libpostproc/postprocess.h) | 34
-rw-r--r--  contrib/ffmpeg/libpostproc/postprocess_altivec_template.c (renamed from src/libffmpeg/libavcodec/libpostproc/postprocess_altivec_template.c) | 45
-rw-r--r--  contrib/ffmpeg/libpostproc/postprocess_internal.h (renamed from src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h) | 57
-rw-r--r--  contrib/ffmpeg/libpostproc/postprocess_template.c (renamed from src/libffmpeg/libavcodec/libpostproc/postprocess_template.c) | 94
-rw-r--r--  contrib/ffmpeg/libswscale/Makefile | 26
-rw-r--r--  contrib/ffmpeg/libswscale/cs_test.c | 306
-rw-r--r--  contrib/ffmpeg/libswscale/rgb2rgb.c | 665
-rw-r--r--  contrib/ffmpeg/libswscale/rgb2rgb.h | 147
-rw-r--r--  contrib/ffmpeg/libswscale/rgb2rgb_template.c | 2688
-rw-r--r--  contrib/ffmpeg/libswscale/swscale-example.c | 229
-rw-r--r--  contrib/ffmpeg/libswscale/swscale.c | 2864
-rw-r--r--  contrib/ffmpeg/libswscale/swscale.h | 146
-rw-r--r--  contrib/ffmpeg/libswscale/swscale_altivec_template.c | 548
-rw-r--r--  contrib/ffmpeg/libswscale/swscale_internal.h | 226
-rw-r--r--  contrib/ffmpeg/libswscale/swscale_template.c | 3241
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb.c | 844
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb_altivec.c | 963
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb_mlib.c | 87
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb_template.c | 540
-rw-r--r--  contrib/ffmpeg/makefile.xine.in | 30
-rw-r--r--  contrib/ffmpeg/output_example.c | 546
-rw-r--r--  contrib/ffmpeg/pktdumper.c | 97
-rw-r--r--  contrib/ffmpeg/qt-faststart.c | 311
-rw-r--r--  contrib/ffmpeg/tests/Makefile | 91
-rw-r--r--  contrib/ffmpeg/tests/audiogen.c | 168
-rw-r--r--  contrib/ffmpeg/tests/dsptest.c | 178
-rw-r--r--  contrib/ffmpeg/tests/ffmpeg.regression.ref | 182
-rw-r--r--  contrib/ffmpeg/tests/ffserver.regression.ref | 10
-rw-r--r--  contrib/ffmpeg/tests/lena.pnm | 109
-rw-r--r--  contrib/ffmpeg/tests/libav.regression.ref | 106
-rwxr-xr-x  contrib/ffmpeg/tests/regression.sh | 785
-rw-r--r--  contrib/ffmpeg/tests/rotozoom.c | 289
-rw-r--r--  contrib/ffmpeg/tests/rotozoom.regression.ref | 182
-rwxr-xr-x  contrib/ffmpeg/tests/server-regression.sh | 50
-rw-r--r--  contrib/ffmpeg/tests/test.conf | 306
-rw-r--r--  contrib/ffmpeg/tests/tiny_psnr.c | 154
-rw-r--r--  contrib/ffmpeg/tests/videogen.c | 278
-rwxr-xr-x  contrib/ffmpeg/unwrap-diff | 2
-rwxr-xr-x  contrib/ffmpeg/version.sh | 14
-rw-r--r--  contrib/ffmpeg/vhook/Makefile | 51
-rw-r--r--  contrib/ffmpeg/vhook/drawtext.c | 531
-rw-r--r--  contrib/ffmpeg/vhook/fish.c | 380
-rw-r--r--  contrib/ffmpeg/vhook/imlib2.c | 450
-rw-r--r--  contrib/ffmpeg/vhook/null.c | 116
-rw-r--r--  contrib/ffmpeg/vhook/ppm.c | 367
-rw-r--r--  contrib/ffmpeg/vhook/watermark.c | 661
-rw-r--r--  contrib/ffmpeg/xvmc_render.h | 47
-rw-r--r--  src/libffmpeg/Makefile.am | 20
-rw-r--r--  src/libffmpeg/diff_to_ffmpeg_cvs.txt | 527
-rw-r--r--  src/libffmpeg/dvaudio_decoder.c | 28
-rw-r--r--  src/libffmpeg/libavcodec/.cvsignore | 6
-rw-r--r--  src/libffmpeg/libavcodec/Makefile.am | 172
-rw-r--r--  src/libffmpeg/libavcodec/alpha/.cvsignore | 6
-rw-r--r--  src/libffmpeg/libavcodec/alpha/Makefile.am | 13
-rw-r--r--  src/libffmpeg/libavcodec/armv4l/.cvsignore | 6
-rw-r--r--  src/libffmpeg/libavcodec/armv4l/Makefile.am | 18
-rw-r--r--  src/libffmpeg/libavcodec/armv4l/libavcodec_armv4l_dummy.c | 2
-rw-r--r--  src/libffmpeg/libavcodec/cabac.h | 429
-rw-r--r--  src/libffmpeg/libavcodec/eval.c | 226
-rw-r--r--  src/libffmpeg/libavcodec/fastmemcpy.h | 4
-rw-r--r--  src/libffmpeg/libavcodec/i386/.cvsignore | 6
-rw-r--r--  src/libffmpeg/libavcodec/i386/Makefile.am | 49
-rw-r--r--  src/libffmpeg/libavcodec/i386/fft_sse.c | 140
-rw-r--r--  src/libffmpeg/libavcodec/i386/libavcodec_mmx_dummy.c | 2
-rw-r--r--  src/libffmpeg/libavcodec/libpostproc/.cvsignore | 6
-rw-r--r--  src/libffmpeg/libavcodec/libpostproc/Makefile.am | 15
-rw-r--r--  src/libffmpeg/libavcodec/libpostproc/mangle.h | 28
-rw-r--r--  src/libffmpeg/libavcodec/lzo.h | 14
-rw-r--r--  src/libffmpeg/libavcodec/mlib/.cvsignore | 6
-rw-r--r--  src/libffmpeg/libavcodec/mlib/Makefile.am | 18
-rw-r--r--  src/libffmpeg/libavcodec/mlib/libavcodec_mlib_dummy.c | 2
-rw-r--r--  src/libffmpeg/libavcodec/ppc/.cvsignore | 6
-rw-r--r--  src/libffmpeg/libavcodec/ppc/Makefile.am | 34
-rw-r--r--  src/libffmpeg/libavcodec/ppc/dsputil_altivec.h | 97
-rw-r--r--  src/libffmpeg/libavcodec/ppc/libavcodec_ppc_dummy.c | 2
-rw-r--r--  src/libffmpeg/libavcodec/rtjpeg.h | 19
-rw-r--r--  src/libffmpeg/libavcodec/sparc/.cvsignore | 6
-rw-r--r--  src/libffmpeg/libavcodec/sparc/Makefile.am | 18
-rw-r--r--  src/libffmpeg/libavcodec/sparc/libavcodec_sparc_dummy.c | 2
-rw-r--r--  src/libffmpeg/libavcodec/swscale.h | 32
-rw-r--r--  src/libffmpeg/libavutil/.cvsignore | 6
-rw-r--r--  src/libffmpeg/libavutil/Makefile.am | 35
-rw-r--r--  src/libffmpeg/libavutil/adler32.h | 7
-rw-r--r--  src/libffmpeg/libavutil/avutil.h | 80
-rw-r--r--  src/libffmpeg/libavutil/crc.h | 15
-rw-r--r--  src/libffmpeg/libavutil/intfloat_readwrite.h | 19
-rw-r--r--  src/libffmpeg/libavutil/md5.h | 14
-rw-r--r--  src/libffmpeg/libavutil/x86_cpu.h | 38
-rw-r--r--  src/libffmpeg/video_decoder.c | 8
-rw-r--r--  src/libffmpeg/xine_decoder.h | 8
-rw-r--r--  src/post/deinterlace/plugins/Makefile.am | 2
-rw-r--r--  src/post/planar/Makefile.am | 9
595 files changed, 149002 insertions, 7669 deletions
diff --git a/Makefile.am b/Makefile.am
index f4a8156eb..7e9036b8a 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -6,7 +6,7 @@ AUTOMAKE_OPTIONS = 1.3
ACLOCAL_AMFLAGS = -I m4
-SUBDIRS = doc m4 po misc include lib src win32
+SUBDIRS = doc m4 po misc include lib src win32 contrib
DEBFILES = debian/README.Debian debian/changelog debian/control \
debian/copyright debian/rules debian/compat \
diff --git a/Makefile.in b/Makefile.in
new file mode 100644
index 000000000..9cf5fcf8c
--- /dev/null
+++ b/Makefile.in
@@ -0,0 +1,1069 @@
+# Makefile.in generated by automake 1.9.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005 Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+###
+
+srcdir = @srcdir@
+top_srcdir = @top_srcdir@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+top_builddir = .
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+INSTALL = @INSTALL@
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = README $(am__configure_deps) $(noinst_HEADERS) \
+ $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+ $(srcdir)/config.h.in $(top_srcdir)/configure \
+ $(top_srcdir)/contrib/Makefile.in \
+ $(top_srcdir)/contrib/ffmpeg/makefile.xine.in \
+ $(top_srcdir)/src/libffmpeg/libavcodec/Makefile.in \
+ $(top_srcdir)/src/libffmpeg/libavcodec/alpha/Makefile.in \
+ $(top_srcdir)/src/libffmpeg/libavcodec/armv4l/Makefile.in \
+ $(top_srcdir)/src/libffmpeg/libavcodec/i386/Makefile.in \
+ $(top_srcdir)/src/libffmpeg/libavcodec/libpostproc/Makefile.in \
+ $(top_srcdir)/src/libffmpeg/libavcodec/mlib/Makefile.in \
+ $(top_srcdir)/src/libffmpeg/libavcodec/ppc/Makefile.in \
+ $(top_srcdir)/src/libffmpeg/libavcodec/sparc/Makefile.in \
+ $(top_srcdir)/src/libffmpeg/libavutil/Makefile.in ABOUT-NLS \
+ AUTHORS COPYING ChangeLog INSTALL NEWS TODO compile \
+ config.guess config.rpath config.sub depcomp install-sh \
+ ltmain.sh missing mkinstalldirs
+subdir = .
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/m4/_xine.m4 $(top_srcdir)/m4/aa.m4 \
+ $(top_srcdir)/m4/arts.m4 $(top_srcdir)/m4/as.m4 \
+ $(top_srcdir)/m4/attributes.m4 $(top_srcdir)/m4/directx.m4 \
+ $(top_srcdir)/m4/dl.m4 $(top_srcdir)/m4/dvdnav.m4 \
+ $(top_srcdir)/m4/gettext.m4 $(top_srcdir)/m4/iconv.m4 \
+ $(top_srcdir)/m4/irixal.m4 $(top_srcdir)/m4/isc-posix.m4 \
+ $(top_srcdir)/m4/lib-ld.m4 $(top_srcdir)/m4/lib-link.m4 \
+ $(top_srcdir)/m4/lib-prefix.m4 $(top_srcdir)/m4/libFLAC.m4 \
+ $(top_srcdir)/m4/libfame.m4 $(top_srcdir)/m4/libtool15.m4 \
+ $(top_srcdir)/m4/nls.m4 $(top_srcdir)/m4/opengl.m4 \
+ $(top_srcdir)/m4/optimizations.m4 $(top_srcdir)/m4/pkg.m4 \
+ $(top_srcdir)/m4/po.m4 $(top_srcdir)/m4/progtest.m4 \
+ $(top_srcdir)/m4/xv.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ $(ACLOCAL_M4)
+am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
+ configure.lineno configure.status.lineno
+mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
+CONFIG_HEADER = config.h
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
+ html-recursive info-recursive install-data-recursive \
+ install-exec-recursive install-info-recursive \
+ install-recursive installcheck-recursive installdirs-recursive \
+ pdf-recursive ps-recursive uninstall-info-recursive \
+ uninstall-recursive
+HEADERS = $(noinst_HEADERS)
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+am__remove_distdir = \
+ { test ! -d $(distdir) \
+ || { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \
+ && rm -fr $(distdir); }; }
+DIST_ARCHIVES = $(distdir).tar.gz
+GZIP_ENV = --best
+distuninstallcheck_listfiles = find . -type f -print
+distcleancheck_listfiles = find . -type f -print
+A52_FALSE = @A52_FALSE@
+A52_TRUE = @A52_TRUE@
+AAINFO = @AAINFO@
+AALIB_CFLAGS = @AALIB_CFLAGS@
+AALIB_CONFIG = @AALIB_CONFIG@
+AALIB_LIBS = @AALIB_LIBS@
+ACLOCAL = @ACLOCAL@
+ACLOCAL_DIR = @ACLOCAL_DIR@
+ALSA_CFLAGS = @ALSA_CFLAGS@
+ALSA_LIBS = @ALSA_LIBS@
+AMDEP_FALSE = @AMDEP_FALSE@
+AMDEP_TRUE = @AMDEP_TRUE@
+AMTAR = @AMTAR@
+AR = @AR@
+ARTS_CFLAGS = @ARTS_CFLAGS@
+ARTS_CONFIG = @ARTS_CONFIG@
+ARTS_LIBS = @ARTS_LIBS@
+AS = @AS@
+ASFLAGS = @ASFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BUILD_ASF_FALSE = @BUILD_ASF_FALSE@
+BUILD_ASF_TRUE = @BUILD_ASF_TRUE@
+BUILD_DHA_KMOD_FALSE = @BUILD_DHA_KMOD_FALSE@
+BUILD_DHA_KMOD_TRUE = @BUILD_DHA_KMOD_TRUE@
+BUILD_DMX_IMAGE_FALSE = @BUILD_DMX_IMAGE_FALSE@
+BUILD_DMX_IMAGE_TRUE = @BUILD_DMX_IMAGE_TRUE@
+BUILD_FAAD_FALSE = @BUILD_FAAD_FALSE@
+BUILD_FAAD_TRUE = @BUILD_FAAD_TRUE@
+CACA_CFLAGS = @CACA_CFLAGS@
+CACA_LIBS = @CACA_LIBS@
+CC = @CC@
+CCAS = @CCAS@
+CCASCOMPILE = @CCASCOMPILE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEBUG_CFLAGS = @DEBUG_CFLAGS@
+DEFS = @DEFS@
+DEPCOMP = @DEPCOMP@
+DEPDIR = @DEPDIR@
+DEPMOD = @DEPMOD@
+DIRECTFB_CFLAGS = @DIRECTFB_CFLAGS@
+DIRECTFB_LIBS = @DIRECTFB_LIBS@
+DIRECTX_AUDIO_LIBS = @DIRECTX_AUDIO_LIBS@
+DIRECTX_CPPFLAGS = @DIRECTX_CPPFLAGS@
+DIRECTX_VIDEO_LIBS = @DIRECTX_VIDEO_LIBS@
+DLLTOOL = @DLLTOOL@
+DTS_FALSE = @DTS_FALSE@
+DTS_TRUE = @DTS_TRUE@
+DVDNAV_CFLAGS = @DVDNAV_CFLAGS@
+DVDNAV_CONFIG = @DVDNAV_CONFIG@
+DVDNAV_LIBS = @DVDNAV_LIBS@
+DYNAMIC_LD_LIBS = @DYNAMIC_LD_LIBS@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ENABLE_VCD_FALSE = @ENABLE_VCD_FALSE@
+ENABLE_VCD_TRUE = @ENABLE_VCD_TRUE@
+ENABLE_VIS_FALSE = @ENABLE_VIS_FALSE@
+ENABLE_VIS_TRUE = @ENABLE_VIS_TRUE@
+ESD_CFLAGS = @ESD_CFLAGS@
+ESD_LIBS = @ESD_LIBS@
+EXEEXT = @EXEEXT@
+EXTERNAL_A52DEC_FALSE = @EXTERNAL_A52DEC_FALSE@
+EXTERNAL_A52DEC_TRUE = @EXTERNAL_A52DEC_TRUE@
+EXTERNAL_LIBDTS_FALSE = @EXTERNAL_LIBDTS_FALSE@
+EXTERNAL_LIBDTS_TRUE = @EXTERNAL_LIBDTS_TRUE@
+EXTERNAL_LIBMAD_FALSE = @EXTERNAL_LIBMAD_FALSE@
+EXTERNAL_LIBMAD_TRUE = @EXTERNAL_LIBMAD_TRUE@
+EXTRA_X_CFLAGS = @EXTRA_X_CFLAGS@
+EXTRA_X_LIBS = @EXTRA_X_LIBS@
+FFMPEG_CFLAGS = @FFMPEG_CFLAGS@
+FFMPEG_LIBS = @FFMPEG_LIBS@
+FFMPEG_POSTPROC_CFLAGS = @FFMPEG_POSTPROC_CFLAGS@
+FFMPEG_POSTPROC_LIBS = @FFMPEG_POSTPROC_LIBS@
+FIG2DEV = @FIG2DEV@
+FONTCONFIG_CFLAGS = @FONTCONFIG_CFLAGS@
+FONTCONFIG_LIBS = @FONTCONFIG_LIBS@
+FT2_CFLAGS = @FT2_CFLAGS@
+FT2_LIBS = @FT2_LIBS@
+FUSIONSOUND_CFLAGS = @FUSIONSOUND_CFLAGS@
+FUSIONSOUND_LIBS = @FUSIONSOUND_LIBS@
+GDK_PIXBUF_CFLAGS = @GDK_PIXBUF_CFLAGS@
+GDK_PIXBUF_LIBS = @GDK_PIXBUF_LIBS@
+GENERATED_INTTYPES_H_FALSE = @GENERATED_INTTYPES_H_FALSE@
+GENERATED_INTTYPES_H_TRUE = @GENERATED_INTTYPES_H_TRUE@
+GLU_LIBS = @GLU_LIBS@
+GMSGFMT = @GMSGFMT@
+GNOME_VFS_CFLAGS = @GNOME_VFS_CFLAGS@
+GNOME_VFS_LIBS = @GNOME_VFS_LIBS@
+GOOM_LIBS = @GOOM_LIBS@
+GREP = @GREP@
+HAVE_AA_FALSE = @HAVE_AA_FALSE@
+HAVE_AA_TRUE = @HAVE_AA_TRUE@
+HAVE_ALSA_FALSE = @HAVE_ALSA_FALSE@
+HAVE_ALSA_TRUE = @HAVE_ALSA_TRUE@
+HAVE_ARMV4L_FALSE = @HAVE_ARMV4L_FALSE@
+HAVE_ARMV4L_TRUE = @HAVE_ARMV4L_TRUE@
+HAVE_ARTS_FALSE = @HAVE_ARTS_FALSE@
+HAVE_ARTS_TRUE = @HAVE_ARTS_TRUE@
+HAVE_BSDI_CDROM = @HAVE_BSDI_CDROM@
+HAVE_CACA_FALSE = @HAVE_CACA_FALSE@
+HAVE_CACA_TRUE = @HAVE_CACA_TRUE@
+HAVE_CDROM_IOCTLS_FALSE = @HAVE_CDROM_IOCTLS_FALSE@
+HAVE_CDROM_IOCTLS_TRUE = @HAVE_CDROM_IOCTLS_TRUE@
+HAVE_COREAUDIO_FALSE = @HAVE_COREAUDIO_FALSE@
+HAVE_COREAUDIO_TRUE = @HAVE_COREAUDIO_TRUE@
+HAVE_DARWIN_CDROM = @HAVE_DARWIN_CDROM@
+HAVE_DIRECTFB_FALSE = @HAVE_DIRECTFB_FALSE@
+HAVE_DIRECTFB_TRUE = @HAVE_DIRECTFB_TRUE@
+HAVE_DIRECTX_FALSE = @HAVE_DIRECTX_FALSE@
+HAVE_DIRECTX_TRUE = @HAVE_DIRECTX_TRUE@
+HAVE_DVDNAV_FALSE = @HAVE_DVDNAV_FALSE@
+HAVE_DVDNAV_TRUE = @HAVE_DVDNAV_TRUE@
+HAVE_DXR3_FALSE = @HAVE_DXR3_FALSE@
+HAVE_DXR3_TRUE = @HAVE_DXR3_TRUE@
+HAVE_ESD_FALSE = @HAVE_ESD_FALSE@
+HAVE_ESD_TRUE = @HAVE_ESD_TRUE@
+HAVE_FB_FALSE = @HAVE_FB_FALSE@
+HAVE_FB_TRUE = @HAVE_FB_TRUE@
+HAVE_FFMMX_FALSE = @HAVE_FFMMX_FALSE@
+HAVE_FFMMX_TRUE = @HAVE_FFMMX_TRUE@
+HAVE_FFMPEG_FALSE = @HAVE_FFMPEG_FALSE@
+HAVE_FFMPEG_TRUE = @HAVE_FFMPEG_TRUE@
+HAVE_FIG2DEV_FALSE = @HAVE_FIG2DEV_FALSE@
+HAVE_FIG2DEV_TRUE = @HAVE_FIG2DEV_TRUE@
+HAVE_FLAC_FALSE = @HAVE_FLAC_FALSE@
+HAVE_FLAC_TRUE = @HAVE_FLAC_TRUE@
+HAVE_FREEBSD_CDROM = @HAVE_FREEBSD_CDROM@
+HAVE_FUSIONSOUND_FALSE = @HAVE_FUSIONSOUND_FALSE@
+HAVE_FUSIONSOUND_TRUE = @HAVE_FUSIONSOUND_TRUE@
+HAVE_GDK_PIXBUF_FALSE = @HAVE_GDK_PIXBUF_FALSE@
+HAVE_GDK_PIXBUF_TRUE = @HAVE_GDK_PIXBUF_TRUE@
+HAVE_GNOME_VFS_FALSE = @HAVE_GNOME_VFS_FALSE@
+HAVE_GNOME_VFS_TRUE = @HAVE_GNOME_VFS_TRUE@
+HAVE_IRIXAL_FALSE = @HAVE_IRIXAL_FALSE@
+HAVE_IRIXAL_TRUE = @HAVE_IRIXAL_TRUE@
+HAVE_JACK = @HAVE_JACK@
+HAVE_JACK_FALSE = @HAVE_JACK_FALSE@
+HAVE_JACK_TRUE = @HAVE_JACK_TRUE@
+HAVE_LIBFAME_FALSE = @HAVE_LIBFAME_FALSE@
+HAVE_LIBFAME_TRUE = @HAVE_LIBFAME_TRUE@
+HAVE_LIBMNG_FALSE = @HAVE_LIBMNG_FALSE@
+HAVE_LIBMNG_TRUE = @HAVE_LIBMNG_TRUE@
+HAVE_LIBRTE_FALSE = @HAVE_LIBRTE_FALSE@
+HAVE_LIBRTE_TRUE = @HAVE_LIBRTE_TRUE@
+HAVE_LIBSMBCLIENT_FALSE = @HAVE_LIBSMBCLIENT_FALSE@
+HAVE_LIBSMBCLIENT_TRUE = @HAVE_LIBSMBCLIENT_TRUE@
+HAVE_LINUX_CDROM = @HAVE_LINUX_CDROM@
+HAVE_LINUX_FALSE = @HAVE_LINUX_FALSE@
+HAVE_LINUX_TRUE = @HAVE_LINUX_TRUE@
+HAVE_MACOSX_VIDEO_FALSE = @HAVE_MACOSX_VIDEO_FALSE@
+HAVE_MACOSX_VIDEO_TRUE = @HAVE_MACOSX_VIDEO_TRUE@
+HAVE_MLIB_FALSE = @HAVE_MLIB_FALSE@
+HAVE_MLIB_TRUE = @HAVE_MLIB_TRUE@
+HAVE_OPENGL_FALSE = @HAVE_OPENGL_FALSE@
+HAVE_OPENGL_TRUE = @HAVE_OPENGL_TRUE@
+HAVE_OSS_FALSE = @HAVE_OSS_FALSE@
+HAVE_OSS_TRUE = @HAVE_OSS_TRUE@
+HAVE_PULSEAUDIO_FALSE = @HAVE_PULSEAUDIO_FALSE@
+HAVE_PULSEAUDIO_TRUE = @HAVE_PULSEAUDIO_TRUE@
+HAVE_SDL_FALSE = @HAVE_SDL_FALSE@
+HAVE_SDL_TRUE = @HAVE_SDL_TRUE@
+HAVE_SGMLTOOLS_FALSE = @HAVE_SGMLTOOLS_FALSE@
+HAVE_SGMLTOOLS_TRUE = @HAVE_SGMLTOOLS_TRUE@
+HAVE_SOLARIS_CDROM = @HAVE_SOLARIS_CDROM@
+HAVE_SPEEX_FALSE = @HAVE_SPEEX_FALSE@
+HAVE_SPEEX_TRUE = @HAVE_SPEEX_TRUE@
+HAVE_STK_FALSE = @HAVE_STK_FALSE@
+HAVE_STK_TRUE = @HAVE_STK_TRUE@
+HAVE_SUNAUDIO_FALSE = @HAVE_SUNAUDIO_FALSE@
+HAVE_SUNAUDIO_TRUE = @HAVE_SUNAUDIO_TRUE@
+HAVE_SUNDGA_FALSE = @HAVE_SUNDGA_FALSE@
+HAVE_SUNDGA_TRUE = @HAVE_SUNDGA_TRUE@
+HAVE_SUNFB_FALSE = @HAVE_SUNFB_FALSE@
+HAVE_SUNFB_TRUE = @HAVE_SUNFB_TRUE@
+HAVE_SYNCFB_FALSE = @HAVE_SYNCFB_FALSE@
+HAVE_SYNCFB_TRUE = @HAVE_SYNCFB_TRUE@
+HAVE_THEORA_FALSE = @HAVE_THEORA_FALSE@
+HAVE_THEORA_TRUE = @HAVE_THEORA_TRUE@
+HAVE_V4L_FALSE = @HAVE_V4L_FALSE@
+HAVE_V4L_TRUE = @HAVE_V4L_TRUE@
+HAVE_VCDNAV_FALSE = @HAVE_VCDNAV_FALSE@
+HAVE_VCDNAV_TRUE = @HAVE_VCDNAV_TRUE@
+HAVE_VIDIX_FALSE = @HAVE_VIDIX_FALSE@
+HAVE_VIDIX_TRUE = @HAVE_VIDIX_TRUE@
+HAVE_VLDXVMC_FALSE = @HAVE_VLDXVMC_FALSE@
+HAVE_VLDXVMC_TRUE = @HAVE_VLDXVMC_TRUE@
+HAVE_VORBIS_FALSE = @HAVE_VORBIS_FALSE@
+HAVE_VORBIS_TRUE = @HAVE_VORBIS_TRUE@
+HAVE_W32DLL_FALSE = @HAVE_W32DLL_FALSE@
+HAVE_W32DLL_TRUE = @HAVE_W32DLL_TRUE@
+HAVE_WAND_FALSE = @HAVE_WAND_FALSE@
+HAVE_WAND_TRUE = @HAVE_WAND_TRUE@
+HAVE_WIN32_CDROM = @HAVE_WIN32_CDROM@
+HAVE_X11_FALSE = @HAVE_X11_FALSE@
+HAVE_X11_TRUE = @HAVE_X11_TRUE@
+HAVE_XVMC_FALSE = @HAVE_XVMC_FALSE@
+HAVE_XVMC_TRUE = @HAVE_XVMC_TRUE@
+HAVE_XV_FALSE = @HAVE_XV_FALSE@
+HAVE_XV_TRUE = @HAVE_XV_TRUE@
+HAVE_XXMC_FALSE = @HAVE_XXMC_FALSE@
+HAVE_XXMC_TRUE = @HAVE_XXMC_TRUE@
+HOST_OS_DARWIN_FALSE = @HOST_OS_DARWIN_FALSE@
+HOST_OS_DARWIN_TRUE = @HOST_OS_DARWIN_TRUE@
+IMPURE_TEXT_LDFLAGS = @IMPURE_TEXT_LDFLAGS@
+INCLUDES = @INCLUDES@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_M4_FALSE = @INSTALL_M4_FALSE@
+INSTALL_M4_TRUE = @INSTALL_M4_TRUE@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+INTLLIBS = @INTLLIBS@
+INTL_MACOSX_LIBS = @INTL_MACOSX_LIBS@
+IRIXAL_CFLAGS = @IRIXAL_CFLAGS@
+IRIXAL_LIBS = @IRIXAL_LIBS@
+IRIXAL_STATIC_LIB = @IRIXAL_STATIC_LIB@
+JACK_CFLAGS = @JACK_CFLAGS@
+JACK_LIBS = @JACK_LIBS@
+KSTAT_LIBS = @KSTAT_LIBS@
+LDFLAGS = @LDFLAGS@
+LIBCDIO_CFLAGS = @LIBCDIO_CFLAGS@
+LIBCDIO_LIBS = @LIBCDIO_LIBS@
+LIBDTS_CFLAGS = @LIBDTS_CFLAGS@
+LIBDTS_LIBS = @LIBDTS_LIBS@
+LIBFAME_CFLAGS = @LIBFAME_CFLAGS@
+LIBFAME_CONFIG = @LIBFAME_CONFIG@
+LIBFAME_LIBS = @LIBFAME_LIBS@
+LIBFFMPEG_CPPFLAGS = @LIBFFMPEG_CPPFLAGS@
+LIBFLAC_CFLAGS = @LIBFLAC_CFLAGS@
+LIBFLAC_LIBS = @LIBFLAC_LIBS@
+LIBICONV = @LIBICONV@
+LIBINTL = @LIBINTL@
+LIBISO9660_LIBS = @LIBISO9660_LIBS@
+LIBMAD_CFLAGS = @LIBMAD_CFLAGS@
+LIBMAD_LIBS = @LIBMAD_LIBS@
+LIBMODPLUG_CFLAGS = @LIBMODPLUG_CFLAGS@
+LIBMODPLUG_LIBS = @LIBMODPLUG_LIBS@
+LIBMPEG2_CFLAGS = @LIBMPEG2_CFLAGS@
+LIBNAME = @LIBNAME@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBSMBCLIENT_LIBS = @LIBSMBCLIENT_LIBS@
+LIBSTK_CFLAGS = @LIBSTK_CFLAGS@
+LIBSTK_LIBS = @LIBSTK_LIBS@
+LIBTOOL = @LIBTOOL@
+LIBTOOL_DEPS = @LIBTOOL_DEPS@
+LIBVCDINFO_LIBS = @LIBVCDINFO_LIBS@
+LIBVCD_CFLAGS = @LIBVCD_CFLAGS@
+LIBVCD_LIBS = @LIBVCD_LIBS@
+LIBVCD_SYSDEP = @LIBVCD_SYSDEP@
+LINUX_CDROM_TIMEOUT = @LINUX_CDROM_TIMEOUT@
+LINUX_INCLUDE = @LINUX_INCLUDE@
+LN_S = @LN_S@
+LTLIBICONV = @LTLIBICONV@
+LTLIBINTL = @LTLIBINTL@
+LTLIBOBJS = @LTLIBOBJS@
+MAD_FALSE = @MAD_FALSE@
+MAD_TRUE = @MAD_TRUE@
+MAKEINFO = @MAKEINFO@
+MKINSTALLDIRS = @MKINSTALLDIRS@
+MKNOD = @MKNOD@
+MLIB_CFLAGS = @MLIB_CFLAGS@
+MLIB_LIBS = @MLIB_LIBS@
+MNG_LIBS = @MNG_LIBS@
+MSGFMT = @MSGFMT@
+MSGMERGE = @MSGMERGE@
+NET_LIBS = @NET_LIBS@
+OBJC = @OBJC@
+OBJCDEPMODE = @OBJCDEPMODE@
+OBJCFLAGS = @OBJCFLAGS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OPENGL_CFLAGS = @OPENGL_CFLAGS@
+OPENGL_LIBS = @OPENGL_LIBS@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PASS1_CFLAGS = @PASS1_CFLAGS@
+PASS2_CFLAGS = @PASS2_CFLAGS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+POSUB = @POSUB@
+PPC_ARCH_FALSE = @PPC_ARCH_FALSE@
+PPC_ARCH_TRUE = @PPC_ARCH_TRUE@
+PULSEAUDIO_CFLAGS = @PULSEAUDIO_CFLAGS@
+PULSEAUDIO_LIBS = @PULSEAUDIO_LIBS@
+RANLIB = @RANLIB@
+RT_LIBS = @RT_LIBS@
+SDL_CFLAGS = @SDL_CFLAGS@
+SDL_LIBS = @SDL_LIBS@
+SET_MAKE = @SET_MAKE@
+SGMLTOOLS = @SGMLTOOLS@
+SHELL = @SHELL@
+SPEC_VERSION = @SPEC_VERSION@
+SPEEX_CFLAGS = @SPEEX_CFLAGS@
+SPEEX_LIBS = @SPEEX_LIBS@
+STATIC = @STATIC@
+STRIP = @STRIP@
+SUNDGA_CFLAGS = @SUNDGA_CFLAGS@
+SUNDGA_LIBS = @SUNDGA_LIBS@
+TAR_NAME = @TAR_NAME@
+THEORA_CFLAGS = @THEORA_CFLAGS@
+THEORA_LIBS = @THEORA_LIBS@
+THREAD_CPPFLAGS = @THREAD_CPPFLAGS@
+THREAD_LIBS = @THREAD_LIBS@
+USE_NLS = @USE_NLS@
+VERSION = @VERSION@
+VISIBILITY_FLAG = @VISIBILITY_FLAG@
+VORBIS_CFLAGS = @VORBIS_CFLAGS@
+VORBIS_LIBS = @VORBIS_LIBS@
+W32DLL_DEP = @W32DLL_DEP@
+W32_NO_OPTIMIZE = @W32_NO_OPTIMIZE@
+WAND_CFLAGS = @WAND_CFLAGS@
+WAND_LIBS = @WAND_LIBS@
+WIN32_CPPFLAGS = @WIN32_CPPFLAGS@
+WIN32_FALSE = @WIN32_FALSE@
+WIN32_TRUE = @WIN32_TRUE@
+X11_CFLAGS = @X11_CFLAGS@
+X11_LIBS = @X11_LIBS@
+XGETTEXT = @XGETTEXT@
+XINERAMA_CFLAGS = @XINERAMA_CFLAGS@
+XINERAMA_LIBS = @XINERAMA_LIBS@
+XINE_ACFLAGS = @XINE_ACFLAGS@
+XINE_BIN_AGE = @XINE_BIN_AGE@
+XINE_BUILD_CC = @XINE_BUILD_CC@
+XINE_BUILD_DATE = @XINE_BUILD_DATE@
+XINE_BUILD_OS = @XINE_BUILD_OS@
+XINE_CONFIG_PREFIX = @XINE_CONFIG_PREFIX@
+XINE_DATADIR = @XINE_DATADIR@
+XINE_FONTDIR = @XINE_FONTDIR@
+XINE_FONTPATH = @XINE_FONTPATH@
+XINE_IFACE_AGE = @XINE_IFACE_AGE@
+XINE_LOCALEDIR = @XINE_LOCALEDIR@
+XINE_LOCALEPATH = @XINE_LOCALEPATH@
+XINE_LT_AGE = @XINE_LT_AGE@
+XINE_LT_CURRENT = @XINE_LT_CURRENT@
+XINE_LT_REVISION = @XINE_LT_REVISION@
+XINE_MAJOR = @XINE_MAJOR@
+XINE_MINOR = @XINE_MINOR@
+XINE_PLUGINDIR = @XINE_PLUGINDIR@
+XINE_PLUGINPATH = @XINE_PLUGINPATH@
+XINE_SCRIPTPATH = @XINE_SCRIPTPATH@
+XINE_SUB = @XINE_SUB@
+XMKMF = @XMKMF@
+XVMC_LIBS = @XVMC_LIBS@
+XV_CFLAGS = @XV_CFLAGS@
+XV_LIBS = @XV_LIBS@
+XXMC_LIBS = @XXMC_LIBS@
+X_CFLAGS = @X_CFLAGS@
+X_EXTRA_LIBS = @X_EXTRA_LIBS@
+X_LIBS = @X_LIBS@
+X_PRE_LIBS = @X_PRE_LIBS@
+ZLIB_CPPFLAGS = @ZLIB_CPPFLAGS@
+ZLIB_LIBS = @ZLIB_LIBS@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
+am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
+am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
+am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
+am__fastdepOBJC_FALSE = @am__fastdepOBJC_FALSE@
+am__fastdepOBJC_TRUE = @am__fastdepOBJC_TRUE@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+w32_path = @w32_path@
+AUTOMAKE_OPTIONS = 1.3
+ACLOCAL_AMFLAGS = -I m4
+SUBDIRS = doc m4 po misc include lib src win32 contrib
+DEBFILES = debian/README.Debian debian/changelog debian/control \
+ debian/copyright debian/rules debian/compat \
+ debian/shlibdeps.sh debian/libxine-dev.install debian/libxine1.install
+
+EXTRA_DIST = config.rpath autogen.sh \
+ automake.diff \
+ ChangeLog \
+ configure \
+ config.guess \
+ config.sub \
+ COPYING \
+ INSTALL \
+ install-sh \
+ libtool \
+ ltmain.sh \
+ missing \
+ NEWS \
+ README \
+ TODO \
+ depcomp \
+ CREDITS \
+ @DEPCOMP@
+
+noinst_HEADERS = config.h
+CONFIG_CLEAN_FILES = libtool
+all: config.h
+ $(MAKE) $(AM_MAKEFLAGS) all-recursive
+
+.SUFFIXES:
+am--refresh:
+ @:
+$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps)
+ @for dep in $?; do \
+ case '$(am__configure_deps)' in \
+ *$$dep*) \
+ echo ' cd $(srcdir) && $(AUTOMAKE) --gnu '; \
+ cd $(srcdir) && $(AUTOMAKE) --gnu \
+ && exit 0; \
+ exit 1;; \
+ esac; \
+ done; \
+ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \
+ cd $(top_srcdir) && \
+ $(AUTOMAKE) --gnu Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+ @case '$?' in \
+ *config.status*) \
+ echo ' $(SHELL) ./config.status'; \
+ $(SHELL) ./config.status;; \
+ *) \
+ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \
+ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \
+ esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+ $(SHELL) ./config.status --recheck
+
+$(top_srcdir)/configure: $(am__configure_deps)
+ cd $(srcdir) && $(AUTOCONF)
+$(ACLOCAL_M4): $(am__aclocal_m4_deps)
+ cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
+
+config.h: stamp-h1
+ @if test ! -f $@; then \
+ rm -f stamp-h1; \
+ $(MAKE) stamp-h1; \
+ else :; fi
+
+stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
+ @rm -f stamp-h1
+ cd $(top_builddir) && $(SHELL) ./config.status config.h
+$(srcdir)/config.h.in: $(am__configure_deps)
+ cd $(top_srcdir) && $(AUTOHEADER)
+ rm -f stamp-h1
+ touch $@
+
+distclean-hdr:
+ -rm -f config.h stamp-h1
+contrib/Makefile: $(top_builddir)/config.status $(top_srcdir)/contrib/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+contrib/ffmpeg/makefile.xine: $(top_builddir)/config.status $(top_srcdir)/contrib/ffmpeg/makefile.xine.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+src/libffmpeg/libavcodec/Makefile: $(top_builddir)/config.status $(top_srcdir)/src/libffmpeg/libavcodec/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+src/libffmpeg/libavcodec/armv4l/Makefile: $(top_builddir)/config.status $(top_srcdir)/src/libffmpeg/libavcodec/armv4l/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+src/libffmpeg/libavcodec/i386/Makefile: $(top_builddir)/config.status $(top_srcdir)/src/libffmpeg/libavcodec/i386/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+src/libffmpeg/libavcodec/mlib/Makefile: $(top_builddir)/config.status $(top_srcdir)/src/libffmpeg/libavcodec/mlib/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+src/libffmpeg/libavcodec/alpha/Makefile: $(top_builddir)/config.status $(top_srcdir)/src/libffmpeg/libavcodec/alpha/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+src/libffmpeg/libavcodec/ppc/Makefile: $(top_builddir)/config.status $(top_srcdir)/src/libffmpeg/libavcodec/ppc/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+src/libffmpeg/libavcodec/sparc/Makefile: $(top_builddir)/config.status $(top_srcdir)/src/libffmpeg/libavcodec/sparc/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+src/libffmpeg/libavcodec/libpostproc/Makefile: $(top_builddir)/config.status $(top_srcdir)/src/libffmpeg/libavcodec/libpostproc/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+src/libffmpeg/libavutil/Makefile: $(top_builddir)/config.status $(top_srcdir)/src/libffmpeg/libavutil/Makefile.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+
+mostlyclean-libtool:
+ -rm -f *.lo
+
+clean-libtool:
+ -rm -rf .libs _libs
+
+distclean-libtool:
+ -rm -f libtool
+uninstall-info-am:
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+# (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+$(RECURSIVE_TARGETS):
+ @failcom='exit 1'; \
+ for f in x $$MAKEFLAGS; do \
+ case $$f in \
+ *=* | --[!k]*);; \
+ *k*) failcom='fail=yes';; \
+ esac; \
+ done; \
+ dot_seen=no; \
+ target=`echo $@ | sed s/-recursive//`; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ dot_seen=yes; \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done; \
+ if test "$$dot_seen" = "no"; then \
+ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+ fi; test -z "$$fail"
+
+mostlyclean-recursive clean-recursive distclean-recursive \
+maintainer-clean-recursive:
+ @failcom='exit 1'; \
+ for f in x $$MAKEFLAGS; do \
+ case $$f in \
+ *=* | --[!k]*);; \
+ *k*) failcom='fail=yes';; \
+ esac; \
+ done; \
+ dot_seen=no; \
+ case "$@" in \
+ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+ *) list='$(SUBDIRS)' ;; \
+ esac; \
+ rev=''; for subdir in $$list; do \
+ if test "$$subdir" = "."; then :; else \
+ rev="$$subdir $$rev"; \
+ fi; \
+ done; \
+ rev="$$rev ."; \
+ target=`echo $@ | sed s/-recursive//`; \
+ for subdir in $$rev; do \
+ echo "Making $$target in $$subdir"; \
+ if test "$$subdir" = "."; then \
+ local_target="$$target-am"; \
+ else \
+ local_target="$$target"; \
+ fi; \
+ (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+ || eval $$failcom; \
+ done && test -z "$$fail"
+tags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
+ done
+ctags-recursive:
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
+ done
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ mkid -fID $$unique
+tags: TAGS
+
+TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+ include_option=--etags-include; \
+ empty_fix=.; \
+ else \
+ include_option=--include; \
+ empty_fix=; \
+ fi; \
+ list='$(SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test ! -f $$subdir/TAGS || \
+ tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
+ fi; \
+ done; \
+ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+ test -n "$$unique" || unique=$$empty_fix; \
+ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+ $$tags $$unique; \
+ fi
+ctags: CTAGS
+CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
+ $(TAGS_FILES) $(LISP)
+ tags=; \
+ here=`pwd`; \
+ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
+ unique=`for i in $$list; do \
+ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+ done | \
+ $(AWK) ' { files[$$0] = 1; } \
+ END { for (i in files) print i; }'`; \
+ test -z "$(CTAGS_ARGS)$$tags$$unique" \
+ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+ $$tags $$unique
+
+GTAGS:
+ here=`$(am__cd) $(top_builddir) && pwd` \
+ && cd $(top_srcdir) \
+ && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+ -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+ $(am__remove_distdir)
+ mkdir $(distdir)
+ $(mkdir_p) $(distdir)/contrib $(distdir)/contrib/ffmpeg $(distdir)/include $(distdir)/m4 $(distdir)/misc $(distdir)/po $(distdir)/src/libffmpeg/libavcodec $(distdir)/src/libffmpeg/libavcodec/alpha $(distdir)/src/libffmpeg/libavcodec/armv4l $(distdir)/src/libffmpeg/libavcodec/i386 $(distdir)/src/libffmpeg/libavcodec/libpostproc $(distdir)/src/libffmpeg/libavcodec/mlib $(distdir)/src/libffmpeg/libavcodec/ppc $(distdir)/src/libffmpeg/libavcodec/sparc $(distdir)/src/libffmpeg/libavutil
+ @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
+ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
+ list='$(DISTFILES)'; for file in $$list; do \
+ case $$file in \
+ $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
+ $(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
+ esac; \
+ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+ dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
+ if test "$$dir" != "$$file" && test "$$dir" != "."; then \
+ dir="/$$dir"; \
+ $(mkdir_p) "$(distdir)$$dir"; \
+ else \
+ dir=''; \
+ fi; \
+ if test -d $$d/$$file; then \
+ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+ cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+ fi; \
+ cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+ else \
+ test -f $(distdir)/$$file \
+ || cp -p $$d/$$file $(distdir)/$$file \
+ || exit 1; \
+ fi; \
+ done
+ list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+ if test "$$subdir" = .; then :; else \
+ test -d "$(distdir)/$$subdir" \
+ || $(mkdir_p) "$(distdir)/$$subdir" \
+ || exit 1; \
+ distdir=`$(am__cd) $(distdir) && pwd`; \
+ top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
+ (cd $$subdir && \
+ $(MAKE) $(AM_MAKEFLAGS) \
+ top_distdir="$$top_distdir" \
+ distdir="$$distdir/$$subdir" \
+ distdir) \
+ || exit 1; \
+ fi; \
+ done
+ -find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \
+ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
+ ! -type d ! -perm -444 -exec $(SHELL) $(install_sh) -c -m a+r {} {} \; \
+ || chmod -R a+r $(distdir)
+dist-gzip: distdir
+ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+ $(am__remove_distdir)
+
+dist-bzip2: distdir
+ tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2
+ $(am__remove_distdir)
+
+dist-tarZ: distdir
+ tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
+ $(am__remove_distdir)
+
+dist-shar: distdir
+ shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
+ $(am__remove_distdir)
+
+dist-zip: distdir
+ -rm -f $(distdir).zip
+ zip -rq $(distdir).zip $(distdir)
+ $(am__remove_distdir)
+
+dist dist-all: distdir
+ tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+ $(am__remove_distdir)
+
+# This target untars the dist file and tries a VPATH configuration. Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+ case '$(DIST_ARCHIVES)' in \
+ *.tar.gz*) \
+ GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\
+ *.tar.bz2*) \
+ bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\
+ *.tar.Z*) \
+ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
+ *.shar.gz*) \
+ GZIP=$(GZIP_ENV) gunzip -c $(distdir).shar.gz | unshar ;;\
+ *.zip*) \
+ unzip $(distdir).zip ;;\
+ esac
+ chmod -R a-w $(distdir); chmod a+w $(distdir)
+ mkdir $(distdir)/_build
+ mkdir $(distdir)/_inst
+ chmod a-w $(distdir)
+ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
+ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
+ && cd $(distdir)/_build \
+ && ../configure --srcdir=.. --prefix="$$dc_install_base" \
+ $(DISTCHECK_CONFIGURE_FLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) \
+ && $(MAKE) $(AM_MAKEFLAGS) dvi \
+ && $(MAKE) $(AM_MAKEFLAGS) check \
+ && $(MAKE) $(AM_MAKEFLAGS) install \
+ && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+ && $(MAKE) $(AM_MAKEFLAGS) uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
+ distuninstallcheck \
+ && chmod -R a-w "$$dc_install_base" \
+ && ({ \
+ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
+ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
+ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
+ } || { rm -rf "$$dc_destdir"; exit 1; }) \
+ && rm -rf "$$dc_destdir" \
+ && $(MAKE) $(AM_MAKEFLAGS) dist \
+ && rm -rf $(DIST_ARCHIVES) \
+ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck
+ $(am__remove_distdir)
+ @(echo "$(distdir) archives ready for distribution: "; \
+ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
+ sed -e '1{h;s/./=/g;p;x;}' -e '$${p;x;}'
+distuninstallcheck:
+ @cd $(distuninstallcheck_dir) \
+ && test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \
+ || { echo "ERROR: files left after uninstall:" ; \
+ if test -n "$(DESTDIR)"; then \
+ echo " (check DESTDIR support)"; \
+ fi ; \
+ $(distuninstallcheck_listfiles) ; \
+ exit 1; } >&2
+distcleancheck: distclean
+ @if test '$(srcdir)' = . ; then \
+ echo "ERROR: distcleancheck can only run from a VPATH build" ; \
+ exit 1 ; \
+ fi
+ @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
+ || { echo "ERROR: files left in build directory after distclean:" ; \
+ $(distcleancheck_listfiles) ; \
+ exit 1; } >&2
+check-am: all-am
+check: check-recursive
+all-am: Makefile $(HEADERS) config.h
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+ @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+ `test -z '$(STRIP)' || \
+ echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+
+clean-generic:
+
+distclean-generic:
+ -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-hdr \
+ distclean-libtool distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+ @$(NORMAL_INSTALL)
+ $(MAKE) $(AM_MAKEFLAGS) install-data-hook
+
+install-exec-am:
+
+install-info: install-info-recursive
+
+install-man:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+ -rm -f $(am__CONFIG_DISTCLEAN_FILES)
+ -rm -rf $(top_srcdir)/autom4te.cache
+ -rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am: uninstall-info-am
+
+uninstall-info: uninstall-info-recursive
+
+.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am am--refresh check \
+ check-am clean clean-generic clean-libtool clean-recursive \
+ ctags ctags-recursive dist dist-all dist-bzip2 dist-gzip \
+ dist-shar dist-tarZ dist-zip distcheck distclean \
+ distclean-generic distclean-hdr distclean-libtool \
+ distclean-recursive distclean-tags distcleancheck distdir \
+ distuninstallcheck dvi dvi-am html html-am info info-am \
+ install install-am install-data install-data-am \
+ install-data-hook install-exec install-exec-am install-info \
+ install-info-am install-man install-strip installcheck \
+ installcheck-am installdirs installdirs-am maintainer-clean \
+ maintainer-clean-generic maintainer-clean-recursive \
+ mostlyclean mostlyclean-generic mostlyclean-libtool \
+ mostlyclean-recursive pdf pdf-am ps ps-am tags tags-recursive \
+ uninstall uninstall-am uninstall-info-am
+
+
+docs:
+ @cd doc && $(MAKE) $@
+
+distcheck-lax:
+ @$(MAKE) distcheck_lax=true distcheck
+
+pass1:
+ @$(MAKE) MULTIPASS_CFLAGS='$(PASS1_CFLAGS)'
+
+pass2:
+ @$(MAKE) MULTIPASS_CFLAGS='$(PASS2_CFLAGS)'
+
+debug:
+ @list='$(SUBDIRS)'; for subdir in $$list; do \
+ (cd $$subdir && $(MAKE) $@) || exit;\
+ done;
+
+debug-install: install-debug
+
+install-debug: debug
+ @list='$(SUBDIRS)'; for subdir in $$list; do \
+ (cd $$subdir && $(MAKE) $@) || exit; \
+ done;
+ $(MAKE) $(AM_MAKEFLAGS) install-data-hook
+
+prune-cache:
+ -rm -f config.cache
+
+release-check:
+ @./config.status misc/relchk.sh
+ @mv -f .cvsversion .cvsversion.tmp
+ @./autogen.sh noconfig && $(SHELL) misc/relchk.sh
+ @mv -f .cvsversion.tmp .cvsversion
+
+slackbuild:
+ @(cd misc && $(MAKE) SlackBuild) && \
+ PREFIX="/usr" misc/SlackBuild
+
+install-data-hook:
+ @rm -f $(DESTDIR)$(XINE_PLUGINDIR)/*.la
+ @rm -f $(DESTDIR)$(XINE_PLUGINDIR)/*/*.la
+ @if test -x "$(top_srcdir)/post-install.sh" ; then \
+ $(top_srcdir)/post-install.sh ; \
+ fi
+
+mostlyclean-generic:
+ -rm -f *~ \#* .*~ .\#*
+ -rm -f $(PACKAGE)_$(VERSION).tar.gz
+ -rm -f $(distdir).tar.gz $(PACKAGE).tgz package_descriptions
+ -rm -rf $(distdir)
+
+maintainer-clean-generic:
+ -@echo "This command is intended for maintainers to use;"
+ -@echo "it deletes files that may require special tools to rebuild."
+ -rm -f Makefile.in configure acinclude.m4 aclocal.m4
+ -rm -f config.h.in stamp-h.in ltconfig ltmain.sh
+ -rm -f config.guess config.sub install-sh missing mkinstalldirs
+ -rm -f depcomp config.log
+
+maintainer-clean-generic-hook:
+ rm -f config.status
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/configure.ac b/configure.ac
index f654e91d4..06ef24d84 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2404,6 +2404,8 @@ doc/man/Makefile
doc/man/en/Makefile
doc/hackersguide/Makefile
doc/faq/Makefile
+contrib/Makefile
+contrib/ffmpeg/makefile.xine
include/Makefile
include/xine.h
lib/Makefile
@@ -2438,15 +2440,6 @@ src/libdts/Makefile
src/libfaad/Makefile
src/libfaad/codebook/Makefile
src/libffmpeg/Makefile
-src/libffmpeg/libavcodec/Makefile
-src/libffmpeg/libavcodec/armv4l/Makefile
-src/libffmpeg/libavcodec/i386/Makefile
-src/libffmpeg/libavcodec/mlib/Makefile
-src/libffmpeg/libavcodec/alpha/Makefile
-src/libffmpeg/libavcodec/ppc/Makefile
-src/libffmpeg/libavcodec/sparc/Makefile
-src/libffmpeg/libavcodec/libpostproc/Makefile
-src/libffmpeg/libavutil/Makefile
src/libflac/Makefile
src/liblpcm/Makefile
src/libmad/Makefile
diff --git a/contrib/ffmpeg/COPYING b/contrib/ffmpeg/COPYING
new file mode 100644
index 000000000..1e0991447
--- /dev/null
+++ b/contrib/ffmpeg/COPYING
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/contrib/ffmpeg/CREDITS b/contrib/ffmpeg/CREDITS
new file mode 100644
index 000000000..f7f086a3c
--- /dev/null
+++ b/contrib/ffmpeg/CREDITS
@@ -0,0 +1,47 @@
+This file contains the names of the people who have contributed to
+FFmpeg. The names are sorted alphabetically by last name.
+
+Michel Bardiaux
+Fabrice Bellard
+Patrice Bensoussan
+Alex Beregszaszi
+BERO
+Mario Brito
+Ronald Bultje
+Maarten Daniels
+Reimar Doeffinger
+Tim Ferguson
+Brian Foley
+Arpad Gereoffy
+Philip Gladstone
+Vladimir Gneushev
+Roine Gustafsson
+David Hammerton
+Wolfgang Hesseler
+Falk Hueffner
+Steven Johnson
+Zdenek Kabelac
+Robin Kay
+Todd Kirby
+Nick Kurshev
+Benjamin Larsson
+Loïc Le Loarer
+Daniel Maas
+Mike Melanson
+Loren Merritt
+Jeff Muizelaar
+Michael Niedermayer
+François Revol
+Peter Ross
+Måns Rullgård
+Roman Shaposhnik
+Oded Shimon
+Dieter Shirley
+Konstantin Shishkov
+Juan J. Sierralta
+Ewald Snel
+Sascha Sommer
+Leon van Stuivenberg
+Roberto Togni
+Lionel Ulmer
+Reynaldo Verdejo
diff --git a/contrib/ffmpeg/Changelog b/contrib/ffmpeg/Changelog
new file mode 100644
index 000000000..5267b5321
--- /dev/null
+++ b/contrib/ffmpeg/Changelog
@@ -0,0 +1,389 @@
+version <next>
+- DV50 AKA DVCPRO50 encoder, decoder, muxer and demuxer
+- TechSmith Camtasia (TSCC) video decoder
+- IBM Ultimotion (ULTI) video decoder
+- Sierra Online audio file demuxer and decoder
+- Apple QuickDraw (qdrw) video decoder
+- Creative ADPCM audio decoder (16-bit as well as 8-bit schemes)
+- Electronic Arts Multimedia (WVE/UV2/etc.) file demuxer
+- Miro VideoXL (VIXL) video decoder
+- H.261 video encoder
+- QPEG video decoder
+- Nullsoft Video (NSV) file demuxer
+- Shorten audio decoder
+- LOCO video decoder
+- Apple Lossless Audio Codec (ALAC) decoder
+- Winnov WNV1 video decoder
+- Autodesk Animator Studio Codec (AASC) decoder
+- Indeo 2 video decoder
+- Fraps FPS1 video decoder
+- Snow video encoder/decoder
+- Sonic audio encoder/decoder
+- Vorbis audio encoder/decoder
+- Macromedia ADPCM decoder
+- Duck TrueMotion 2 video decoder
+- support for decoding FLX and DTA extensions in FLIC files
+- H.264 custom quantization matrices support
+- ffserver fixed, it should now be usable again
+- QDM2 audio decoder
+- Real Cooker audio decoder
+- TrueSpeech audio decoder
+- WMA2 audio decoder fixed, now all files should play correctly
+- RealAudio 14.4 and 28.8 decoders fixed
+- JPEG-LS encoder and decoder
+- CamStudio video decoder
+- build system improvements
+- tabs and trailing whitespace removed from the codebase
+- AIFF/AIFF-C audio format, encoding and decoding
+- ADTS AAC file reading and writing
+- Creative VOC file reading and writing
+- American Laser Games multimedia (*.mm) playback system
+- Zip Motion Blocks Video decoder
+- Improved Theora/VP3 decoder
+- True Audio (TTA) decoder
+- AVS demuxer and video decoder
+- Smacker demuxer and decoder
+- NuppelVideo/MythTV demuxer and RTjpeg decoder
+- KMVC decoder
+- MPEG-2 intra vlc support
+- MPEG-2 4:2:2 encoder
+- Flash Screen Video decoder
+- GXF demuxer
+- Chinese AVS decoder
+- GXF muxer
+- MXF demuxer
+- VC-1/WMV3/WMV9 video decoder
+- MacIntel support
+- AVISynth support
+- VMware video decoder
+- VP5 video decoder
+- VP6 video decoder
+- WavPack lossless audio decoder
+- Targa (.TGA) picture decoder
+- Delphine Software .cin demuxer/audio and video decoder
+- Tiertex .seq demuxer/video decoder
+- MTV demuxer
+- TIFF picture decoder
+- GIF picture decoder
+- Intel Music decoder
+
+version 0.4.9-pre1:
+
+- DV encoder, DV muxer
+- Microsoft RLE video decoder
+- Microsoft Video-1 decoder
+- Apple Animation (RLE) decoder
+- Apple Graphics (SMC) decoder
+- Apple Video (RPZA) decoder
+- Cinepak decoder
+- Sega FILM (CPK) file demuxer
+- Westwood multimedia support (VQA & AUD files)
+- Id Quake II CIN playback support
+- 8BPS video decoder
+- FLIC playback support
+- RealVideo 2.0 (RV20) decoder
+- Duck TrueMotion v1 (DUCK) video decoder
+- Sierra VMD demuxer and video decoder
+- MSZH and ZLIB decoder support
+- SVQ1 video encoder
+- AMR-WB support
+- PPC optimizations
+- rate distortion optimal cbp support
+- rate distorted optimal ac prediction for MPEG-4
+- rate distorted optimal lambda->qp support
+- AAC encoding with libfaac
+- Sunplus JPEG codec (SP5X) support
+- use Lagrange multiplier instead of QP for ratecontrol
+- Theora/VP3 decoding support
+- XA and ADX ADPCM codecs
+- export MPEG-2 active display area / pan scan
+- Add support for configuring with IBM XLC
+- floating point AAN DCT
+- initial support for zygo video (not complete)
+- RGB ffv1 support
+- new audio/video parser API
+- av_log() system
+- av_read_frame() and av_seek_frame() support
+- missing last frame fixes
+- seek by mouse in ffplay
+- noise reduction of DCT coefficients
+- H.263 OBMC & 4MV support
+- H.263 alternative inter vlc support
+- H.263 loop filter
+- H.263 slice structured mode
+- interlaced DCT support for MPEG-2 encoding
+- stuffing to stay above min_bitrate
+- MB type & QP visualization
+- frame stepping for ffplay
+- interlaced motion estimation
+- alternate scantable support
+- SVCD scan offset support
+- closed GOP support
+- SSE2 FDCT
+- quantizer noise shaping
+- G.726 ADPCM audio codec
+- MS ADPCM encoding
+- multithreaded/SMP motion estimation
+- multithreaded/SMP encoding for MPEG-1/MPEG-2/MPEG-4/H.263
+- multithreaded/SMP decoding for MPEG-2
+- FLAC decoder
+- Metrowerks CodeWarrior support
+- H.263+ custom pcf support
+- nicer output for 'ffmpeg -formats'
+- Matroska demuxer
+- SGI image format, encoding and decoding
+- H.264 loop filter support
+- H.264 CABAC support
+- nicer looking arrows for the motion vector visualization
+- improved VCD support
+- audio timestamp drift compensation
+- MPEG-2 YUV 422/444 support
+- polyphase kaiser windowed sinc and blackman nuttall windowed sinc audio resample
+- better image scaling
+- H.261 support
+- correctly interleave packets during encoding
+- VIS optimized motion compensation
+- intra_dc_precision>0 encoding support
+- support reuse of motion vectors/MB types/field select values of the source video
+- more accurate deblock filter
+- padding support
+- many optimizations and bugfixes
+
+version 0.4.8:
+
+- MPEG-2 video encoding (Michael)
+- Id RoQ playback subsystem (Mike Melanson and Tim Ferguson)
+- Wing Commander III Movie (.mve) file playback subsystem (Mike Melanson
+ and Mario Brito)
+- Xan DPCM audio decoder (Mario Brito)
+- Interplay MVE playback subsystem (Mike Melanson)
+- Duck DK3 and DK4 ADPCM audio decoders (Mike Melanson)
+
+version 0.4.7:
+
+- RealAudio 1.0 (14_4) and 2.0 (28_8) native decoders. Author unknown, code from mplayerhq
+ (originally from public domain player for Amiga at http://www.honeypot.net/audio)
+- current version now also compiles with older GCC (Fabrice)
+- 4X multimedia playback system including 4xm file demuxer (Mike
+ Melanson), and 4X video and audio codecs (Michael)
+- Creative YUV (CYUV) decoder (Mike Melanson)
+- FFV1 codec (our very simple lossless intra only codec, compresses much better
+ than HuffYUV) (Michael)
+- ASV1 (Asus), H.264, Intel indeo3 codecs have been added (various)
+- tiny PNG encoder and decoder, tiny GIF decoder, PAM decoder (PPM with
+ alpha support), JPEG YUV colorspace support. (Fabrice Bellard)
+- ffplay has been replaced with a newer version which uses SDL (optionally)
+ for multiplatform support (Fabrice)
+- Sorenson Version 3 codec (SVQ3) support has been added (decoding only) - donated
+ by anonymous
+- AMR format has been added (Johannes Carlsson)
+- 3GP support has been added (Johannes Carlsson)
+- VP3 codec has been added (Mike Melanson)
+- more MPEG-1/2 fixes
+- better multiplatform support, MS Visual Studio fixes (various)
+- AltiVec optimizations (Magnus Damn and others)
+- SH4 processor support has been added (BERO)
+- new public interfaces (avcodec_get_pix_fmt) (Roman Shaposhnick)
+- VOB streaming support (Brian Foley)
+- better MP3 autodetection (Andriy Rysin)
+- qpel encoding (Michael)
+- 4mv+b frames encoding finally fixed (Michael)
+- chroma ME (Michael)
+- 5 comparison functions for ME (Michael)
+- B-frame encoding speedup (Michael)
+- WMV2 codec (unfinished - Michael)
+- user specified diamond size for EPZS (Michael)
+- Playstation STR playback subsystem, still experimental (Mike and Michael)
+- ASV2 codec (Michael)
+- CLJR decoder (Alex)
+
+.. And lots more new enhancements and fixes.
+
+version 0.4.6:
+
+- completely new integer only MPEG audio layer 1/2/3 decoder rewritten
+ from scratch
+- Recoded DCT and motion vector search with gcc (no longer depends on nasm)
+- fix quantization bug in AC3 encoder
+- added PCM codecs and format. Corrected WAV/AVI/ASF PCM issues
+- added prototype ffplay program
+- added GOB header parsing on H.263/H.263+ decoder (Juanjo)
+- bug fix on MCBPC tables of H.263 (Juanjo)
+- bug fix on DC coefficients of H.263 (Juanjo)
+- added Advanced Prediction Mode on H.263/H.263+ decoder (Juanjo)
+- now we can decode H.263 streams found in QuickTime files (Juanjo)
+- now we can decode H.263 streams found in VIVO v1 files (Juanjo)
+- preliminary RTP "friendly" mode for H.263/H.263+ coding. (Juanjo)
+- added GOB header for H.263/H.263+ coding on RTP mode (Juanjo)
+- now H.263 picture size is returned on the first decoded frame (Juanjo)
+- added first regression tests
+- added MPEG-2 TS demuxer
+- new demux API for libav
+- more accurate and faster IDCT (Michael)
+- faster and entropy-controlled motion search (Michael)
+- two pass video encoding (Michael)
+- new video rate control (Michael)
+- added MSMPEG4V1, MSMPEG4V2 and WMV1 support (Michael)
+- great performance improvement of video encoders and decoders (Michael)
+- new and faster bit readers and vlc parsers (Michael)
+- high quality encoding mode: tries all macroblock/VLC types (Michael)
+- added DV video decoder
+- preliminary RTP/RTSP support in ffserver and libavformat
+- H.263+ AIC decoding/encoding support (Juanjo)
+- VCD MPEG-PS mode (Juanjo)
+- PSNR stuff (Juanjo)
+- simple stats output (Juanjo)
+- 16-bit and 15-bit RGB/BGR/GBR support (Bisqwit)
+
+version 0.4.5:
+
+- some header fixes (Zdenek Kabelac <kabi@informatics.muni.cz>)
+- many MMX optimizations (Nick Kurshev <nickols_k@mail.ru>)
+- added configure system (actually a small shell script)
+- added MPEG audio layer 1/2/3 decoding using LGPL'ed mpglib by
+ Michael Hipp (temporary solution - waiting for integer only
+ decoder)
+- fixed VIDIOCSYNC interrupt
+- added Intel H.263 decoding support ('I263' AVI fourCC)
+- added Real Video 1.0 decoding (needs further testing)
+- simplified image formats again. Added PGM format (=grey
+ pgm). Renamed old PGM to PGMYUV.
+- fixed msmpeg4 slice issues (tell me if you still find problems)
+- fixed OpenDivX bugs with newer versions (added VOL header decoding)
+- added support for MPlayer interface
+- added macroblock skip optimization
+- added MJPEG decoder
+- added mmx/mmxext IDCT from libmpeg2
+- added pgmyuvpipe, ppm, and ppm_pipe formats (original patch by Celer
+ <celer@shell.scrypt.net>)
+- added pixel format conversion layer (e.g. for MJPEG or PPM)
+- added deinterlacing option
+- MPEG-1/2 fixes
+- MPEG-4 vol header fixes (Jonathan Marsden <snmjbm@pacbell.net>)
+- ARM optimizations (Lionel Ulmer <lionel.ulmer@free.fr>).
+- Windows porting of file converter
+- added MJPEG raw format (input/output)
+- added JPEG image format support (input/output)
+
+version 0.4.4:
+
+- fixed some std header definitions (Bjorn Lindgren
+ <bjorn.e.lindgren@telia.com>).
+- added MPEG demuxer (MPEG-1 and 2 compatible).
+- added ASF demuxer
+- added prototype RM demuxer
+- added AC3 decoding (done with libac3 by Aaron Holtzman)
+- added decoding codec parameter guessing (e.g. for MPEG, because the
+ header does not include them)
+- fixed header generation in MPEG-1, AVI and ASF muxer: wmplayer can now
+ play them (only tested video)
+- fixed H.263 white bug
+- fixed phase rounding in img resample filter
+- added MMX code for polyphase img resample filter
+- added CPU autodetection
+- added generic title/author/copyright/comment string handling (ASF and RM
+ use them)
+- added SWF demux to extract MP3 track (not usable yet because no MP3
+ decoder)
+- added fractional frame rate support
+- codecs are no longer searched by read_header() (should fix ffserver
+ segfault)
+
+version 0.4.3:
+
+- BGR24 patch (initial patch by Jeroen Vreeken <pe1rxq@amsat.org>)
+- fixed raw yuv output
+- added motion rounding support in MPEG-4
+- fixed motion rounding bug in MSMPEG4
+- added B-frame handling in video core
+- added full MPEG-1 decoding support
+- added partial (frame only) MPEG-2 support
+- changed the FOURCC code for H.263 to "U263" to be able to see the
+  AVI/H.263 file with the UB Video H.263+ decoder. MPlayer works with
+  this codec ;) (JuanJo).
+- Halfpel motion estimation after MB type selection (JuanJo)
+- added pgm and .Y.U.V output format
+- suppressed 'img:' protocol. Simply use: /tmp/test%d.[pgm|Y] as input or
+ output.
+- added pgmpipe I/O format (original patch from Martin Aumueller
+ <lists@reserv.at>, but changed completely since we use a format
+ instead of a protocol)
+
+version 0.4.2:
+
+- added H.263/MPEG-4/MSMPEG4 decoding support. MPEG-4 decoding support
+ (for OpenDivX) is almost complete: 8x8 MVs and rounding are
+ missing. MSMPEG4 support is complete.
+- added prototype MPEG-1 decoder. Only I- and P-frames handled yet (it
+ can decode ffmpeg MPEGs :-)).
+- added libavcodec API documentation (see apiexample.c).
+- fixed image polyphase bug (the bottom of some images could be
+ greenish)
+- added support for non clipped motion vectors (decoding only)
+ and image sizes non-multiple of 16
+- added support for AC prediction (decoding only)
+- added file overwrite confirmation (can be disabled with -y)
+- added custom size picture to H.263 using H.263+ (Juanjo)
+
+version 0.4.1:
+
+- added MSMPEG4 (aka DivX) compatible encoder. Changed default codec
+ of AVI and ASF to DIV3.
+- added -me option to set motion estimation method
+ (default=log). suppressed redundant -hq option.
+- added options -acodec and -vcodec to force a given codec (useful for
+ AVI for example)
+- fixed -an option
+- improved dct_quantize speed
+- factorized some motion estimation code
+
+version 0.4.0:
+
+- removed grab code from ffserver and moved it to ffmpeg. Added
+ multistream support to ffmpeg.
+- added timeshifting support for live feeds (option ?date=xxx in the
+ URL)
+- added high quality image resize code with polyphase filter (needs
+  mmx/sse optimisation). Enabled multiple image size support in ffserver.
+- added multi live feed support in ffserver
+- suppressed master feature from ffserver (it should be done with an
+ external program which opens the .ffm url and writes it to another
+ ffserver)
+- added preliminary support for video stream parsing (WAV and AVI half
+ done). Added proper support for audio/video file conversion in
+ ffmpeg.
+- added preliminary support for video file sending from ffserver
+- redesigned I/O subsystem: now using URL-based input and output
+ (see avio.h)
+- added WAV format support
+- added "tty user interface" to ffmpeg to stop grabbing gracefully
+- added MMX/SSE optimizations to SAD (Sum of Absolute Differences)
+ (Juan J. Sierralta P. a.k.a. "Juanjo" <juanjo@atmlab.utfsm.cl>)
+- added MMX DCT from mpeg2_movie 1.5 (Juanjo)
+- added new motion estimation algorithms, log and phods (Juanjo)
+- changed directories: libav for format handling, libavcodec for
+ codecs
+
+version 0.3.4:
+
+- added stereo in MPEG audio encoder
+
+version 0.3.3:
+
+- added 'high quality' mode which uses motion vectors. It can be used in
+ real time at low resolution.
+- fixed rounding problems which caused quality problems at high
+ bitrates and large GOP size
+
+version 0.3.2: small fixes
+
+- ASF fixes
+- put_seek bug fix
+
+version 0.3.1: added avi/divx support
+
+- added AVI support
+- added MPEG-4 codec compatible with OpenDivX. It is based on the H.263 codec
+- added sound for flash format (not tested)
+
+version 0.3: initial public release
diff --git a/contrib/ffmpeg/Doxyfile b/contrib/ffmpeg/Doxyfile
new file mode 100644
index 000000000..9e4fdcdd0
--- /dev/null
+++ b/contrib/ffmpeg/Doxyfile
@@ -0,0 +1,1038 @@
+# Doxyfile 1.3-rc1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# General configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = ffmpeg
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = doxy
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch,
+# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en
+# (Japanese with English messages), Korean, Norwegian, Polish, Portuguese,
+# Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these class will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited
+# members of a class in the documentation of that class as if those members were
+# ordinary class members. Constructors, destructors and assignment operators of
+# the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before file names in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. It is allowed to use relative paths in the argument list.
+
+STRIP_FROM_PATH = .
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower case letters. If set to YES upper case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF = YES
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+DETAILS_AT_TOP = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# reimplements.
+
+INHERIT_DOCS = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consist of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C.
+# For instance some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources
+# only. Doxygen will then generate output that is more tailored for Java.
+# For instance namespaces will be presented as packages, qualified scopes
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT =
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp
+# *.h++ *.idl *.odl
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or directories
+# that are symbolic links (a Unix filesystem feature) are excluded from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+
+EXCLUDE_PATTERNS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+
+INPUT_FILTER =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output dir.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non empty doxygen will try to run
+# the html help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the Html help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript and frames is required (for instance Mozilla, Netscape 4.0+,
+# or Internet explorer 4.0+). Note that for large projects the tree generation
+# can take a very long time. In such cases it is better to disable this feature.
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimised for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_XML = NO
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_PREDEFINED tags.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed.
+
+PREDEFINED = __attribute__(x)="" "RENAME(x)=x ## _TMPL" "DEF(x)=x ## _TMPL" \
+ HAVE_AV_CONFIG_H HAVE_MMX HAVE_MMX2 HAVE_3DNOW \
+ ATTR_ALIGN(x)=""
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+#EXPAND_AS_DEFINED = FF_COMMON_FRAME
+EXPAND_AS_DEFINED = declare_idct(idct, table, idct_row_head, idct_row, idct_row_tail, idct_row_mid)
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse the
+# parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tagfiles.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base or
+# super classes. Setting the tag to NO turns the diagrams off. Note that this
+# option is superseded by the HAVE_DOT option below. This is only a fallback. It is
+# recommended to install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found on the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
+
+# The CGI_NAME tag should be the name of the CGI script that
+# starts the search engine (doxysearch) with the correct parameters.
+# A script with this name will be generated by doxygen.
+
+CGI_NAME = search.cgi
+
+# The CGI_URL tag should be the absolute URL to the directory where the
+# cgi binaries are located. See the documentation of your http daemon for
+# details.
+
+CGI_URL =
+
+# The DOC_URL tag should be the absolute URL to the directory where the
+# documentation is located. If left blank the absolute path to the
+# documentation, with file:// prepended to it, will be used.
+
+DOC_URL =
+
+# The DOC_ABSPATH tag should be the absolute path to the directory where the
+# documentation is located. If left blank the directory on the local machine
+# will be used.
+
+DOC_ABSPATH =
+
+# The BIN_ABSPATH tag must point to the directory where the doxysearch binary
+# is installed.
+
+BIN_ABSPATH = /usr/local/bin/
+
+# The EXT_DOC_PATHS tag can be used to specify one or more paths to
+# documentation generated for other projects. This allows doxysearch to search
+# the documentation for these projects as well.
+
+EXT_DOC_PATHS =
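The configuration above is tuned for documenting plain C sources: EXTRACT_ALL, JAVADOC_AUTOBRIEF and OPTIMIZE_OUTPUT_FOR_C are all set to YES, and the PREDEFINED entry blanks __attribute__(x) and expands RENAME(x) to x ## _TMPL so that the template-heavy codec files still parse. Below is a minimal sketch of the kind of source these settings expect; the helper names clip_uint8 and clip_row are hypothetical and are not taken from the tree.

    #include <stdint.h>

    /* Template files normally define this themselves before inclusion; for
     * doxygen, the Doxyfile's PREDEFINED entry supplies the same expansion. */
    #define RENAME(a) a ## _TMPL

    /**
     * Clip a signed integer into the 0..255 range.
     *
     * With JAVADOC_AUTOBRIEF = YES the first sentence above becomes the brief
     * description and this paragraph the detailed one; EXTRACT_ALL would
     * document the function even if it had no comment at all.
     *
     * @param a value to clip
     * @return the clipped value
     */
    static inline int clip_uint8(int a)
    {
        return a < 0 ? 0 : (a > 255 ? 255 : a);
    }

    /**
     * Clip a row of samples into a byte buffer.
     * PREDEFINED removes the __attribute__((...)) below and turns
     * RENAME(clip_row) into clip_row_TMPL, so neither macro confuses the parser.
     */
    static void RENAME(clip_row)(uint8_t *dst, const int *src, int n)
        __attribute__((nonnull));

    static void RENAME(clip_row)(uint8_t *dst, const int *src, int n)
    {
        int i;
        for (i = 0; i < n; i++)
            dst[i] = clip_uint8(src[i]);
    }

Running doxygen against this Doxyfile (with INPUT pointed at the source directories, since it is left blank above) would then place the HTML output under doxy/html, as set by OUTPUT_DIRECTORY and HTML_OUTPUT.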
diff --git a/contrib/ffmpeg/INSTALL b/contrib/ffmpeg/INSTALL
new file mode 100644
index 000000000..a636c5367
--- /dev/null
+++ b/contrib/ffmpeg/INSTALL
@@ -0,0 +1,14 @@
+
+1) Type './configure' to create the configuration (use './configure
+--help' to list the configure options).
+
+'configure' can be launched from a directory other than the ffmpeg
+sources to put the object files there. In that case, use an
+absolute path when launching 'configure',
+e.g. /ffmpegdir/ffmpeg/configure.
+
+2) Then type 'make' to build ffmpeg. On BSD systems, type 'gmake'
+instead of 'make'. You may need to install GNU make first.
+
+3) Type 'make install' to install ffmpeg and ffserver in
+/usr/local/bin.
diff --git a/contrib/ffmpeg/MAINTAINERS b/contrib/ffmpeg/MAINTAINERS
new file mode 100644
index 000000000..410523390
--- /dev/null
+++ b/contrib/ffmpeg/MAINTAINERS
@@ -0,0 +1,243 @@
+FFmpeg maintainers
+==================
+
+Below is a list of the people maintaining different parts of the
+FFmpeg code.
+
+
+Project Leader
+==============
+
+Michael Niedermayer
+ final design decisions
+
+
+Applications
+============
+
+ffmpeg:
+ ffmpeg.c Michael Niedermayer
+
+ Video Hooks:
+ vhook
+ vhook/watermark.c Marcus Engene
+ vhook/ppm.c
+ vhook/drawtext.c
+ vhook/fish.c
+ vhook/null.c
+ vhook/imlib2.c
+
+ffplay:
+ ffplay.c
+
+ffserver:
+ ffserver.c, ffserver.h
+
+Commandline utility code:
+ cmdutils.c, cmdutils.h
+
+QuickTime faststart:
+ qt-faststart.c Mike Melanson
+
+
+Miscellaneous Areas
+===================
+
+documentation Mike Melanson, Diego Biurrun
+website Mike Melanson, Diego Biurrun
+build system (configure,Makefiles) Diego Biurrun, Mans Rullgard
+project server Diego Biurrun, Mans Rullgard
+mailinglists Michael Niedermayer, Baptiste Coudurier
+
+
+libavutil
+=========
+
+External Interfaces:
+ libavutil/avutil.h
+Internal Interfaces:
+ libavutil/common.h
+
+Other:
+ intfloat* Michael Niedermayer
+ rational.c, rational.h Michael Niedermayer
+ mathematics.c, mathematics.h Michael Niedermayer
+ integer.c, integer.h Michael Niedermayer
+ bswap.h
+
+
+libavcodec
+==========
+
+Generic Parts:
+ External Interfaces:
+ avcodec.h Michael Niedermayer
+ utility code:
+ utils.c
+ mem.c
+ opt.c, opt.h
+ arithmetic expression evaluator:
+ eval.c Michael Niedermayer
+ audio and video frame extraction:
+ parser.c
+  bitstream reading:
+ bitstream.c, bitstream.h Michael Niedermayer
+ CABAC:
+ cabac.h, cabac.c Michael Niedermayer
+ DSP utilities:
+ dsputils.c, dsputils.h Michael Niedermayer
+ entropy coding:
+ rangecoder.c, rangecoder.h Michael Niedermayer
+ floating point AAN DCT:
+ faandct.c, faandct.h Michael Niedermayer
+ Golomb coding:
+ golomb.c, golomb.h Michael Niedermayer
+ motion estimation:
+ motion* Michael Niedermayer
+ rate control:
+ ratecontrol.c
+ xvid_rc.c Michael Niedermayer
+ simple IDCT:
+ simple_idct.c, simple_idct.h Michael Niedermayer
+ postprocessing:
+ libpostproc/* Michael Niedermayer
+
+Codecs:
+ 4xm.c Michael Niedermayer
+ 8bps.c Roberto Togni
+ aasc.c Kostya Shishkov
+ asv* Michael Niedermayer
+ bmp.c Mans Rullgard
+ cavs* Stefan Gehrer
+ cinepak.c Roberto Togni
+ cljr Alex Beregszaszi
+ cook.c, cookdata.h Benjamin Larsson
+ cscd.c Reimar Doeffinger
+ dpcm.c Mike Melanson
+ dv.c Roman Shaposhnik
+ ffv1.c Michael Niedermayer
+ flac.c Alex Beregszaszi
+ flacenc.c Justin Ruggles
+ flashsv.c Benjamin Larsson
+ flicvideo.c Mike Melanson
+ g726.c Roman Shaposhnik
+ gifdec.c Baptiste Coudurier
+ h264* Loren Merritt, Michael Niedermayer
+ h261* Michael Niedermayer
+ h263* Michael Niedermayer
+ huffyuv.c Michael Niedermayer
+ idcinvideo.c Mike Melanson
+ imc* Benjamin Larsson
+ indeo2* Kostya Shishkov
+ interplayvideo.c Mike Melanson
+ jpeg_ls.c Kostya Shishkov
+ kmvc.c Kostya Shishkov
+ lcl.c Roberto Togni
+ loco.c Kostya Shishkov
+ lzo.h, lzo.c Reimar Doeffinger
+ mdec.c Michael Niedermayer
+ mjpeg.c Michael Niedermayer
+ mpeg12.c, mpeg12data.h Michael Niedermayer
+ mpegvideo.c, mpegvideo.h Michael Niedermayer
+ msmpeg4.c, msmpeg4data.h Michael Niedermayer
+ msrle.c Mike Melanson
+ msvideo1.c Mike Melanson
+ nuv.c Reimar Doeffinger
+ oggtheora.c Mans Rullgard
+ qdm2.c, qdm2data.h Roberto Togni
+ qdrw.c Kostya Shishkov
+ qpeg.c Kostya Shishkov
+ qtrle.c Mike Melanson
+ ra144.c, ra144.h, ra288.c, ra288.h Roberto Togni
+ resample2.c Michael Niedermayer
+ rpza.c Roberto Togni
+ rtjpeg.c, rtjpeg.h Reimar Doeffinger
+ rv10.c Michael Niedermayer
+ smc.c Mike Melanson
+ snow.c Michael Niedermayer, Loren Merritt
+ sonic.c Alex Beregszaszi
+ svq3.c Michael Niedermayer
+ targa.c Kostya Shishkov
+ tiff.c Kostya Shishkov
+ truemotion1* Mike Melanson
+ truemotion2* Kostya Shishkov
+ truespeech.c Kostya Shishkov
+ tscc.c Kostya Shishkov
+ ulti* Kostya Shishkov
+ vc1* Kostya Shishkov
+ vcr1.c Michael Niedermayer
+ vmnc.c Kostya Shishkov
+ vorbis_enc.c Oded Shimon
+ vp3* Mike Melanson
+ vp5 Aurelien Jacobs
+ vp6 Aurelien Jacobs
+ vqavideo.c Mike Melanson
+ wavpack.c Kostya Shishkov
+ wmv2.c Michael Niedermayer
+ wnv1.c Kostya Shishkov
+ x264.c Mans Rullgard
+ xan.c Mike Melanson
+ xl.c Kostya Shishkov
+ xvmcvideo.c Ivan Kalvachev
+ zmbv.c Kostya Shishkov
+
+
+libavformat
+===========
+
+Generic parts:
+ External Interface:
+ libavformat/avformat.h
+ Utility Code:
+ libavformat/utils.c
+
+
+Muxers/Demuxers:
+ 4xm.c Mike Melanson
+ adtsenc.c Mans Rullgard
+ aiff.c Baptiste Coudurier
+ avi* Michael Niedermayer
+ crc.c Michael Niedermayer
+ daud.c Reimar Doeffinger
+ dc1394.c, dv.c Roman Shaposhnik
+ flic.c Mike Melanson
+ flvdec.c, flvenc.c Michael Niedermayer
+ gxf.c Reimar Doeffinger
+ gxfenc.c Baptiste Coudurier
+ idcin.c Mike Melanson
+ idroq.c Mike Melanson
+ ipmovie.c Mike Melanson
+ img2.c Michael Niedermayer
+ matroska.c Aurelien Jacobs
+ mov.c Francois Revol, Michael Niedermayer
+ movenc.c Michael Niedermayer, Baptiste Coudurier
+ mpegts* Mans Rullgard
+ mtv.c Reynaldo H. Verdejo Pinochet
+ mxf.c Baptiste Coudurier
+ nsvdec.c Francois Revol
+ nut.c Alex Beregszaszi
+ nuv.c Reimar Doeffinger
+ ogg2.c, ogg2.h Mans Rullgard
+ oggparsevorbis.c Mans Rullgard
+ oggparseogm.c Mans Rullgard
+ psxstr.c Mike Melanson
+ raw.c Michael Niedermayer
+ rm.c Roberto Togni
+ segafilm.c Mike Melanson
+ v4l2.c Luca Abeni
+ voc.c Aurelien Jacobs
+ wav.c Michael Niedermayer
+ wc3movie.c Mike Melanson
+ westwood.c Mike Melanson
+ wv.c Kostya Shishkov
+
+
+Operating systems / CPU architectures
+=====================================
+
+Alpha Mans Rullgard, Falk Hueffner
+BeOS Francois Revol
+i386 Michael Niedermayer
+Mac OS X / PowerPC Romain Dolbeau
+Amiga / PowerPC Colin Ward
+Linux / PowerPC Luca Barbato
diff --git a/contrib/ffmpeg/Makefile b/contrib/ffmpeg/Makefile
new file mode 100644
index 000000000..929522c3b
--- /dev/null
+++ b/contrib/ffmpeg/Makefile
@@ -0,0 +1,241 @@
+#
+# Main ffmpeg Makefile
+# (c) 2000-2004 Fabrice Bellard
+#
+include config.mak
+
+VPATH=$(SRC_PATH_BARE)
+
+CFLAGS=$(OPTFLAGS) -I$(BUILD_ROOT) -I$(SRC_PATH) -I$(SRC_PATH)/libavutil \
+ -I$(SRC_PATH)/libavcodec -I$(SRC_PATH)/libavformat -I$(SRC_PATH)/libswscale \
+ -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -D_ISOC9X_SOURCE
+LDFLAGS+= -g
+
+ifeq ($(CONFIG_FFMPEG),yes)
+MANPAGES=doc/ffmpeg.1
+PROGS_G+=ffmpeg_g$(EXESUF)
+PROGS+=ffmpeg$(EXESUF)
+endif
+
+ifeq ($(CONFIG_FFSERVER),yes)
+MANPAGES+=doc/ffserver.1
+PROGS+=ffserver$(EXESUF)
+endif
+
+ifeq ($(CONFIG_FFPLAY),yes)
+MANPAGES+=doc/ffplay.1
+PROGS_G+=ffplay_g$(EXESUF)
+PROGS+=ffplay$(EXESUF)
+endif
+
+BASENAMES=ffmpeg ffplay ffserver
+ALLPROGS=$(addsuffix $(EXESUF), $(BASENAMES))
+ALLPROGS_G=$(addsuffix _g$(EXESUF), $(BASENAMES))
+ALLMANPAGES=$(addsuffix .1, $(BASENAMES))
+
+ifeq ($(BUILD_SHARED),yes)
+DEP_LIBS=libavcodec/$(SLIBPREF)avcodec$(SLIBSUF) libavformat/$(SLIBPREF)avformat$(SLIBSUF)
+else
+DEP_LIBS=libavcodec/$(LIBPREF)avcodec$(LIBSUF) libavformat/$(LIBPREF)avformat$(LIBSUF)
+endif
+
+ifeq ($(CONFIG_VHOOK),yes)
+VHOOK=videohook
+INSTALLVHOOK=install-vhook
+endif
+
+ifeq ($(BUILD_DOC),yes)
+DOC=documentation
+INSTALLMAN=install-man
+endif
+
+OBJS = ffmpeg.o ffserver.o cmdutils.o ffplay.o
+SRCS = $(OBJS:.o=.c) $(ASM_OBJS:.o=.s)
+LDFLAGS := -L$(BUILD_ROOT)/libavformat -L$(BUILD_ROOT)/libavcodec -L$(BUILD_ROOT)/libavutil $(LDFLAGS)
+EXTRALIBS := -lavformat$(BUILDSUF) -lavcodec$(BUILDSUF) -lavutil$(BUILDSUF) $(EXTRALIBS)
+
+ifeq ($(CONFIG_SWSCALER),yes)
+LDFLAGS+=-L./libswscale
+EXTRALIBS+=-lswscale$(BUILDSUF)
+endif
+
+all: lib $(PROGS_G) $(PROGS) $(VHOOK) $(DOC)
+
+lib:
+ $(MAKE) -C libavutil all
+ $(MAKE) -C libavcodec all
+ $(MAKE) -C libavformat all
+ifeq ($(CONFIG_PP),yes)
+ $(MAKE) -C libpostproc all
+endif
+ifeq ($(CONFIG_SWSCALER),yes)
+ $(MAKE) -C libswscale all
+endif
+
+ffmpeg_g$(EXESUF): ffmpeg.o cmdutils.o .libs
+ $(CC) $(LDFLAGS) -o $@ ffmpeg.o cmdutils.o $(EXTRALIBS)
+
+ffserver$(EXESUF): ffserver.o .libs
+ $(CC) $(LDFLAGS) $(FFSERVERLDFLAGS) -o $@ ffserver.o $(EXTRALIBS)
+
+ffplay_g$(EXESUF): ffplay.o cmdutils.o .libs
+ $(CC) $(LDFLAGS) -o $@ ffplay.o cmdutils.o $(EXTRALIBS) $(SDL_LIBS)
+
+%$(EXESUF): %_g$(EXESUF)
+ cp -p $< $@
+ $(STRIP) $@
+
+version.h:
+ $(SRC_PATH)/version.sh $(SRC_PATH)
+
+output_example$(EXESUF): output_example.o .libs
+ $(CC) $(LDFLAGS) -o $@ output_example.o $(EXTRALIBS)
+
+qt-faststart$(EXESUF): qt-faststart.c
+ $(CC) $(CFLAGS) $< -o $@
+
+cws2fws$(EXESUF): cws2fws.c
+ $(CC) $< -o $@ -lz
+
+ffplay.o: ffplay.c
+ $(CC) $(CFLAGS) $(SDL_CFLAGS) -c -o $@ $<
+
+ffmpeg.o ffplay.o ffserver.o: version.h
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c -o $@ $<
+
+videohook: .libs
+ $(MAKE) -C vhook all
+
+documentation:
+ $(MAKE) -C doc all
+
+install: install-progs install-libs install-headers $(INSTALLMAN) $(INSTALLVHOOK)
+
+ifeq ($(BUILD_SHARED),yes)
+install-progs: $(PROGS) install-libs
+else
+install-progs: $(PROGS)
+endif
+ install -d "$(bindir)"
+ install -c $(INSTALLSTRIP) -m 755 $(PROGS) "$(bindir)"
+
+# create the windows installer
+wininstaller: all install
+ makensis ffinstall.nsi
+
+install-man:
+ install -d "$(mandir)/man1"
+ install -m 644 $(MANPAGES) "$(mandir)/man1"
+
+install-vhook:
+ $(MAKE) -C vhook install
+
+install-libs:
+ $(MAKE) -C libavutil install-libs
+ $(MAKE) -C libavcodec install-libs
+ $(MAKE) -C libavformat install-libs
+ifeq ($(CONFIG_PP),yes)
+ $(MAKE) -C libpostproc install-libs
+endif
+ifeq ($(CONFIG_SWSCALER),yes)
+ $(MAKE) -C libswscale install-libs
+endif
+
+ifeq ($(BUILD_SHARED),yes)
+ -$(LDCONFIG)
+endif
+
+install-headers:
+ $(MAKE) -C libavutil install-headers
+ $(MAKE) -C libavcodec install-headers
+ $(MAKE) -C libavformat install-headers
+ifeq ($(CONFIG_PP),yes)
+ $(MAKE) -C libpostproc install-headers
+endif
+ $(MAKE) -C libswscale install-headers
+
+uninstall: uninstall-progs uninstall-libs uninstall-headers uninstall-man uninstall-vhook
+
+uninstall-progs:
+ rm -f $(addprefix $(bindir)/, $(ALLPROGS))
+
+uninstall-man:
+ rm -f $(addprefix $(mandir)/man1/,$(ALLMANPAGES))
+
+uninstall-vhook:
+ $(MAKE) -C vhook uninstall
+
+uninstall-libs:
+ $(MAKE) -C libavutil uninstall-libs
+ $(MAKE) -C libavcodec uninstall-libs
+ $(MAKE) -C libavformat uninstall-libs
+ $(MAKE) -C libpostproc uninstall-libs
+
+uninstall-headers:
+ $(MAKE) -C libavutil uninstall-headers
+ $(MAKE) -C libavcodec uninstall-headers
+ $(MAKE) -C libavformat uninstall-headers
+ $(MAKE) -C libpostproc uninstall-headers
+ -rmdir "$(incdir)"
+ -rmdir "$(prefix)/include/postproc"
+
+depend dep: .depend
+ $(MAKE) -C libavutil depend
+ $(MAKE) -C libavcodec depend
+ $(MAKE) -C libavformat depend
+ifeq ($(CONFIG_PP),yes)
+ $(MAKE) -C libpostproc depend
+endif
+ifeq ($(CONFIG_SWSCALER),yes)
+ $(MAKE) -C libswscale depend
+endif
+ifeq ($(CONFIG_VHOOK),yes)
+ $(MAKE) -C vhook depend
+endif
+
+.depend: $(SRCS) version.h
+ $(CC) -MM $(CFLAGS) $(SDL_CFLAGS) $(filter-out %.h,$^) 1>.depend
+
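+# .libs is a stamp file: it is touched whenever one of the dependent
+# libraries is newer, so the programs that depend on it get relinked.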
+.libs: lib
+ @test -f .libs || touch .libs
+ @for i in $(DEP_LIBS) ; do if test $$i -nt .libs ; then touch .libs; fi ; done
+
+clean:
+ $(MAKE) -C libavutil clean
+ $(MAKE) -C libavcodec clean
+ $(MAKE) -C libavformat clean
+ $(MAKE) -C libpostproc clean
+ $(MAKE) -C libswscale clean
+ $(MAKE) -C tests clean
+ $(MAKE) -C vhook clean
+ $(MAKE) -C doc clean
+ rm -f *.o *.d *~ .libs gmon.out TAGS $(ALLPROGS) $(ALLPROGS_G) \
+ output_example$(EXESUF) qt-faststart$(EXESUF) cws2fws$(EXESUF)
+
+distclean: clean
+ $(MAKE) -C libavutil distclean
+ $(MAKE) -C libavcodec distclean
+ $(MAKE) -C libavformat distclean
+ $(MAKE) -C libpostproc distclean
+ $(MAKE) -C libswscale distclean
+ $(MAKE) -C tests distclean
+ $(MAKE) -C vhook distclean
+ rm -f .depend version.h config.* *.pc
+
+TAGS:
+ etags *.[ch] libavformat/*.[ch] libavcodec/*.[ch]
+
+# regression tests
+
+codectest libavtest test-server fulltest test mpeg4 mpeg: $(PROGS)
+ $(MAKE) -C tests $@
+
+.PHONY: all lib videohook documentation install* wininstaller uninstall*
+.PHONY: dep depend clean distclean TAGS
+.PHONY: codectest libavtest test-server fulltest test mpeg4 mpeg
+
+ifneq ($(wildcard .depend),)
+include .depend
+endif
diff --git a/contrib/ffmpeg/README b/contrib/ffmpeg/README
new file mode 100644
index 000000000..ad98fc645
--- /dev/null
+++ b/contrib/ffmpeg/README
@@ -0,0 +1,19 @@
+FFmpeg README
+-------------
+
+1) Documentation
+----------------
+
+* Read the documentation in the doc/ directory.
+
+2) Licensing
+------------
+
+* Read the file COPYING. ffmpeg and the associated libraries EXCEPT
+ liba52 and libpostproc are licensed under the GNU Lesser General
+ Public License.
+
+* liba52 and libpostproc are distributed under the GNU General Public
+ License, and their compilation and use are optional in ffmpeg.
+
+Fabrice Bellard.
diff --git a/contrib/ffmpeg/berrno.h b/contrib/ffmpeg/berrno.h
new file mode 100644
index 000000000..eb3bd0cd4
--- /dev/null
+++ b/contrib/ffmpeg/berrno.h
@@ -0,0 +1,44 @@
+#ifndef BERRNO_H
+#define BERRNO_H
+
+#include <Errors.h>
+
+// mmu_man: this is needed for http.c (defined errno)
+#include <errno.h>
+
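+/* Redefine these error codes to their conventional POSIX values; the
+ native BeOS headers apparently use different numbers than the rest of
+ the code expects. */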
+#ifdef ENOENT
+#undef ENOENT
+#endif
+#define ENOENT 2
+
+#ifdef EINTR
+#undef EINTR
+#endif
+#define EINTR 4
+
+#ifdef EIO
+#undef EIO
+#endif
+#define EIO 5
+
+#ifdef EAGAIN
+#undef EAGAIN
+#endif
+#define EAGAIN 11
+
+#ifdef ENOMEM
+#undef ENOMEM
+#endif
+#define ENOMEM 12
+
+#ifdef EINVAL
+#undef EINVAL
+#endif
+#define EINVAL 22
+
+#ifdef EPIPE
+#undef EPIPE
+#endif
+#define EPIPE 32
+
+#endif /* BERRNO_H */
diff --git a/contrib/ffmpeg/build_avopt b/contrib/ffmpeg/build_avopt
new file mode 100755
index 000000000..fcf165765
--- /dev/null
+++ b/contrib/ffmpeg/build_avopt
@@ -0,0 +1,9 @@
+#!/bin/sh
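+# Turns C struct member declarations read from stdin into AVOption table
+# entries: each "type name;" line becomes
+# {"name", NULL, OFFSET(name), FF_OPT_TYPE_<TYPE>, DEFAULT, <TYPE>_MIN, <TYPE>_MAX},
+# with AVRational, float and char* mapped to the corresponding option types.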
+sed 's/unsigned//g' |\
+ sed 's/enum//g' |\
+ egrep '^ *(int|float|double|AVRational|char *\*) *[a-zA-Z_0-9]* *;' |\
+ sed 's/^ *\([^ ]*\)[ *]*\([^;]*\);.*$/{"\2", NULL, OFFSET(\2), FF_OPT_TYPE_\U\1, DEFAULT, \1_MIN, \1_MAX},/' |\
+ sed 's/AVRATIONAL_M/INT_M/g'|\
+ sed 's/TYPE_AVRATIONAL/TYPE_RATIONAL/g'|\
+ sed 's/FLOAT_M/FLT_M/g'|\
+ sed 's/FF_OPT_TYPE_CHAR/FF_OPT_TYPE_STRING/g'
diff --git a/contrib/ffmpeg/clean-diff b/contrib/ffmpeg/clean-diff
new file mode 100755
index 000000000..98e26a79f
--- /dev/null
+++ b/contrib/ffmpeg/clean-diff
@@ -0,0 +1,11 @@
+#!/bin/sh
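+# Filter for unified diffs: normalizes tabs and trailing whitespace on added
+# lines, strips carriage returns, and drops hunks that contain nothing but
+# whitespace changes.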
+sed '/^+[^+]/!s/ /TaBBaT/g' |\
+ expand -t `seq -s , 9 8 200` |\
+ sed 's/TaBBaT/ /g' |\
+ sed '/^+[^+]/s/ * $//' |\
+ tr -d '\015' |\
+ tr '\n' '°' |\
+ sed 's/\(@@[^@]*@@°[^@]*\)/\n\1/g' |\
+ egrep -v '@@[^@]*@@°(( [^°]*°)|([+-][[:space:]]*°)|(-[[:space:]]*([^°]*)°\+[[:space:]]*\5°))*$' |\
+ tr -d '\n' |\
+ tr '°' '\n'
diff --git a/contrib/ffmpeg/cmdutils.c b/contrib/ffmpeg/cmdutils.c
new file mode 100644
index 000000000..2c53d90da
--- /dev/null
+++ b/contrib/ffmpeg/cmdutils.c
@@ -0,0 +1,141 @@
+/*
+ * Various utilities for command line tools
+ * Copyright (c) 2000-2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#define HAVE_AV_CONFIG_H
+#include "avformat.h"
+#include "common.h"
+
+#include "cmdutils.h"
+
+void show_help_options(const OptionDef *options, const char *msg, int mask, int value)
+{
+ const OptionDef *po;
+ int first;
+
+ first = 1;
+ for(po = options; po->name != NULL; po++) {
+ char buf[64];
+ if ((po->flags & mask) == value) {
+ if (first) {
+ printf("%s", msg);
+ first = 0;
+ }
+ pstrcpy(buf, sizeof(buf), po->name);
+ if (po->flags & HAS_ARG) {
+ pstrcat(buf, sizeof(buf), " ");
+ pstrcat(buf, sizeof(buf), po->argname);
+ }
+ printf("-%-17s %s\n", buf, po->help);
+ }
+ }
+}
+
+static const OptionDef* find_option(const OptionDef *po, const char *name){
+ while (po->name != NULL) {
+ if (!strcmp(name, po->name))
+ break;
+ po++;
+ }
+ return po;
+}
+
+void parse_options(int argc, char **argv, const OptionDef *options)
+{
+ const char *opt, *arg;
+ int optindex, handleoptions=1;
+ const OptionDef *po;
+
+ /* parse options */
+ optindex = 1;
+ while (optindex < argc) {
+ opt = argv[optindex++];
+
+ if (handleoptions && opt[0] == '-' && opt[1] != '\0') {
+ if (opt[1] == '-' && opt[2] == '\0') {
+ handleoptions = 0;
+ continue;
+ }
+ po= find_option(options, opt + 1);
+ if (!po->name)
+ po= find_option(options, "default");
+ if (!po->name) {
+unknown_opt:
+ fprintf(stderr, "%s: unrecognized option '%s'\n", argv[0], opt);
+ exit(1);
+ }
+ arg = NULL;
+ if (po->flags & HAS_ARG) {
+ arg = argv[optindex++];
+ if (!arg) {
+ fprintf(stderr, "%s: missing argument for option '%s'\n", argv[0], opt);
+ exit(1);
+ }
+ }
+ if (po->flags & OPT_STRING) {
+ char *str;
+ str = av_strdup(arg);
+ *po->u.str_arg = str;
+ } else if (po->flags & OPT_BOOL) {
+ *po->u.int_arg = 1;
+ } else if (po->flags & OPT_INT) {
+ *po->u.int_arg = atoi(arg);
+ } else if (po->flags & OPT_FLOAT) {
+ *po->u.float_arg = atof(arg);
+ } else if (po->flags & OPT_FUNC2) {
+ if(po->u.func2_arg(opt+1, arg)<0)
+ goto unknown_opt;
+ } else {
+ po->u.func_arg(arg);
+ }
+ } else {
+ parse_arg_file(opt);
+ }
+ }
+}
+
+void print_error(const char *filename, int err)
+{
+ switch(err) {
+ case AVERROR_NUMEXPECTED:
+ fprintf(stderr, "%s: Incorrect image filename syntax.\n"
+ "Use '%%d' to specify the image number:\n"
+ " for img1.jpg, img2.jpg, ..., use 'img%%d.jpg';\n"
+ " for img001.jpg, img002.jpg, ..., use 'img%%03d.jpg'.\n",
+ filename);
+ break;
+ case AVERROR_INVALIDDATA:
+ fprintf(stderr, "%s: Error while parsing header\n", filename);
+ break;
+ case AVERROR_NOFMT:
+ fprintf(stderr, "%s: Unknown format\n", filename);
+ break;
+ case AVERROR_IO:
+ fprintf(stderr, "%s: I/O error occured\n"
+ "Usually that means that input file is truncated and/or corrupted.\n",
+ filename);
+ break;
+ case AVERROR_NOMEM:
+ fprintf(stderr, "%s: memory allocation error occured\n", filename);
+ break;
+ default:
+ fprintf(stderr, "%s: Error while opening file\n", filename);
+ break;
+ }
+}
diff --git a/contrib/ffmpeg/cmdutils.h b/contrib/ffmpeg/cmdutils.h
new file mode 100644
index 000000000..d9c66f015
--- /dev/null
+++ b/contrib/ffmpeg/cmdutils.h
@@ -0,0 +1,34 @@
+#ifndef _CMD_UTILS_H
+#define _CMD_UTILS_H
+
+typedef struct {
+ const char *name;
+ int flags;
+#define HAS_ARG 0x0001
+#define OPT_BOOL 0x0002
+#define OPT_EXPERT 0x0004
+#define OPT_STRING 0x0008
+#define OPT_VIDEO 0x0010
+#define OPT_AUDIO 0x0020
+#define OPT_GRAB 0x0040
+#define OPT_INT 0x0080
+#define OPT_FLOAT 0x0100
+#define OPT_SUBTITLE 0x0200
+#define OPT_FUNC2 0x0400
+ union {
+ void (*func_arg)(const char *); //FIXME passing error code as int return would be nicer than exit() in the func
+ int *int_arg;
+ char **str_arg;
+ float *float_arg;
+ int (*func2_arg)(const char *, const char *);
+ } u;
+ const char *help;
+ const char *argname;
+} OptionDef;
+
+void show_help_options(const OptionDef *options, const char *msg, int mask, int value);
+void parse_options(int argc, char **argv, const OptionDef *options);
+void parse_arg_file(const char *filename);
+void print_error(const char *filename, int err);
+
+#endif /* _CMD_UTILS_H */
diff --git a/contrib/ffmpeg/common.mak b/contrib/ffmpeg/common.mak
new file mode 100644
index 000000000..d4a8dca41
--- /dev/null
+++ b/contrib/ffmpeg/common.mak
@@ -0,0 +1,100 @@
+#
+# common bits used by all libraries
+#
+
+VPATH = $(SRC_PATH_BARE)/lib$(NAME)
+SRC_DIR = "$(VPATH)"
+
+CFLAGS += -DHAVE_AV_CONFIG_H -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE \
+ -D_ISOC9X_SOURCE -I$(BUILD_ROOT) -I$(SRC_PATH) \
+ -I$(SRC_PATH)/libavutil $(OPTFLAGS)
+SRCS := $(OBJS:.o=.c) $(ASM_OBJS:.o=.S) $(CPPOBJS:.o=.cpp)
+OBJS := $(OBJS) $(ASM_OBJS) $(CPPOBJS)
+STATIC_OBJS := $(OBJS) $(STATIC_OBJS)
+SHARED_OBJS := $(OBJS) $(SHARED_OBJS)
+
+all: $(EXTRADEPS) $(LIB) $(SLIBNAME)
+
+$(LIB): $(STATIC_OBJS)
+ rm -f $@
+ $(AR) rc $@ $^ $(EXTRAOBJS)
+ $(RANLIB) $@
+
+$(SLIBNAME): $(SLIBNAME_WITH_MAJOR)
+ ln -sf $^ $@
+
+$(SLIBNAME_WITH_MAJOR): $(SHARED_OBJS)
+ $(CC) $(SHFLAGS) $(LDFLAGS) -o $@ $^ $(EXTRALIBS) $(EXTRAOBJS)
+ $(SLIB_EXTRA_CMD)
+
+%.o: %.c
+ $(CC) $(CFLAGS) $(LIBOBJFLAGS) -c -o $@ $<
+
+%.o: %.S
+ $(CC) $(CFLAGS) $(LIBOBJFLAGS) -c -o $@ $<
+
+# BeOS: remove -Wall to get rid of all the "multibyte constant" warnings
+%.o: %.cpp
+ g++ $(subst -Wall,,$(CFLAGS)) -c -o $@ $<
+
+%: %.o $(LIB)
+ $(CC) $(LDFLAGS) -o $@ $^ $(EXTRALIBS)
+
+depend dep: $(SRCS)
+ $(CC) -MM $(CFLAGS) $^ 1>.depend
+
+clean::
+ rm -f *.o *.d *~ *.a *.lib *.so *.so.* *.dylib *.dll \
+ *.def *.dll.a *.exp
+
+distclean: clean
+ rm -f .depend
+
+ifeq ($(BUILD_SHARED),yes)
+INSTLIBTARGETS += install-lib-shared
+endif
+ifeq ($(BUILD_STATIC),yes)
+INSTLIBTARGETS += install-lib-static
+endif
+
+install: install-libs install-headers
+
+install-libs: $(INSTLIBTARGETS)
+
+install-lib-shared: $(SLIBNAME)
+ install -d "$(shlibdir)"
+ install $(INSTALLSTRIP) -m 755 $(SLIBNAME) \
+ "$(shlibdir)/$(SLIBNAME_WITH_VERSION)"
+ cd "$(shlibdir)" && \
+ ln -sf $(SLIBNAME_WITH_VERSION) $(SLIBNAME_WITH_MAJOR)
+ cd "$(shlibdir)" && \
+ ln -sf $(SLIBNAME_WITH_VERSION) $(SLIBNAME)
+
+install-lib-static: $(LIB)
+ install -d "$(libdir)"
+ install -m 644 $(LIB) "$(libdir)"
+ $(LIB_INSTALL_EXTRA_CMD)
+
+install-headers:
+ install -d "$(incdir)"
+ install -d "$(libdir)/pkgconfig"
+ install -m 644 $(addprefix $(SRC_DIR)/,$(HEADERS)) "$(incdir)"
+ install -m 644 $(BUILD_ROOT)/lib$(NAME).pc "$(libdir)/pkgconfig"
+
+uninstall: uninstall-libs uninstall-headers
+
+uninstall-libs:
+ -rm -f "$(shlibdir)/$(SLIBNAME_WITH_MAJOR)" \
+ "$(shlibdir)/$(SLIBNAME)" \
+ "$(shlibdir)/$(SLIBNAME_WITH_VERSION)"
+ -rm -f "$(libdir)/$(LIB)"
+
+uninstall-headers:
+ rm -f $(addprefix "$(incdir)/",$(HEADERS))
+ rm -f "$(libdir)/pkgconfig/lib$(NAME).pc"
+
+.PHONY: all depend dep clean distclean install* uninstall*
+
+ifneq ($(wildcard .depend),)
+include .depend
+endif
diff --git a/contrib/ffmpeg/configure b/contrib/ffmpeg/configure
new file mode 100755
index 000000000..8da8f0916
--- /dev/null
+++ b/contrib/ffmpeg/configure
@@ -0,0 +1,2186 @@
+#!/bin/sh
+#
+# FFmpeg configure script
+#
+# Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+# Copyright (c) 2005-2006 Diego Biurrun
+# Copyright (c) 2005-2006 Mans Rullgard
+#
+
+# make sure we are running under a compatible shell
+# try to make this part work with most shells
+
+try_exec(){
+ type "$1" >/dev/null 2>&1 && exec "$@"
+}
+
+unset foo
+(: ${foo%%bar}) 2>/dev/null && ! (: ${foo?}) 2>/dev/null
+if test "$?" != 0; then
+ export FF_CONF_EXEC
+ if test "0$FF_CONF_EXEC" -lt 1; then
+ FF_CONF_EXEC=1
+ try_exec bash "$0" "$@"
+ fi
+ if test "0$FF_CONF_EXEC" -lt 2; then
+ FF_CONF_EXEC=2
+ try_exec ksh "$0" "$@"
+ fi
+ if test "0$FF_CONF_EXEC" -lt 3; then
+ FF_CONF_EXEC=3
+ try_exec /usr/xpg4/bin/sh "$0" "$@"
+ fi
+ echo "No compatible shell script interpreter found."
+ echo "This configure script requires a POSIX compatible shell"
+ echo "such as bash or ksh."
+ if test "$BASH_VERSION" = '2.04.0(1)-release'; then
+ echo "This bash version ($BASH_VERSION) is broken on your platform."
+ echo "Upgrade to a later version if available."
+ fi
+ exit 1
+fi
+
+show_help(){
+ echo "Usage: configure [options]"
+ echo "Options: [defaults in brackets after descriptions]"
+ echo
+ echo "Standard options:"
+ echo " --help print this message"
+ echo " --log[=FILE|yes|no] log tests and output to FILE [config.err]"
+ echo " --prefix=PREFIX install in PREFIX [$PREFIX]"
+ echo " --libdir=DIR install libs in DIR [PREFIX/lib]"
+ echo " --shlibdir=DIR install shared libs in DIR [PREFIX/lib]"
+ echo " --incdir=DIR install includes in DIR [PREFIX/include/ffmpeg]"
+ echo " --mandir=DIR install man page in DIR [PREFIX/man]"
+ echo " --enable-mp3lame enable MP3 encoding via libmp3lame [default=no]"
+ echo " --enable-libnut enable NUT support via libnut [default=no]"
+ echo " --enable-libogg enable Ogg support via libogg [default=no]"
+ echo " --enable-vorbis enable Vorbis support via libvorbis [default=no]"
+ echo " --enable-faad enable FAAD support via libfaad [default=no]"
+ echo " --enable-faadbin build FAAD support with runtime linking [default=no]"
+ echo " --enable-faac enable FAAC support via libfaac [default=no]"
+ echo " --enable-libgsm enable GSM support via libgsm [default=no]"
+ echo " --enable-xvid enable XviD support via xvidcore [default=no]"
+ echo " --enable-x264 enable H.264 encoding via x264 [default=no]"
+ echo " --enable-mingw32 enable MinGW native/cross Windows compile"
+ echo " --enable-mingwce enable MinGW native/cross WinCE compile"
+ echo " --enable-a52 enable GPLed A52 support [default=no]"
+ echo " --enable-a52bin open liba52.so.0 at runtime [default=no]"
+ echo " --enable-dts enable GPLed DTS support [default=no]"
+ echo " --enable-pp enable GPLed postprocessing support [default=no]"
+ echo " --enable-static build static libraries [default=yes]"
+ echo " --disable-static do not build static libraries [default=no]"
+ echo " --enable-shared build shared libraries [default=no]"
+ echo " --disable-shared do not build shared libraries [default=yes]"
+ echo " --enable-amr_nb enable amr_nb float audio codec"
+ echo " --enable-amr_nb-fixed use fixed point for amr-nb codec"
+ echo " --enable-amr_wb enable amr_wb float audio codec"
+ echo " --enable-amr_if2 enable amr_wb IF2 audio codec"
+ echo " --enable-sunmlib use Sun medialib [default=no]"
+ echo " --enable-pthreads use pthreads [default=no]"
+ echo " --enable-dc1394 enable IIDC-1394 grabbing using libdc1394"
+ echo " and libraw1394 [default=no]"
+ echo " --enable-swscaler software scaler support [default=no]"
+ echo " --enable-avisynth allow reading AVISynth script files [default=no]"
+ echo " --enable-gpl allow use of GPL code, the resulting libav*"
+ echo " and ffmpeg will be under GPL [default=no]"
+ echo ""
+ echo "Advanced options (experts only):"
+ echo " --source-path=PATH path to source code [$source_path]"
+ echo " --build-path=PATH path to build directory [.]"
+ echo " --cross-prefix=PREFIX use PREFIX for compilation tools [$cross_prefix]"
+ echo " --cross-compile assume a cross-compiler is used"
+ echo " --cc=CC use C compiler CC [$cc]"
+ echo " --make=MAKE use specified make [$make]"
+ echo " --extra-cflags=ECFLAGS add ECFLAGS to CFLAGS [$CFLAGS]"
+ echo " --extra-ldflags=ELDFLAGS add ELDFLAGS to LDFLAGS [$LDFLAGS]"
+ echo " --extra-libs=ELIBS add ELIBS [$ELIBS]"
+ echo " --build-suffix=SUFFIX suffix for application specific build []"
+ echo " --arch=ARCH select architecture [$arch]"
+ echo " --cpu=CPU selects the minimum cpu required (affects
+ instruction selection, may crash on older CPUs)"
+ echo " --powerpc-perf-enable enable performance report on PPC"
+ echo " (requires enabling PMC)"
+ echo " --disable-mmx disable MMX usage"
+ echo " --disable-armv5te disable armv5te usage"
+ echo " --disable-iwmmxt disable iwmmxt usage"
+ echo " --disable-altivec disable AltiVec usage"
+ echo " --disable-audio-oss disable OSS audio support [default=no]"
+ echo " --disable-audio-beos disable BeOS audio support [default=no]"
+ echo " --disable-v4l disable video4linux grabbing [default=no]"
+ echo " --disable-v4l2 disable video4linux2 grabbing [default=no]"
+ echo " --disable-bktr disable bktr video grabbing [default=no]"
+ echo " --disable-dv1394 disable DV1394 grabbing [default=no]"
+ echo " --disable-network disable network support [default=no]"
+ echo " --disable-ipv6 disable ipv6 support [default=no]"
+ echo " --disable-zlib disable zlib [default=no]"
+ echo " --disable-vhook disable video hooking support"
+ echo " --enable-gprof enable profiling with gprof [$gprof]"
+ echo " --disable-debug disable debugging symbols"
+ echo " --disable-opts disable compiler optimizations"
+ echo " --enable-extra-warnings enable more compiler warnings"
+ echo " --disable-mpegaudio-hp faster (but less accurate)"
+ echo " MPEG audio decoding [default=no]"
+ echo " --disable-protocols disable I/O protocols support [default=no]"
+ echo " --disable-ffmpeg disable ffmpeg build"
+ echo " --disable-ffserver disable ffserver build"
+ echo " --disable-ffplay disable ffplay build"
+ echo " --enable-small optimize for size instead of speed"
+ echo " --enable-memalign-hack emulate memalign, interferes with memory debuggers"
+ echo " --disable-strip disable stripping of executables and shared libraries"
+ echo " --disable-encoder=NAME disables encoder NAME"
+ echo " --enable-encoder=NAME enables encoder NAME"
+ echo " --disable-decoder=NAME disables decoder NAME"
+ echo " --enable-decoder=NAME enables decoder NAME"
+ echo " --disable-encoders disables all encoders"
+ echo " --disable-decoders disables all decoders"
+ echo " --disable-muxer=NAME disables muxer NAME"
+ echo " --enable-muxer=NAME enables muxer NAME"
+ echo " --disable-muxers disables all muxers"
+ echo " --disable-demuxer=NAME disables demuxer NAME"
+ echo " --enable-demuxer=NAME enables demuxer NAME"
+ echo " --disable-demuxers disables all demuxers"
+ echo " --enable-parser=NAME enables parser NAME"
+ echo " --disable-parser=NAME disables parser NAME"
+ echo " --disable-parsers disables all parsers"
+ echo ""
+ echo "NOTE: Object files are built at the place where configure is launched."
+ exit 1
+}
+
+log(){
+ echo "$@" >>$logfile
+}
+
+log_file(){
+ log BEGIN $1
+ cat -n $1 >>$logfile
+ log END $1
+}
+
+echolog(){
+ log "$@"
+ echo "$@"
+}
+
+die(){
+ echolog "$@"
+ cat <<EOF
+If you think configure made a mistake, make sure you are using the latest
+version from SVN. If the latest version fails, report the problem to the
+ffmpeg-devel@mplayerhq.hu mailing list or IRC #ffmpeg on irc.freenode.net.
+EOF
+ if enabled logging; then
+ cat <<EOF
+Include the log file "$logfile" produced by configure, as this will help
+solve the problem.
+EOF
+ else
+cat <<EOF
+Rerun configure with logging enabled (do not use --log=no), and include the
+log this produces with your report.
+EOF
+ fi
+ rm -f $TMPC $TMPO $TMPE $TMPS $TMPH
+ exit 1
+}
+
+# "tr '[a-z]' '[A-Z]'" is a workaround for Solaris tr not grokking "tr a-z A-Z"
+toupper(){
+ echo "$@" | tr '[a-z]' '[A-Z]'
+}
+
+set_all(){
+ value=$1
+ shift
+ for var in $*; do
+ eval $var=$value
+ done
+}
+
+enable(){
+ set_all yes $*
+}
+
+disable(){
+ set_all no $*
+}
+
+enabled(){
+ eval test "x\$$1" = "xyes"
+}
+
+enabled_all(){
+ for opt; do
+ enabled $opt || return 1
+ done
+}
+
+enabled_any(){
+ for opt; do
+ enabled $opt && return 0
+ done
+}
+
+print_config(){
+ pfx=$1
+ header=$2
+ makefile=$3
+ shift 3
+ for cfg; do
+ if enabled $cfg; then
+ ucname="${pfx}`toupper $cfg`"
+ echo "#define ${ucname} 1" >> $header
+ echo "${ucname}=yes" >> $makefile
+ fi
+ done
+}
+
+flags_saved(){
+ (: ${SAVE_CFLAGS?}) 2>/dev/null
+}
+
+save_flags(){
+ flags_saved && return
+ SAVE_CFLAGS="$CFLAGS"
+ SAVE_LDFLAGS="$LDFLAGS"
+ SAVE_extralibs="$extralibs"
+}
+
+restore_flags(){
+ CFLAGS="$SAVE_CFLAGS"
+ LDFLAGS="$SAVE_LDFLAGS"
+ extralibs="$SAVE_extralibs"
+ unset SAVE_CFLAGS
+ unset SAVE_LDFLAGS
+ unset SAVE_extralibs
+}
+
+temp_cflags(){
+ save_flags
+ CFLAGS="$CFLAGS $*"
+}
+
+temp_ldflags(){
+ save_flags
+ LDFLAGS="$LDFLAGS $*"
+}
+
+temp_extralibs(){
+ save_flags
+ extralibs="$extralibs $*"
+}
+
+append(){
+ var=$1
+ shift
+ flags_saved && eval "SAVE_$var=\"\$SAVE_$var $*\""
+ eval "$var=\"\$$var $*\""
+}
+
+add_cflags(){
+ append CFLAGS "$@"
+}
+
+add_ldflags(){
+ append LDFLAGS "$@"
+}
+
+add_extralibs(){
+ append extralibs "$@"
+}
+
+check_cmd(){
+ log "$@"
+ "$@" >>$logfile 2>&1
+}
+
+check_cc(){
+ log check_cc "$@"
+ cat >$TMPC
+ log_file $TMPC
+ check_cmd $cc $CFLAGS "$@" -c -o $TMPO $TMPC
+}
+
+check_cpp(){
+ log check_cpp "$@"
+ cat >$TMPC
+ log_file $TMPC
+ check_cmd $cc $CFLAGS "$@" -E -o $TMPO $TMPC
+}
+
+check_ld(){
+ log check_ld "$@"
+ check_cc || return
+ check_cmd $cc $LDFLAGS "$@" -o $TMPE $TMPO $extralibs
+}
+
+check_cflags(){
+ log check_cflags "$@"
+ check_cc "$@" <<EOF && add_cflags "$@"
+int x;
+EOF
+}
+
+check_ldflags(){
+ log check_ldflags "$@"
+ check_ld "$@" <<EOF && add_ldflags "$@"
+int main(){
+ return 0;
+}
+EOF
+}
+
+check_header(){
+ log check_header "$@"
+ header=$1
+ shift
+ check_cpp "$@" <<EOF
+#include <$header>
+int x;
+EOF
+ err=$?
+ var=`echo $header | sed 's/[^[:alnum:]]/_/g'`
+ test "$err" = 0 && enable $var || disable $var
+ return $err
+}
+
+check_func(){
+ log check_func "$@"
+ func=$1
+ shift
+ check_ld "$@" <<EOF
+extern int $func();
+int main(){
+ $func();
+}
+EOF
+ err=$?
+ test "$err" = 0 && enable $func || disable $func
+ return $err
+}
+
+check_lib(){
+ log check_lib "$@"
+ header="$1"
+ func="$2"
+ shift 2
+ temp_extralibs "$@"
+ check_header $header && check_func $func && add_extralibs "$@"
+ err=$?
+ restore_flags
+ return $err
+}
+
+check_exec(){
+ check_ld "$@" && { test "$cross_compile" = yes || $TMPE >>$logfile 2>&1; }
+}
+
+require(){
+ name="$1"
+ header="$2"
+ func="$3"
+ shift 3
+ check_lib $header $func "$@" || die "ERROR: $name not found"
+}
+
+CONFIG_LIST='
+ encoders
+ decoders
+ parsers
+ muxers
+ demuxers
+ a52
+ a52bin
+ amr
+ amr_nb
+ amr_nb_fixed
+ amr_wb
+ audio_beos
+ audio_oss
+ avisynth
+ beos_netserver
+ bktr
+ dc1394
+ dts
+ dv1394
+ faac
+ faad
+ faadbin
+ ffmpeg
+ ffplay
+ ffserver
+ gpl
+ ipv6
+ libgsm
+ libnut
+ libogg
+ libvorbis
+ memalign_hack
+ mp3lame
+ mpegaudio_hp
+ network
+ pp
+ protocols
+ swscaler
+ vhook
+ video4linux
+ video4linux2
+ wince
+ x264
+ xvid
+ zlib
+'
+
+HAVE_LIST='
+ altivec_h
+ beosthreads
+ byteswap_h
+ dcbzl
+ dlfcn_h
+ dlopen
+ freetype2
+ gprof
+ imlib2
+ inet_aton
+ localtime_r
+ lrintf
+ malloc_h
+ memalign
+ mlib
+ os2
+ os2threads
+ pthreads
+ sdl
+ sdl_video_size
+ threads
+ w32threads
+'
+
+# set temporary file name
+if test ! -z "$TMPDIR" ; then
+ TMPDIR1="${TMPDIR}"
+elif test ! -z "$TEMPDIR" ; then
+ TMPDIR1="${TEMPDIR}"
+else
+ TMPDIR1="/tmp"
+fi
+
+TMPC="${TMPDIR1}/ffmpeg-conf-${RANDOM}-$$-${RANDOM}.c"
+TMPO="${TMPDIR1}/ffmpeg-conf-${RANDOM}-$$-${RANDOM}.o"
+TMPE="${TMPDIR1}/ffmpeg-conf-${RANDOM}-$$-${RANDOM}"
+TMPS="${TMPDIR1}/ffmpeg-conf-${RANDOM}-$$-${RANDOM}.S"
+TMPH="${TMPDIR1}/ffmpeg-conf-${RANDOM}-$$-${RANDOM}.h"
+
+# default parameters
+logging="yes"
+logfile="config.err"
+PREFIX="/usr/local"
+libdir='${PREFIX}/lib'
+shlibdir="$libdir"
+incdir='${PREFIX}/include/ffmpeg'
+mandir='${PREFIX}/man'
+bindir='${PREFIX}/bin'
+cross_prefix=""
+cross_compile="no"
+cc="gcc"
+ar="ar"
+ranlib="ranlib"
+make="make"
+strip="strip"
+arch=`uname -m`
+cpu="generic"
+powerpc_perf="no"
+mmx="default"
+cmov="no"
+cmov_is_fast="no"
+armv5te="default"
+iwmmxt="default"
+altivec="default"
+dcbzl="no"
+mmi="default"
+case "$arch" in
+ i386|i486|i586|i686|i86pc|BePC)
+ arch="x86_32"
+ ;;
+ x86_64|amd64)
+ arch="x86_32"
+ canon_arch="`$cc -dumpmachine | sed -e 's,\([^-]*\)-.*,\1,'`"
+ if [ x"$canon_arch" = x"x86_64" -o x"$canon_arch" = x"amd64" ]; then
+ if [ -z "`echo $CFLAGS | grep -- -m32`" ]; then
+ arch="x86_64"
+ fi
+ fi
+ ;;
+ # armv4l is a subset of armv[567]*l
+ arm|armv[4567]*l)
+ arch="armv4l"
+ ;;
+ alpha)
+ arch="alpha"
+ ;;
+ "Power Macintosh"|ppc|ppc64|powerpc)
+ arch="powerpc"
+ ;;
+ mips|mipsel|IP*)
+ arch="mips"
+ ;;
+ sun4u|sparc64)
+ arch="sparc64"
+ ;;
+ sparc)
+ arch="sparc"
+ ;;
+ sh4)
+ arch="sh4"
+ ;;
+ parisc|parisc64)
+ arch="parisc"
+ ;;
+ s390|s390x)
+ arch="s390"
+ ;;
+ m68k)
+ arch="m68k"
+ ;;
+ ia64)
+ arch="ia64"
+ ;;
+ bfin)
+ arch="bfin"
+ ;;
+ *)
+ arch="unknown"
+ ;;
+esac
+gprof="no"
+video4linux="yes"
+video4linux2="yes"
+bktr="no"
+audio_oss="yes"
+audio_beos="no"
+dv1394="yes"
+dc1394="no"
+network="yes"
+ipv6="yes"
+zlib="yes"
+libgsm="no"
+mp3lame="no"
+libnut="no"
+libogg="no"
+libvorbis="no"
+faad="no"
+faadbin="no"
+faac="no"
+xvid="no"
+x264="no"
+a52="no"
+a52bin="no"
+dts="no"
+pp="no"
+mingw32="no"
+wince="no"
+os2="no"
+lstatic="yes"
+lshared="no"
+optimize="yes"
+debug="yes"
+extrawarnings="no"
+dostrip="yes"
+installstrip="-s"
+extralibs="-lm"
+bigendian="no"
+vhook="default"
+avisynth="no"
+dlfcn_h="no"
+dlopen="no"
+mpegaudio_hp="yes"
+SHFLAGS='-shared -Wl,-soname,$@'
+VHOOKSHFLAGS='$(SHFLAGS)'
+beos_netserver="no"
+protocols="yes"
+ffmpeg="yes"
+ffserver="yes"
+ffplay="yes"
+LIBOBJFLAGS=""
+FFLDFLAGS=-Wl,--warn-common
+LDLATEFLAGS='-Wl,-rpath-link,\$(BUILD_ROOT)/libavcodec -Wl,-rpath-link,\$(BUILD_ROOT)/libavformat -Wl,-rpath-link,\$(BUILD_ROOT)/libavutil'
+FFSERVERLDFLAGS=-Wl,-E
+LDCONFIG="ldconfig"
+LIBPREF="lib"
+LIBSUF=".a"
+LIB='$(LIBPREF)$(NAME)$(LIBSUF)'
+SLIBPREF="lib"
+SLIBSUF=".so"
+SLIBNAME='$(SLIBPREF)$(NAME)$(SLIBSUF)'
+SLIBNAME_WITH_VERSION='$(SLIBNAME).$(LIBVERSION)'
+SLIBNAME_WITH_MAJOR='$(SLIBNAME).$(LIBMAJOR)'
+EXESUF=""
+BUILDSUF=""
+amr_nb="no"
+amr_wb="no"
+amr_nb_fixed="no"
+amr_if2="no"
+mlib="no"
+pthreads="no"
+swscaler="no"
+gpl="no"
+memalign_hack="no"
+asmalign_pot="unknown"
+LIB_INSTALL_EXTRA_CMD='$(RANLIB) "$(libdir)/$(LIB)"'
+
+# OS specific
+targetos=`uname -s`
+case $targetos in
+BeOS)
+PREFIX="/boot/home/config"
+# helps building libavcodec
+add_cflags "-DPIC -fomit-frame-pointer"
+# 3 gcc releases known for BeOS, each with ugly bugs
+gcc_version="`$cc -v 2>&1 | grep version | cut -d ' ' -f3-`"
+case "$gcc_version" in
+2.9-beos-991026*|2.9-beos-000224*) echo "R5/GG gcc"
+mmx="no"
+;;
+*20010315*) echo "BeBits gcc"
+add_cflags "-fno-expensive-optimizations"
+;;
+esac
+SHFLAGS=-nostart
+# disable Linux things
+audio_oss="no"
+video4linux="no"
+video4linux2="no"
+dv1394="no"
+# enable BeOS things
+audio_beos="yes"
+beosthreads="yes"
+# no need for libm, but we do need the inet libraries
+# Check for BONE
+if (echo $BEINCLUDES|grep 'headers/be/bone' >/dev/null); then
+extralibs="-lbind -lsocket"
+else
+beos_netserver="yes"
+extralibs="-lnet"
+fi ;;
+SunOS)
+video4linux="no"
+video4linux2="no"
+audio_oss="no"
+dv1394="no"
+make="gmake"
+FFLDFLAGS=""
+FFSERVERLDFLAGS=""
+SHFLAGS="-shared -Wl,-h,\$@"
+add_extralibs "-lsocket -lnsl"
+;;
+NetBSD)
+video4linux="no"
+video4linux2="no"
+bktr="yes"
+audio_oss="yes"
+dv1394="no"
+make="gmake"
+add_extralibs "-lossaudio"
+;;
+OpenBSD)
+video4linux="no"
+video4linux2="no"
+bktr="yes"
+audio_oss="yes"
+dv1394="no"
+make="gmake"
+LIBOBJFLAGS="\$(PIC)"
+LDCONFIG="ldconfig -m \$(shlibdir)"
+add_extralibs "-lossaudio"
+;;
+FreeBSD)
+video4linux="no"
+video4linux2="no"
+bktr="yes"
+audio_oss="yes"
+dv1394="no"
+make="gmake"
+add_cflags "-pthread"
+;;
+GNU/kFreeBSD)
+video4linux="no"
+video4linux2="no"
+bktr="yes"
+audio_oss="yes"
+dv1394="no"
+add_cflags "-pthread"
+;;
+BSD/OS)
+video4linux="no"
+video4linux2="no"
+bktr="yes"
+audio_oss="yes"
+dv1394="no"
+extralibs="-lpoll -lgnugetopt -lm"
+make="gmake"
+strip="strip -d"
+installstrip=""
+;;
+Darwin)
+cc="cc"
+video4linux="no"
+video4linux2="no"
+audio_oss="no"
+dv1394="no"
+SHFLAGS="-dynamiclib -Wl,-single_module -Wl,-install_name,\$(shlibdir)/\$(SLIBNAME),-current_version,\$(SPPVERSION),-compatibility_version,\$(SPPVERSION)"
+VHOOKSHFLAGS='-dynamiclib -Wl,-single_module -flat_namespace -undefined suppress -Wl,-install_name,$(shlibdir)/vhook/$@'
+extralibs=""
+strip="strip -x"
+installstrip=""
+FFLDFLAGS="-Wl,-dynamic,-search_paths_first"
+SLIBSUF=".dylib"
+SLIBNAME_WITH_FULLVERSION='$(SLIBPREF)$(NAME).$(LIBVERSION)$(SLIBSUF)'
+SLIBNAME_WITH_MAJOR='$(SLIBPREF)$(NAME).$(LIBMAJOR)$(SLIBSUF)'
+FFSERVERLDFLAGS=-Wl,-bind_at_load
+LIB_INSTALL_EXTRA_CMD='$(RANLIB) "$(libdir)/$(LIB)"'
+;;
+MINGW32*)
+# Note: the rest of the mingw32 config is done afterwards as mingw32
+# can be forced on the command line for Linux cross compilation.
+mingw32="yes"
+;;
+CYGWIN*)
+targetos=CYGWIN
+shlibdir='${PREFIX}/bin'
+video4linux="no"
+video4linux2="no"
+audio_oss="yes"
+dv1394="no"
+VHOOKSHFLAGS='-shared -L$(BUILD_ROOT)/libavformat -L$(BUILD_ROOT)/libavcodec -L$(BUILD_ROOT)/libavutil'
+VHOOKLIBS='-lavformat$(BUILDSUF) -lavcodec$(BUILDSUF) -lavutil$(BUILDSUF) $(EXTRALIBS)'
+extralibs=""
+EXESUF=".exe"
+SLIBPREF="cyg"
+SLIBSUF=".dll"
+SLIBNAME_WITH_VERSION='$(SLIBPREF)$(NAME)-$(LIBVERSION)$(SLIBSUF)'
+SLIBNAME_WITH_MAJOR='$(SLIBPREF)$(NAME)-$(LIBMAJOR)$(SLIBSUF)'
+SHFLAGS='-shared -Wl,--out-implib=lib$(NAME).dll.a'
+;;
+Linux)
+LDLATEFLAGS="-Wl,--as-needed $LDLATEFLAGS"
+;;
+IRIX*)
+targetos=IRIX
+ranlib="echo ignoring ranlib"
+video4linux="no"
+video4linux2="no"
+audio_oss="no"
+make="gmake"
+;;
+OS/2)
+TMPE=$TMPE".exe"
+ar="emxomfar -p128"
+ranlib="echo ignoring ranlib"
+strip="echo ignoring strip"
+add_cflags "-Zomf"
+FFLDFLAGS="-Zomf -Zstack 16384 -s"
+SHFLAGS="-Zdll -Zomf"
+FFSERVERLDFLAGS=""
+LIBPREF=""
+LIBSUF=".lib"
+SLIBPREF=""
+SLIBSUF=".dll"
+EXESUF=".exe"
+extralibs=""
+pkg_requires=""
+video4linux="no"
+video4linux2="no"
+audio_oss="no"
+dv1394="no"
+ffserver="no"
+vhook="no"
+os2="yes"
+os2threads="yes"
+;;
+*)
+targetos="${targetos}-UNKNOWN"
+;;
+esac
+
+# find source path
+source_path="`dirname \"$0\"`"
+source_path_used="yes"
+if test -z "$source_path" -o "$source_path" = "." ; then
+ source_path="`pwd`"
+ source_path_used="no"
+else
+ source_path="`cd \"$source_path\"; pwd`"
+ echo "$source_path" | grep -q '[[:blank:]]' &&
+ die "Out of tree builds are impossible with whitespace in source path."
+fi
+
+if test x"$1" = x"-h" -o x"$1" = x"--help" ; then
+ show_help
+fi
+
+FFMPEG_CONFIGURATION=" "
+for opt do
+ FFMPEG_CONFIGURATION="$FFMPEG_CONFIGURATION""$opt "
+done
+
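+# Build the component lists by scraping libavcodec/allcodecs.c and
+# libavformat/allformats.c for the registered encoders, decoders, parsers,
+# muxers and demuxers.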
+ENCODER_LIST=`sed -n 's/^[^#]*ENC.*, *\(.*\)).*/\1_encoder/p' "$source_path/libavcodec/allcodecs.c"`
+DECODER_LIST=`sed -n 's/^[^#]*DEC.*, *\(.*\)).*/\1_decoder/p' "$source_path/libavcodec/allcodecs.c"`
+PARSER_LIST=`sed -n 's/^[^#]*PARSER.*, *\(.*\)).*/\1_parser/p' "$source_path/libavcodec/allcodecs.c"`
+MUXER_LIST=`sed -n 's/^[^#]*_MUX.*, *\(.*\)).*/\1_muxer/p' "$source_path/libavformat/allformats.c"`
+DEMUXER_LIST=`sed -n 's/^[^#]*DEMUX.*, *\(.*\)).*/\1_demuxer/p' "$source_path/libavformat/allformats.c"`
+
+enable $ENCODER_LIST $DECODER_LIST $PARSER_LIST $MUXER_LIST $DEMUXER_LIST
+
+for opt do
+ optval="${opt#*=}"
+ case "$opt" in
+ --log)
+ ;;
+ --log=*) logging="$optval"
+ ;;
+ --prefix=*) PREFIX="$optval"; force_prefix=yes
+ ;;
+ --libdir=*) libdir="$optval"; force_libdir=yes
+ ;;
+ --shlibdir=*) shlibdir="$optval"
+ ;;
+ --incdir=*) incdir="$optval"
+ ;;
+ --mandir=*) mandir="$optval"
+ ;;
+ --source-path=*) source_path="$optval"
+ ;;
+ --build-path=*) build_path="$optval"
+ ;;
+ --cross-prefix=*) cross_prefix="$optval"
+ ;;
+ --cross-compile) cross_compile=yes
+ ;;
+ --cc=*) cc="$optval"
+ ;;
+ --make=*) make="$optval"
+ ;;
+ --extra-cflags=*) add_cflags "$optval"
+ ;;
+ --extra-ldflags=*) add_ldflags "$optval"
+ ;;
+ --extra-libs=*) add_extralibs "$optval"
+ ;;
+ --build-suffix=*) BUILDSUF="$optval"
+ ;;
+ --arch=*) arch="$optval"
+ ;;
+ --cpu=*) cpu="$optval"
+ ;;
+ --powerpc-perf-enable) powerpc_perf="yes"
+ ;;
+ --disable-mmx) mmx="no"
+ ;;
+ --disable-armv5te) armv5te="no"
+ ;;
+ --disable-iwmmxt) iwmmxt="no"
+ ;;
+ --disable-altivec) altivec="no"
+ ;;
+ --enable-gprof) gprof="yes"
+ ;;
+ --disable-v4l) video4linux="no"
+ ;;
+ --disable-v4l2) video4linux2="no"
+ ;;
+ --disable-bktr) bktr="no"
+ ;;
+ --disable-audio-oss) audio_oss="no"
+ ;;
+ --disable-audio-beos) audio_beos="no"
+ ;;
+ --disable-dv1394) dv1394="no"
+ ;;
+ --disable-network) network="no"; ffserver="no"
+ ;;
+ --disable-ipv6) ipv6="no";
+ ;;
+ --disable-zlib) zlib="no"
+ ;;
+ --enable-a52) a52="yes"
+ ;;
+ --enable-a52bin) a52bin="yes"
+ ;;
+ --enable-dts) dts="yes"
+ ;;
+ --enable-pp) pp="yes"
+ ;;
+ --enable-libgsm) libgsm="yes"
+ ;;
+ --enable-mp3lame) mp3lame="yes"
+ ;;
+ --enable-libnut) libnut="yes"
+ ;;
+ --enable-libogg) libogg="yes"
+ pkg_requires="$pkg_requires ogg >= 1.1"
+ ;;
+ --enable-vorbis) libvorbis="yes"
+ pkg_requires="$pkg_requires vorbis vorbisenc"
+ ;;
+ --enable-faad) faad="yes"
+ ;;
+ --enable-faadbin) faadbin="yes"
+ ;;
+ --enable-faac) faac="yes"
+ ;;
+ --enable-xvid) xvid="yes"
+ ;;
+ --enable-x264) x264="yes"
+ ;;
+ --enable-avisynth) avisynth="yes";
+ ;;
+ --enable-dc1394) dc1394="yes"
+ pkg_requires="$pkg_requires libraw1394"
+ ;;
+ --disable-vhook) vhook="no"
+ ;;
+ --enable-mingw32) mingw32="yes"
+ ;;
+ --enable-mingwce) wince="yes"
+ ;;
+ --enable-static) lstatic="yes"
+ ;;
+ --disable-static) lstatic="no"
+ ;;
+ --enable-shared) lshared="yes"
+ ;;
+ --disable-shared) lshared="no"
+ ;;
+ --disable-debug) debug="no"
+ ;;
+ --disable-opts) optimize="no"
+ ;;
+ --enable-extra-warnings) extrawarnings="yes"
+ ;;
+ --disable-mpegaudio-hp) mpegaudio_hp="no"
+ ;;
+ --disable-protocols) protocols="no"; network="no"; ffserver="no"
+ ;;
+ --disable-ffmpeg) ffmpeg="no"
+ ;;
+ --disable-ffserver) ffserver="no"
+ ;;
+ --disable-ffplay) ffplay="no"
+ ;;
+ --enable-small) optimize="small"
+ ;;
+ --enable-amr_nb) amr="yes"; amr_nb="yes"; amr_nb_fixed="no"
+ ;;
+ --enable-amr_nb-fixed) amr="yes"; amr_nb_fixed="yes"; amr_nb="no"
+ ;;
+ --enable-amr_wb) amr="yes"; amr_wb="yes"
+ ;;
+ --enable-amr_if2) amr="yes"; amr_if2="yes"
+ ;;
+ --enable-sunmlib) mlib="yes"
+ ;;
+ --enable-pthreads) pthreads="yes"
+ ;;
+ --enable-swscaler) swscaler="yes"
+ ;;
+ --enable-gpl) gpl="yes"
+ ;;
+ --enable-memalign-hack) memalign_hack="yes"
+ ;;
+ --disable-strip) dostrip="no"
+ ;;
+ --enable-encoder=*) enable ${optval}_encoder
+ ;;
+ --enable-decoder=*) enable ${optval}_decoder
+ ;;
+ --disable-encoder=*) disable ${optval}_encoder
+ ;;
+ --disable-decoder=*) disable ${optval}_decoder
+ ;;
+ --disable-encoders) disable $ENCODER_LIST
+ ;;
+ --disable-decoders) disable $DECODER_LIST
+ ;;
+ --enable-muxer=*) enable ${optval}_muxer
+ ;;
+ --disable-muxer=*) disable ${optval}_muxer
+ ;;
+ --disable-muxers) disable $MUXER_LIST; ffserver="no"
+ ;;
+ --enable-demuxer=*) enable ${optval}_demuxer
+ ;;
+ --disable-demuxer=*) disable ${optval}_demuxer
+ ;;
+ --disable-demuxers) disable $DEMUXER_LIST
+ ;;
+ --enable-parser=*) enable ${optval}_parser
+ ;;
+ --disable-parser=*) disable ${optval}_parser
+ ;;
+ --disable-parsers) disable $PARSER_LIST
+ ;;
+ --help) show_help
+ ;;
+ *)
+ echo "Unknown option \"$opt\"."
+ echo "See $0 --help for available options."
+ exit 1
+ ;;
+ esac
+done
+
+cd "$build_path"
+
+if test "$logging" != no; then
+ test "$logging" = yes || logfile="$logging"
+ echo "# $0 $@" >$logfile
+ set >>$logfile
+else
+ logfile=/dev/null
+fi
+
+if test "$mingw32" = "yes" -o "$wince" = "yes"; then
+ if test "$lshared" = "yes" && test "$lstatic" = "yes" ; then
+ cat <<EOF
+You can only build one library type at once on MinGW.
+Specify --disable-static --enable-shared to only build
+the shared libraries. To build only the static libraries
+you do not need to pass additional options.
+EOF
+ exit 1
+ fi
+ video4linux="no"
+ video4linux2="no"
+ bktr="no"
+ audio_oss="no"
+ dv1394="no"
+ dc1394="no"
+ ffserver="no"
+ network="no"
+ if enabled mingw32; then
+ w32threads="yes"
+ fi
+ if test "$wince" = "yes"; then
+ protocols="no"
+ fi
+ SLIBPREF=""
+ SLIBSUF=".dll"
+ EXESUF=".exe"
+ SLIBNAME_WITH_VERSION='$(SLIBPREF)$(NAME)-$(LIBVERSION)$(SLIBSUF)'
+ SLIBNAME_WITH_MAJOR='$(SLIBPREF)$(NAME)-$(LIBMAJOR)$(SLIBSUF)'
+ SLIB_EXTRA_CMD="-lib /machine:i386 /def:\$(@:${SLIBSUF}=.def)"
+ SHFLAGS="-shared -Wl,--output-def,\$(@:${SLIBSUF}=.def),--out-implib,lib\$(SLIBNAME:\$(SLIBSUF)=.dll.a)"
+ if test "$force_prefix" != yes; then PREFIX="$PROGRAMFILES/FFmpeg"; fi
+ if test "$force_libdir" != yes; then bindir='${PREFIX}'; fi
+ shlibdir='${PREFIX}'
+fi
+
+# Combine FFLDFLAGS and the LDFLAGS environment variable.
+LDFLAGS="$FFLDFLAGS $LDFLAGS"
+
+test -n "$cross_prefix" && cross_compile=yes
+cc="${cross_prefix}${cc}"
+ar="${cross_prefix}${ar}"
+ranlib="${cross_prefix}${ranlib}"
+strip="${cross_prefix}${strip}"
+
+# we need to build at least one lib type
+if test "$lstatic" = "no" && test "$lshared" = "no" ; then
+ cat <<EOF
+At least one library type must be built.
+Specify --enable-static to build the static libraries or --enable-shared to
+build the shared libraries as well. To only build the shared libraries specify
+--disable-static in addition to --enable-shared.
+EOF
+ exit 1;
+fi
+
+if test "$libvorbis" = "yes" ; then
+ if test "$libogg" = "no"; then
+ echo "libogg must be enabled to enable Vorbis."
+ fail="yes"
+ fi
+fi
+
+if test "$gpl" != "yes"; then
+ if test "$pp" != "no"; then
+ echo "The Postprocessing code is under GPL and --enable-gpl is not specified."
+ fail="yes"
+ fi
+
+ if test "$a52" != "no" -o "$a52bin" != "no"; then
+ echo "liba52 is under GPL and --enable-gpl is not specified."
+ fail="yes"
+ fi
+
+ if test "$xvid" != "no"; then
+ echo "libxvidcore is under GPL and --enable-gpl is not specified."
+ fail="yes"
+ fi
+
+ if test "$x264" != "no"; then
+ echo "x264 is under GPL and --enable-gpl is not specified."
+ fail="yes"
+ fi
+
+ if test "$dts" != "no"; then
+ echo "libdts is under GPL and --enable-gpl is not specified."
+ fail="yes"
+ fi
+
+ if test "$faad" != "no" -o "$faadbin" != "no"; then
+ if check_header faad.h; then
+ check_cc << EOF
+ #include <faad.h>
+ #ifndef FAAD2_VERSION
+ ok faad1
+ #endif
+ int main( void ) { return 0; }
+EOF
+ if test $? = 0 ; then
+ echo "FAAD2 is under GPL and --enable-gpl is not specified."
+ fail="yes"
+ fi
+ else
+ faad="no"
+ faadbin="no"
+ echo "FAAD test failed."
+ fi
+ fi
+
+ if test "$swscaler" != "no"; then
+ echo "The software scaler is under GPL and --enable-gpl is not specified."
+ fail="yes"
+ fi
+fi
+
+if test "$fail" = "yes"; then
+ exit 1
+fi
+
+# compute MMX state
+if test $mmx = "default"; then
+ if test $arch = "x86_32" -o $arch = "x86_64"; then
+ mmx="yes"
+ else
+ mmx="no"
+ fi
+fi
+
+#Darwin CC versions
+needmdynamicnopic="no"
+if test $targetos = Darwin; then
+ if test -n "`$cc -v 2>&1 | grep xlc`"; then
+ add_cflags "-qpdf2 -qlanglvl=extc99 -qmaxmem=-1 -qarch=auto -qtune=auto"
+ else
+ gcc_version="`$cc -v 2>&1 | grep version | cut -d ' ' -f3-`"
+ case "$gcc_version" in
+ *2.95*)
+ add_cflags "-no-cpp-precomp -pipe"
+ ;;
+ *[34].*)
+ add_cflags "-no-cpp-precomp -pipe -force_cpusubtype_ALL -Wno-sign-compare"
+ if test "$lshared" = no; then
+ needmdynamicnopic="yes"
+ fi
+ ;;
+ *)
+ add_cflags "-no-cpp-precomp -pipe"
+ if test "$lshared" = no; then
+ needmdynamicnopic="yes"
+ fi
+ ;;
+ esac
+ fi
+fi
+
+if test $optimize != "no"; then
+ add_cflags "-fomit-frame-pointer"
+fi
+
+# Can only do AltiVec on PowerPC
+if test $altivec = "default"; then
+ if test $arch = "powerpc"; then
+ altivec="yes"
+ else
+ altivec="no"
+ fi
+fi
+
+# Add processor-specific flags
+POWERPCMODE="32bits"
+if test $cpu != "generic"; then
+ case $cpu in
+ 601|ppc601|PowerPC601)
+ add_cflags "-mcpu=601"
+ if test $altivec = "yes"; then
+ echo "WARNING: Tuning for PPC601 but AltiVec enabled!";
+ fi
+ ;;
+ 603*|ppc603*|PowerPC603*)
+ add_cflags "-mcpu=603"
+ if test $altivec = "yes"; then
+ echo "WARNING: Tuning for PPC603 but AltiVec enabled!";
+ fi
+ ;;
+ 604*|ppc604*|PowerPC604*)
+ add_cflags "-mcpu=604"
+ if test $altivec = "yes"; then
+ echo "WARNING: Tuning for PPC604 but AltiVec enabled!";
+ fi
+ ;;
+ G3|g3|75*|ppc75*|PowerPC75*)
+ add_cflags "-mcpu=750 -mpowerpc-gfxopt"
+ if test $altivec = "yes"; then
+ echo "WARNING: Tuning for PPC75x but AltiVec enabled!";
+ fi
+ ;;
+ G4|g4|745*|ppc745*|PowerPC745*)
+ add_cflags "-mcpu=7450 -mpowerpc-gfxopt"
+ if test $altivec = "no"; then
+ echo "WARNING: Tuning for PPC745x but AltiVec disabled!";
+ fi
+ ;;
+ 74*|ppc74*|PowerPC74*)
+ add_cflags "-mcpu=7400 -mpowerpc-gfxopt"
+ if test $altivec = "no"; then
+ echo "WARNING: Tuning for PPC74xx but AltiVec disabled!";
+ fi
+ ;;
+ G5|g5|970|ppc970|PowerPC970|power4*|Power4*)
+ add_cflags "-mcpu=970 -mpowerpc-gfxopt -mpowerpc64"
+ if test $altivec = "no"; then
+ echo "WARNING: Tuning for PPC970 but AltiVec disabled!";
+ fi
+ POWERPCMODE="64bits"
+ ;;
+ # targets that do NOT support conditional mov (cmov)
+ i[345]86|pentium|pentium-mmx|k6|k6-[23]|winchip-c6|winchip2|c3)
+ add_cflags "-march=$cpu"
+ cmov="no"
+ ;;
+ # targets that do support conditional mov (cmov)
+ i686|pentiumpro|pentium[23]|pentium-m|athlon|athlon-tbird|athlon-4|athlon-[mx]p|athlon64|k8|opteron|athlon-fx)
+ add_cflags "-march=$cpu"
+ cmov="yes"
+ cmov_is_fast="yes"
+ ;;
+ # targets that do support conditional mov but on which it's slow
+ pentium4|prescott|nocona)
+ add_cflags "-march=$cpu"
+ cmov="yes"
+ cmov_is_fast="no"
+ ;;
+ sparc64)
+ add_cflags "-mcpu=v9"
+ ;;
+ *)
+ echo "WARNING: Unknown CPU \"$cpu\", ignored."
+ ;;
+ esac
+fi
+
+# make sure we can execute files in $TMPDIR
+cat >$TMPE 2>>$logfile <<EOF
+#! /bin/sh
+EOF
+chmod +x $TMPE >>$logfile 2>&1
+if ! $TMPE >>$logfile 2>&1; then
+ cat <<EOF
+Unable to create and execute files in $TMPDIR1. Set the TMPDIR environment
+variable to another directory and make sure that $TMPDIR1 is not mounted
+noexec.
+EOF
+ die "Sanity test failed."
+fi
+rm $TMPE
+
+# compiler sanity check
+check_exec <<EOF
+int main(){
+ return 0;
+}
+EOF
+if test "$?" != 0; then
+ echo "$cc is unable to create an executable file."
+ if test -z "$cross_prefix" -a "$cross_compile" = no; then
+ echo "If $cc is a cross-compiler, use the --cross-compile option."
+ echo "Only do this if you know what cross compiling means."
+ fi
+ die "C compiler test failed."
+fi
+
+# check for assembler specific support
+
+if test $arch = "powerpc"; then
+check_cc <<EOF && dcbzl=yes
+int main(void) {
+ register long zero = 0;
+ char data[1024];
+ asm volatile("dcbzl %0, %1" : : "b" (data), "r" (zero));
+return 0;
+}
+EOF
+fi
+
+# check for SIMD availability
+
+# AltiVec flags: The FSF version of GCC differs from the Apple version
+if test $arch = "powerpc"; then
+ if test $altivec = "yes"; then
+ if test -n "`$cc -v 2>&1 | grep version | grep Apple`"; then
+ add_cflags "-faltivec"
+ else
+ add_cflags "-maltivec -mabi=altivec"
+ fi
+ fi
+fi
+
+check_header altivec.h
+
+# check if our compiler supports Motorola AltiVec C API
+if test $altivec = "yes"; then
+ if test $altivec_h = "yes"; then
+ inc_altivec_h="#include <altivec.h>"
+ else
+ inc_altivec_h=
+ fi
+ check_cc <<EOF || altivec=no
+$inc_altivec_h
+int main(void) {
+ vector signed int v1, v2, v3;
+ v1 = vec_add(v2,v3);
+ return 0;
+}
+EOF
+fi
+
+# check armv5te instructions support
+if test $armv5te = "default" -a $arch = "armv4l"; then
+ armv5te=no
+ check_cc <<EOF && armv5te=yes
+ int main(void) {
+ __asm__ __volatile__ ("qadd r0, r0, r0");
+ }
+EOF
+fi
+
+# check iwmmxt support
+if test $iwmmxt = "default" -a $arch = "armv4l"; then
+ iwmmxt=no
+ check_cc <<EOF && iwmmxt=yes
+ int main(void) {
+ __asm__ __volatile__ ("wunpckelub wr6, wr4");
+ }
+EOF
+fi
+
+# mmi only available on mips
+if test $mmi = "default"; then
+ if test $arch = "mips"; then
+ mmi="yes"
+ else
+ mmi="no"
+ fi
+fi
+
+# check if our compiler supports mmi
+enabled mmi && check_cc <<EOF || mmi="no"
+int main(void) {
+ __asm__ ("lq \$2, 0(\$2)");
+ return 0;
+}
+EOF
+
+# ---
+# big/little-endian test
+if test "$cross_compile" = "no"; then
+ check_ld <<EOF || die "endian test failed" && $TMPE && bigendian="yes"
+#include <inttypes.h>
+int main(int argc, char ** argv){
+ volatile uint32_t i=0x01234567;
+ return (*((uint8_t*)(&i))) == 0x67;
+}
+EOF
+else
+ # programs cannot be launched if cross compiling, so make a static guess
+ if test "$arch" = "powerpc" -o "$arch" = "mips" ; then
+ bigendian="yes"
+ fi
+fi
+
+# ---
+# check availability of some header files
+
+check_header malloc.h
+check_func memalign
+
+if test "$memalign" = "no" -a "$mmx" = "yes" -a \
+ "$memalign_hack" != "yes" -a "$targetos" != "Darwin" -a \
+ "$targetos" != "FreeBSD" ; then
+ die "Error, no memalign() but SSE enabled, disable it or use --enable-memalign-hack."
+fi
+
+check_header byteswap.h
+
+check_func inet_aton
+check_func localtime_r
+enabled zlib && check_lib zlib.h zlibVersion -lz || zlib="no"
+
+# check for some common methods of building with pthread support
+# do this before the optional library checks as some of them require pthreads
+if enabled pthreads; then
+ if check_func pthread_create; then
+ :
+ elif check_func pthread_create -pthread; then
+ add_cflags -pthread
+ add_ldflags -pthread
+ elif check_func pthread_create -pthreads; then
+ add_cflags -pthreads
+ add_ldflags -pthreads
+ elif ! check_lib pthread.h pthread_create -lpthread; then
+ die "ERROR: can't find pthreads library"
+ fi
+fi
+
+# these are off by default, so fail if requested and not available
+enabled dts && require libdts dts.h dts_init -ldts -lm
+enabled libgsm && require libgsm gsm.h gsm_create -lgsm
+enabled mp3lame && require LAME lame/lame.h lame_init -lmp3lame -lm
+enabled libvorbis && require libvorbis vorbis/vorbisenc.h vorbis_info_init -lvorbis -lvorbisenc -logg
+enabled libogg && require libogg ogg/ogg.h ogg_sync_init -logg
+enabled libnut && require libnut libnut.h nut_demuxer_init -lnut
+enabled xvid && require XviD xvid.h xvid_global -lxvidcore
+enabled x264 && require x264 x264.h x264_encoder_open -lx264
+enabled dc1394 && require libdc1394 libdc1394/dc1394_control.h dc1394_create_handle -ldc1394_control -lraw1394
+enabled mlib && require mediaLib mlib_types.h mlib_VectorSub_S16_U8_Mod -lmlib
+
+# Ugh, faac uses stdcall calling convention on win32 so we can't use
+# the generic test functions
+if enabled faac; then
+ save_flags
+ temp_extralibs -lfaac
+ check_ld <<EOF && add_extralibs -lfaac || die "ERROR: libfaac not found"
+#include <stdint.h>
+#include <faac.h>
+int main(){
+ char *id, *cpr;
+ faacEncGetVersion(&id, &cpr);
+ return 0;
+}
+EOF
+ restore_flags
+fi
+
+# Ugh, recent faad2 versions have renamed all functions and #define the
+# old names in faad.h. Generic tests won't work.
+if enabled faad; then
+ save_flags
+ temp_extralibs -lfaad
+ check_ld <<EOF && add_extralibs -lfaad || die "ERROR: libfaad not found"
+#include <faad.h>
+int main(){
+ faacDecOpen();
+ return 0;
+}
+EOF
+ restore_flags
+fi
+
+# Ugh, avisynth uses WINAPI calls. Generic tests won't work.
+if enabled avisynth; then
+ save_flags
+ temp_extralibs -lvfw32
+ check_ld <<EOF && add_extralibs -lvfw32 || die "ERROR: vfw32 not found"
+#include <windows.h>
+#include <vfw.h>
+int main(){
+ AVIFileInit();
+ return 0;
+}
+EOF
+ restore_flags
+fi
+
+
+# test for lrintf in math.h
+check_exec <<EOF && lrintf=yes || lrintf=no
+#define _ISOC9X_SOURCE 1
+#include <math.h>
+int main( void ) { return (lrintf(3.999f) > 0)?0:1; }
+EOF
+
+_restrict=
+for restrict_keyword in restrict __restrict__ __restrict; do
+ check_cc <<EOF && _restrict=$restrict_keyword && break
+void foo(char * $restrict_keyword p);
+EOF
+done
+
+# dlopen/dlfcn.h probing
+
+check_header dlfcn.h
+
+if check_func dlopen; then
+ ldl=
+elif check_func dlopen -ldl; then
+ ldl=-ldl
+fi
+
+if test "$vhook" = "default"; then
+ vhook="$dlopen"
+fi
+
+if enabled_any vhook a52bin faadbin; then
+ add_extralibs $ldl
+fi
+
+if test "$targetos" = "CYGWIN" -a "$lstatic" = "yes" ; then
+ vhook="no"
+ echo
+ echo "At the moment vhooks don't work on Cygwin static builds."
+ echo "Patches welcome."
+ echo
+fi
+
+if enabled vhook; then
+ check_ldflags -rdynamic
+ check_ldflags -export-dynamic
+fi
+
+if enabled audio_beos; then
+ add_extralibs "-lmedia -lbe"
+fi
+
+##########################################
+# imlib check
+
+temp_extralibs -lImlib2
+check_ld <<EOF && imlib2=yes || imlib2=no
+#include <X11/Xlib.h>
+#include <Imlib2.h>
+int main( void ) { return (int) imlib_load_font("foo"); }
+EOF
+restore_flags
+
+##########################################
+# FreeType check
+
+freetype2=no
+if test "x$targetos" != "xBeOS"; then
+ if (freetype-config --version) >/dev/null 2>&1 ; then
+ temp_cflags `freetype-config --cflags`
+ temp_extralibs `freetype-config --libs`
+ check_ld <<EOF && freetype2=yes
+#include <ft2build.h>
+int main( void ) { return (int) FT_Init_FreeType(0); }
+EOF
+ restore_flags
+ fi
+fi
+
+##########################################
+# SDL check
+
+sdl_too_old=no
+sdl=no
+SDL_CONFIG="${cross_prefix}sdl-config"
+if ("${SDL_CONFIG}" --version) >/dev/null 2>&1 ; then
+ temp_cflags `"${SDL_CONFIG}" --cflags`
+ temp_extralibs `"${SDL_CONFIG}" --libs`
+ check_ld <<EOF
+#include <SDL.h>
+#undef main /* We don't want SDL to override our main() */
+int main( void ) { return SDL_Init (SDL_INIT_VIDEO); }
+EOF
+ if test $? = 0; then
+ _sdlversion=`"${SDL_CONFIG}" --version | sed 's/[^0-9]//g'`
+ if test "$_sdlversion" -lt 121 ; then
+ sdl_too_old=yes
+ else
+ sdl=yes
+ check_cc <<EOF && sdl_video_size=yes || sdl_video_size=no
+#include <SDL.h>
+int main(void){
+ const SDL_VideoInfo *vi = SDL_GetVideoInfo();
+ int w = vi->current_w;
+ return 0;
+}
+EOF
+ fi
+ fi
+ restore_flags
+fi
+
+enabled sdl || ffplay=no
+
+##########################################
+# texi2html check
+
+texi2html=no
+if (texi2html -version) >/dev/null 2>&1; then
+texi2html=yes
+fi
+
+##########################################
+# IPv6 check
+
+enabled network && enabled ipv6 && check_ld <<EOF && ipv6=yes || ipv6=no
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netdb.h>
+int main( void ) {
+ struct sockaddr_storage saddr;
+ struct ipv6_mreq mreq6;
+ getaddrinfo(0,0,0,0);
+ getnameinfo(0,0,0,0,0,0,0);
+ IN6_IS_ADDR_MULTICAST((const struct in6_addr *)0);
+}
+EOF
+
+# check for video4linux2 --- V4L2_PIX_FMT_YUV420
+enabled video4linux2 && check_cc <<EOF || video4linux2="no"
+#include <sys/time.h>
+#include <asm/types.h>
+#include <linux/videodev2.h>
+int dummy = V4L2_PIX_FMT_YUV420;
+struct v4l2_buffer dummy1;
+EOF
+
+enabled debug && add_cflags -g
+
+# add some useful compiler flags if supported
+check_cflags -Wdeclaration-after-statement
+check_cflags -Wall
+check_cflags -Wno-switch
+check_cflags -Wdisabled-optimization
+check_cflags -Wpointer-arith
+check_cflags -Wredundant-decls
+enabled extrawarnings && check_cflags -Winline
+
+# add some linker flags
+check_ldflags $LDLATEFLAGS
+
+# not all compilers support -Os
+test "$optimize" = "small" && check_cflags -Os
+
+if enabled optimize; then
+ if test -n "`$cc -v 2>&1 | grep xlc`"; then
+ add_cflags "-O5"
+ add_ldflags "-O5"
+ else
+ add_cflags "-O3"
+ fi
+fi
+
+# PIC flags for shared library objects where they are needed
+if test "$lshared" = "yes" ; then
+ # LIBOBJFLAGS may have already been set in the OS configuration
+ if test -z "$LIBOBJFLAGS" ; then
+ case "$arch" in
+ x86_64|ia64|alpha|sparc*) LIBOBJFLAGS="\$(PIC)" ;;
+ esac
+ fi
+fi
+
+if test "$gprof" = "yes" ; then
+ add_cflags "-p"
+ add_ldflags "-p"
+fi
+
+VHOOKCFLAGS="-fPIC $CFLAGS"
+test "$needmdynamicnopic" = yes && add_cflags -mdynamic-no-pic
+
+# find if .align arg is power-of-two or not
+if test $asmalign_pot = "unknown"; then
+ asmalign_pot="no"
+ echo 'asm (".align 3");' | check_cc && asmalign_pot="yes"
+fi
+
+echo "install prefix $PREFIX"
+echo "source path $source_path"
+echo "C compiler $cc"
+echo "make $make"
+echo "ARCH $arch ($cpu)"
+if test "$BUILDSUF" != ""; then
+ echo "build suffix $BUILDSUF"
+fi
+echo "big-endian $bigendian"
+if test $arch = "x86_32" -o $arch = "x86_64"; then
+ echo "MMX enabled $mmx"
+ echo "CMOV enabled $cmov"
+ echo "CMOV is fast $cmov_is_fast"
+fi
+if test $arch = "armv4l"; then
+ echo "ARMv5TE enabled $armv5te"
+ echo "IWMMXT enabled $iwmmxt"
+fi
+if test $arch = "mips"; then
+ echo "MMI enabled $mmi"
+fi
+if test $arch = "powerpc"; then
+ echo "AltiVec enabled $altivec"
+ echo "dcbzl available $dcbzl"
+fi
+echo "gprof enabled $gprof"
+echo "zlib enabled $zlib"
+echo "libgsm enabled $libgsm"
+echo "mp3lame enabled $mp3lame"
+echo "libnut enabled $libnut"
+echo "libogg enabled $libogg"
+echo "Vorbis enabled $libvorbis"
+echo "FAAD enabled $faad"
+echo "faadbin enabled $faadbin"
+echo "FAAC enabled $faac"
+echo "XviD enabled $xvid"
+echo "x264 enabled $x264"
+echo "a52 support $a52"
+echo "a52 dlopened $a52bin"
+echo "DTS support $dts"
+echo "pp support $pp"
+echo "Software Scaler enabled $swscaler"
+echo "AVISynth enabled $avisynth"
+echo "debug symbols $debug"
+echo "strip symbols $dostrip"
+echo "optimize $optimize"
+echo "static $lstatic"
+echo "shared $lshared"
+echo "video hooking $vhook"
+echo "SDL support $sdl"
+if test $sdl_too_old = "yes"; then
+ echo "-> Your SDL version is too old - please upgrade to have FFplay/SDL support."
+fi
+
+if test "$vhook" = "yes"; then
+ echo "Imlib2 support $imlib2"
+ echo "FreeType support $freetype2"
+fi
+echo "Sun medialib support" $mlib
+echo "pthreads support" $pthreads
+echo "AMR-NB float support" $amr_nb
+echo "AMR-NB fixed support" $amr_nb_fixed
+echo "AMR-WB float support" $amr_wb
+echo "AMR-WB IF2 support" $amr_if2
+echo "network support $network"
+if test "$network" = "yes" ; then
+ echo "IPv6 support $ipv6"
+fi
+echo ".align is power-of-two" $asmalign_pot
+if test "$gpl" = "no" ; then
+ echo "License: LGPL"
+else
+ echo "License: GPL"
+fi
+
+echo "Creating config.mak and config.h..."
+
+echo "# Automatically generated by configure - do not modify!" > config.mak
+echo "/* Automatically generated by configure - do not modify! */" > $TMPH
+echo "#define FFMPEG_CONFIGURATION "'"'"$FFMPEG_CONFIGURATION"'"' >> $TMPH
+
+echo "PREFIX=$PREFIX" >> config.mak
+echo "prefix=\$(DESTDIR)\${PREFIX}" >> config.mak
+echo "libdir=\$(DESTDIR)$libdir" >> config.mak
+echo "shlibdir=\$(DESTDIR)$shlibdir" >> config.mak
+echo "incdir=\$(DESTDIR)$incdir" >> config.mak
+echo "bindir=\$(DESTDIR)$bindir" >> config.mak
+echo "mandir=\$(DESTDIR)$mandir" >> config.mak
+echo "MAKE=$make" >> config.mak
+echo "CC=$cc" >> config.mak
+echo "AR=$ar" >> config.mak
+echo "RANLIB=$ranlib" >> config.mak
+if test "$dostrip" = "yes" ; then
+ echo "STRIP=$strip" >> config.mak
+ echo "INSTALLSTRIP=$installstrip" >> config.mak
+else
+ echo "STRIP=echo ignoring strip" >> config.mak
+ echo "INSTALLSTRIP=" >> config.mak
+fi
+
+echo "OPTFLAGS=$CFLAGS" >> config.mak
+echo "VHOOKCFLAGS=$VHOOKCFLAGS">>config.mak
+echo "LDFLAGS=$LDFLAGS" >> config.mak
+echo "LDCONFIG=$LDCONFIG" >> config.mak
+echo "FFSERVERLDFLAGS=$FFSERVERLDFLAGS" >> config.mak
+echo "SHFLAGS=$SHFLAGS" >> config.mak
+echo "VHOOKSHFLAGS=$VHOOKSHFLAGS" >> config.mak
+echo "VHOOKLIBS=$VHOOKLIBS" >> config.mak
+echo "LIBOBJFLAGS=$LIBOBJFLAGS" >> config.mak
+echo "BUILD_STATIC=$lstatic" >> config.mak
+echo "BUILDSUF=$BUILDSUF" >> config.mak
+echo "LIBPREF=$LIBPREF" >> config.mak
+echo "LIBSUF=\${BUILDSUF}$LIBSUF" >> config.mak
+if test "$lstatic" = "yes" ; then
+ echo "LIB=$LIB" >> config.mak
+else # Some versions of Make complain if this variable does not exist.
+ echo "LIB=" >> config.mak
+fi
+echo "SLIBPREF=$SLIBPREF" >> config.mak
+echo "SLIBSUF=\${BUILDSUF}$SLIBSUF" >> config.mak
+echo "EXESUF=\${BUILDSUF}$EXESUF" >> config.mak
+echo "TARGET_OS=$targetos" >> config.mak
+
+ucarch=`toupper $arch`
+echo "TARGET_ARCH_${ucarch}=yes" >> config.mak
+echo "#define ARCH_${ucarch} 1" >> $TMPH
+
+# special cases
+case "$arch" in
+ x86_32|x86_64)
+ echo "TARGET_ARCH_X86=yes" >> config.mak
+ echo "#define ARCH_X86 1" >> $TMPH
+ ;;
+ powerpc)
+ if test "$POWERPCMODE" = "64bits"; then
+ echo "#define POWERPC_MODE_64BITS 1" >> $TMPH
+ fi
+ if test "$powerpc_perf" = "yes"; then
+ echo "#define POWERPC_PERFORMANCE_REPORT 1" >> $TMPH
+ fi
+ ;;
+ sparc64)
+ echo "TARGET_ARCH_SPARC=yes" >> config.mak
+ echo "#define ARCH_SPARC 1" >> $TMPH
+ ;;
+esac
+
+if test "$bigendian" = "yes" ; then
+ echo "WORDS_BIGENDIAN=yes" >> config.mak
+ echo "#define WORDS_BIGENDIAN 1" >> $TMPH
+fi
+if test "$mmx" = "yes" ; then
+ echo "TARGET_MMX=yes" >> config.mak
+ echo "#define HAVE_MMX 1" >> $TMPH
+ echo "#define __CPU__ 586" >> $TMPH
+fi
+if test "$cmov" = "yes" ; then
+ echo "TARGET_CMOV=yes" >> config.mak
+ echo "#define HAVE_CMOV 1" >> $TMPH
+fi
+if test "$cmov_is_fast" = "yes" ; then
+ echo "TARGET_CMOV_IS_FAST=yes" >> config.mak
+ echo "#define CMOV_IS_FAST 1" >> $TMPH
+fi
+if test "$armv5te" = "yes" ; then
+ echo "TARGET_ARMV5TE=yes" >> config.mak
+ echo "#define HAVE_ARMV5TE 1" >> $TMPH
+fi
+if test "$iwmmxt" = "yes" ; then
+ echo "TARGET_IWMMXT=yes" >> config.mak
+ echo "#define HAVE_IWMMXT 1" >> $TMPH
+fi
+if test "$mmi" = "yes" ; then
+ echo "TARGET_MMI=yes" >> config.mak
+ echo "#define HAVE_MMI 1" >> $TMPH
+fi
+
+if test "$altivec" = "yes" ; then
+ echo "TARGET_ALTIVEC=yes" >> config.mak
+ echo "#define HAVE_ALTIVEC 1" >> $TMPH
+fi
+
+if test "$sdl" = "yes" ; then
+ echo "SDL_LIBS=`"${SDL_CONFIG}" --libs`" >> config.mak
+ echo "SDL_CFLAGS=`"${SDL_CONFIG}" --cflags`" >> config.mak
+fi
+if test "$texi2html" = "yes"; then
+ echo "BUILD_DOC=yes" >> config.mak
+fi
+
+sws_version=`grep '#define LIBSWSCALE_VERSION ' "$source_path/libswscale/swscale.h" | sed 's/[^0-9\.]//g'`
+pp_version=`grep '#define LIBPOSTPROC_VERSION ' "$source_path/libpostproc/postprocess.h" | sed 's/[^0-9\.]//g'`
+lavc_version=`grep '#define LIBAVCODEC_VERSION ' "$source_path/libavcodec/avcodec.h" | sed 's/[^0-9\.]//g'`
+lavf_version=`grep '#define LIBAVFORMAT_VERSION ' "$source_path/libavformat/avformat.h" | sed 's/[^0-9\.]//g'`
+lavu_version=`grep '#define LIBAVUTIL_VERSION ' "$source_path/libavutil/avutil.h" | sed 's/[^0-9\.]//g'`
+
+
+
+if test "$lshared" = "yes" ; then
+ echo "#define BUILD_SHARED_AV 1" >> $TMPH
+ echo "BUILD_SHARED=yes" >> config.mak
+ echo "PIC=-fPIC -DPIC" >> config.mak
+ echo "SPPMAJOR=${lavc_version%%.*}" >> config.mak
+ echo "SPPVERSION=$lavc_version" >> config.mak
+ echo "LAVCMAJOR=${lavc_version%%.*}" >> config.mak
+ echo "LAVCVERSION=$lavc_version" >> config.mak
+ echo "LAVFMAJOR=${lavf_version%%.*}" >> config.mak
+ echo "LAVFVERSION=$lavf_version" >> config.mak
+ echo "LAVUMAJOR=${lavu_version%%.*}" >> config.mak
+ echo "LAVUVERSION=$lavu_version" >> config.mak
+ echo "SWSMAJOR=${sws_version%%.*}" >> config.mak
+ echo "SWSVERSION=$sws_version" >> config.mak
+ echo "SLIBNAME=${SLIBNAME}" >> config.mak
+ echo "SLIBNAME_WITH_VERSION=${SLIBNAME_WITH_VERSION}" >> config.mak
+ echo "SLIBNAME_WITH_MAJOR=${SLIBNAME_WITH_MAJOR}" >> config.mak
+ echo "SLIB_EXTRA_CMD=${SLIB_EXTRA_CMD}" >> config.mak
+fi
+echo "LIB_INSTALL_EXTRA_CMD=${LIB_INSTALL_EXTRA_CMD}" >> config.mak
+echo "EXTRALIBS=$extralibs" >> config.mak
+
+enabled_any $ENCODER_LIST && enable encoders
+enabled_any $DECODER_LIST && enable decoders
+enabled_any $MUXER_LIST && enable muxers
+enabled_any $DEMUXER_LIST && enable demuxers
+
+enabled_any pthreads beosthreads os2threads w32threads && enable threads
+
+print_config HAVE_ $TMPH config.mak $HAVE_LIST
+print_config CONFIG_ $TMPH config.mak $CONFIG_LIST
+
+if test "$targetos" = "Darwin"; then
+ echo "#define CONFIG_DARWIN 1" >> $TMPH
+fi
+
+echo "#define restrict $_restrict" >> $TMPH
+
+if test "$optimize" = "small"; then
+ echo "#define always_inline" >> $TMPH
+ echo "#define CONFIG_SMALL 1" >> $TMPH
+fi
+
+echo "SRC_PATH=\"$source_path\"" >> config.mak
+echo "SRC_PATH_BARE=$source_path" >> config.mak
+echo "BUILD_ROOT=\"$PWD\"" >> config.mak
+
+if test "$amr_if2" = "yes" ; then
+ echo "AMR_CFLAGS=-DIF2=1" >> config.mak
+fi
+
+# Apparently it's not possible to portably echo a backslash.
+if test "$asmalign_pot" = "yes" ; then
+ printf '#define ASMALIGN(ZEROBITS) ".align " #ZEROBITS "\\n\\t"\n' >> $TMPH
+else
+ printf '#define ASMALIGN(ZEROBITS) ".align 1<<" #ZEROBITS "\\n\\t"\n' >> $TMPH
+fi
+
+
+for codec in $DECODER_LIST $ENCODER_LIST $PARSER_LIST $DEMUXER_LIST $MUXER_LIST; do
+ ucname="`toupper $codec`"
+ config_name="CONFIG_$ucname"
+ enabled_name="ENABLE_$ucname"
+ if enabled $codec; then
+ echo "#define $config_name 1" >> $TMPH
+ echo "#define $enabled_name 1" >> $TMPH
+ echo "$config_name=yes" >> config.mak
+ else
+ echo "#define $enabled_name 0" >> $TMPH
+ fi
+done
+
+# Do not overwrite config.h if unchanged to avoid superfluous rebuilds.
+if ! cmp -s $TMPH config.h; then
+ mv -f $TMPH config.h
+else
+ echo "config.h is unchanged"
+fi
+
+rm -f $TMPO $TMPC $TMPE $TMPS $TMPH
+
+# build tree in object directory if source path is different from current one
+if test "$source_path_used" = "yes" ; then
+ DIRS="\
+ doc \
+ libavformat \
+ libavcodec \
+ libavcodec/alpha \
+ libavcodec/armv4l \
+ libavcodec/bfin \
+ libavcodec/i386 \
+ libavcodec/sparc \
+ libavcodec/mlib \
+ libavcodec/ppc \
+ libavcodec/liba52 \
+ libpostproc \
+ libavutil \
+ libswscale \
+ tests \
+ vhook \
+ "
+ FILES="\
+ Makefile \
+ common.mak \
+ libavformat/Makefile \
+ libavcodec/Makefile \
+ libpostproc/Makefile \
+ libavutil/Makefile \
+ libswscale/Makefile \
+ tests/Makefile \
+ vhook/Makefile \
+ doc/Makefile \
+ doc/texi2pod.pl \
+ "
+ for dir in $DIRS ; do
+ mkdir -p $dir
+ done
+ for f in $FILES ; do
+ ln -sf "$source_path/$f" $f
+ done
+fi
+
+# build pkg-config files libav*.pc and libpostproc.pc
+# libavutil.pc
+cat <<EOF >libavutil.pc
+prefix=$PREFIX
+exec_prefix=\${prefix}
+libdir=\${exec_prefix}/lib
+includedir=\${prefix}/include
+
+Name: libavutil
+Description: FFmpeg utility library
+Version: $lavu_version
+Requires:
+Conflicts:
+Libs: -L\${libdir} -lavutil
+Cflags: -I\${includedir} -I\${includedir}/ffmpeg
+EOF
+
+cat <<EOF >libavutil-uninstalled.pc
+prefix=
+exec_prefix=
+libdir=\${pcfiledir}/libavutil
+includedir=\${pcfiledir}/libavutil
+
+Name: libavutil
+Description: FFmpeg utility library
+Version: $lavu_version
+Requires:
+Conflicts:
+Libs: \${libdir}/${LIBPREF}avutil${LIBSUF}
+Cflags: -I\${includedir}
+EOF
+
+# libavcodec.pc
+cat <<EOF >libavcodec.pc
+prefix=$PREFIX
+exec_prefix=\${prefix}
+libdir=\${exec_prefix}/lib
+includedir=\${prefix}/include
+
+Name: libavcodec
+Description: FFmpeg codec library
+Version: $lavc_version
+Requires: $pkg_requires libavutil = $lavu_version
+Conflicts:
+Libs: -L\${libdir} -lavcodec $extralibs
+Cflags: -I\${includedir} -I\${includedir}/ffmpeg
+EOF
+
+cat <<EOF >libavcodec-uninstalled.pc
+prefix=
+exec_prefix=
+libdir=\${pcfiledir}/libavcodec
+includedir=\${pcfiledir}/libavcodec
+
+Name: libavcodec
+Description: FFmpeg codec library
+Version: $lavc_version
+Requires: $pkg_requires libavutil = $lavu_version
+Conflicts:
+Libs: \${libdir}/${LIBPREF}avcodec${LIBSUF} $extralibs
+Cflags: -I\${includedir}
+EOF
+
+# libavformat.pc
+cat <<EOF >libavformat.pc
+prefix=$PREFIX
+exec_prefix=\${prefix}
+libdir=\${exec_prefix}/lib
+includedir=\${prefix}/include
+
+Name: libavformat
+Description: FFmpeg container format library
+Version: $lavf_version
+Requires: $pkg_requires libavcodec = $lavc_version
+Conflicts:
+Libs: -L\${libdir} -lavformat $extralibs
+Cflags: -I\${includedir} -I\${includedir}/ffmpeg
+EOF
+
+cat <<EOF >libavformat-uninstalled.pc
+prefix=
+exec_prefix=
+libdir=\${pcfiledir}/libavformat
+includedir=\${pcfiledir}/libavformat
+
+Name: libavformat
+Description: FFmpeg container format library
+Version: $lavf_version
+Requires: $pkg_requires libavcodec = $lavc_version
+Conflicts:
+Libs: \${libdir}/${LIBPREF}avformat${LIBSUF} $extralibs
+Cflags: -I\${includedir}
+EOF
+
+
+# libpostproc.pc
+cat <<EOF >libpostproc.pc
+prefix=$PREFIX
+exec_prefix=\${prefix}
+libdir=\${exec_prefix}/lib
+includedir=\${prefix}/include
+
+Name: libpostproc
+Description: FFmpeg post processing library
+Version: $lavc_version
+Requires:
+Conflicts:
+Libs: -L\${libdir} -lpostproc
+Cflags: -I\${includedir} -I\${includedir}/postproc
+EOF
+
+cat <<EOF >libpostproc-uninstalled.pc
+prefix=
+exec_prefix=
+libdir=\${pcfiledir}/libpostproc
+includedir=\${pcfiledir}/libpostproc
+
+Name: libpostproc
+Description: FFmpeg post processing library
+Version: $lavc_version
+Requires:
+Conflicts:
+Libs: \${libdir}/${LIBPREF}postproc${LIBSUF}
+Cflags: -I\${includedir}
+EOF
+
+if test "$swscaler" != "no"; then
+ sws_pc_libs="-L\${libdir} -lswscale"
+ sws_pc_uninstalled_libs="\${libdir}/${LIBPREF}swscale${LIBSUF}"
+ sws_pc_requires="$pkg_requires libavutil = $lavu_version"
+else
+ sws_pc_libs=""
+ sws_pc_uninstalled_libs=""
+ sws_pc_requires="$pkg_requires libavcodec = $lavc_version"
+fi
+# libswscale.pc
+cat <<EOF >libswscale.pc
+prefix=$PREFIX
+exec_prefix=\${prefix}
+libdir=\${exec_prefix}/lib
+includedir=\${prefix}/include
+
+Name: libswscale
+Description: FFmpeg image rescaling library
+Version: $sws_version
+Requires: $sws_pc_requires
+Conflicts:
+Libs: $sws_pc_libs
+Cflags: -I\${includedir} -I\${includedir}/swscale
+EOF
+
+cat <<EOF >libswscale-uninstalled.pc
+prefix=
+exec_prefix=
+libdir=\${pcfiledir}/libswscale
+includedir=\${pcfiledir}/libswscale
+
+Name: libswscale
+Description: FFmpeg image rescaling library
+Version: $sws_version
+Requires: $sws_pc_requires
+Conflicts:
+Libs: $sws_pc_uninstalled_libs
+Cflags: -I\${includedir}
+EOF
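+
+# Downstream programs can now be built against the installed libraries via
+# pkg-config. A sketch (any compiler that accepts the printed flags works the
+# same way):
+#
+#   gcc myapp.c -o myapp `pkg-config --cflags --libs libavcodec libavformat`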
diff --git a/contrib/ffmpeg/cws2fws.c b/contrib/ffmpeg/cws2fws.c
new file mode 100644
index 000000000..2e70c6618
--- /dev/null
+++ b/contrib/ffmpeg/cws2fws.c
@@ -0,0 +1,127 @@
+/*
+ * cws2fws by Alex Beregszaszi <alex@fsn.hu>
+ * Public domain.
+ *
+ * This utility converts compressed Macromedia Flash files to uncompressed ones.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <zlib.h>
+
+#ifdef DEBUG
+#define dbgprintf printf
+#else
+#define dbgprintf
+#endif
+
+int main(int argc, char *argv[])
+{
+ int fd_in, fd_out, comp_len, uncomp_len, tag, i, last_out;
+ char buf_in[1024], buf_out[65536];
+ z_stream zstream;
+ struct stat statbuf;
+
+ if (argc < 3)
+ {
+ printf("Usage: %s <infile.swf> <outfile.swf>\n", argv[0]);
+ exit(1);
+ }
+
+ fd_in = open(argv[1], O_RDONLY);
+ if (fd_in < 0)
+ {
+ perror("Error while opening: ");
+ exit(1);
+ }
+
+ fd_out = open(argv[2], O_WRONLY|O_CREAT, 00644);
+ if (fd_out < 0)
+ {
+ perror("Error while opening: ");
+ close(fd_in);
+ exit(1);
+ }
+
+ if (read(fd_in, &buf_in, 8) != 8)
+ {
+ printf("Header error\n");
+ close(fd_in);
+ close(fd_out);
+ exit(1);
+ }
+
+ if (buf_in[0] != 'C' || buf_in[1] != 'W' || buf_in[2] != 'S')
+ {
+ printf("Not a compressed flash file\n");
+ exit(1);
+ }
+
+ fstat(fd_in, &statbuf);
+ comp_len = statbuf.st_size;
+ uncomp_len = buf_in[4] | (buf_in[5] << 8) | (buf_in[6] << 16) | (buf_in[7] << 24);
+
+ printf("Compressed size: %d Uncompressed size: %d\n", comp_len-4, uncomp_len-4);
+
+ // write out modified header
+ buf_in[0] = 'F';
+ write(fd_out, &buf_in, 8);
+
+ zstream.zalloc = NULL;
+ zstream.zfree = NULL;
+ zstream.opaque = NULL;
+ inflateInit(&zstream);
+
+ for (i = 0; i < comp_len-8;)
+ {
+ int ret, len = read(fd_in, &buf_in, 1024);
+
+ dbgprintf("read %d bytes\n", len);
+
+ last_out = zstream.total_out;
+
+ zstream.next_in = &buf_in[0];
+ zstream.avail_in = len;
+ zstream.next_out = &buf_out[0];
+ zstream.avail_out = 65536;
+
+ ret = inflate(&zstream, Z_SYNC_FLUSH);
+ if (ret != Z_STREAM_END && ret != Z_OK)
+ {
+ printf("Error while decompressing: %d\n", ret);
+ inflateEnd(&zstream);
+ exit(1);
+ }
+
+ dbgprintf("a_in: %d t_in: %d a_out: %d t_out: %d -- %d out\n",
+ zstream.avail_in, zstream.total_in, zstream.avail_out, zstream.total_out,
+ zstream.total_out-last_out);
+
+ write(fd_out, &buf_out, zstream.total_out-last_out);
+
+ i += len;
+
+ if (ret == Z_STREAM_END || ret == Z_BUF_ERROR)
+ break;
+ }
+
+ if (zstream.total_out != uncomp_len-8)
+ {
+ printf("Size mismatch (%d != %d), updating header...\n",
+ zstream.total_out, uncomp_len-8);
+
+ buf_in[0] = (zstream.total_out+8) & 0xff;
+ buf_in[1] = (zstream.total_out+8 >> 8) & 0xff;
+ buf_in[2] = (zstream.total_out+8 >> 16) & 0xff;
+ buf_in[3] = (zstream.total_out+8 >> 24) & 0xff;
+
+ lseek(fd_out, 4, SEEK_SET);
+ write(fd_out, &buf_in, 4);
+ }
+
+ inflateEnd(&zstream);
+ close(fd_in);
+ close(fd_out);
+
+ return 0;
+}
diff --git a/contrib/ffmpeg/doc/Makefile b/contrib/ffmpeg/doc/Makefile
new file mode 100644
index 000000000..4fc9dfb8f
--- /dev/null
+++ b/contrib/ffmpeg/doc/Makefile
@@ -0,0 +1,20 @@
+-include ../config.mak
+
+VPATH=$(SRC_PATH_BARE)/doc
+
+all: ffmpeg-doc.html faq.html ffserver-doc.html ffplay-doc.html hooks.html \
+ ffmpeg.1 ffserver.1 ffplay.1
+
+%.html: %.texi Makefile
+ texi2html -monolithic -number $<
+
+%.pod: %-doc.texi
+ ./texi2pod.pl $< $@
+
+%.1: %.pod
+ pod2man --section=1 --center=" " --release=" " $< > $@
+
+clean:
+ rm -f *.html *.pod *.1
+
+.PHONY: all clean
diff --git a/contrib/ffmpeg/doc/TODO b/contrib/ffmpeg/doc/TODO
new file mode 100644
index 000000000..8271659d2
--- /dev/null
+++ b/contrib/ffmpeg/doc/TODO
@@ -0,0 +1,82 @@
+ffmpeg TODO list:
+----------------
+
+Fabrice's TODO list: (unordered)
+-------------------
+Short term:
+
+- seeking API and example in ffplay
+- use AVFMTCTX_DISCARD_PKT in ffplay so that DV has a chance to work
+- add RTSP regression test (both client and server)
+- make ffserver allocate AVFormatContext
+- clean up (incompatible change, for 0.5.0):
+ * AVStream -> AVComponent
+ * AVFormatContext -> AVInputStream/AVOutputStream
+ * suppress rate_emu from AVCodecContext
+- add new float/integer audio filtering and conversion: suppress
+ CODEC_ID_PCM_xxc and use CODEC_ID_RAWAUDIO.
+- fix telecine and frame rate conversion
+
+Long term (ask me if you want to help):
+
+- commit new imgconvert API and new PIX_FMT_xxx alpha formats
+- commit new LGPL'ed float and integer-only AC3 decoder
+- add WMA integer-only decoder
+- add new MPEG4-AAC audio decoder (both integer-only and float version)
+
+Michael's TODO list: (unordered) (if anyone wants to help with something, just ask)
+-------------------
+- optimize H264 CABAC
+- more optimizations
+- simpler rate control
+
+Francois' TODO list: (unordered, without any timeframe)
+-------------------
+- test MACE decoder against the openquicktime one as suggested by A'rpi
+- BeOS audio input grabbing backend
+- BeOS video input grabbing backend
+- have a REAL BeOS errno fix (return MKERROR(EXXX);), not a hack
+- publish my BeOS libposix on BeBits so I can officially support ffserver :)
+- check the whole code for thread-safety (global and init stuff)
+
+Philip's TODO list: (alphabetically ordered) (please help)
+------------------
+- Add a multi-ffm filetype so that feeds can be recorded into multiple files rather
+ than one big file.
+- Authenticated users support -- where the authentication is in the URL
+- Change ASF files so that the embedded timestamp in the frames is right rather
+ than being an offset from the start of the stream
+- Make ffm files more resilient to changes in the codec structures so that you
+ can play old ffm files.
+
+unassigned TODO: (unordered)
+---------------
+- use AVFrame for audio codecs too
+- rework aviobuf.c buffering strategy and fix url_fskip
+- generate optimal huffman tables for mjpeg encoding
+- fix ffserver regression tests
+- support XviD's motion estimation
+- support x264's motion estimation
+- support x264's rate control
+- SNOW: non translational motion compensation
+- SNOW: more optimal quantization
+- SNOW: 4x4 block support
+- SNOW: 1/8 pel motion compensation support
+- SNOW: iterative motion estimation based on subsampled images
+- FLAC: lossy encoding (viterbi and naive scalar quantization)
+- libavfilter
+- JPEG2000 decoder & encoder
+- MPEG4 GMC encoding support
+- macroblock based pixel format (better cache locality, somewhat complex, one paper claimed it faster for high res)
+- finish NUT implementation
+- seeking regression test
+- regression tests for codecs which don't have an encoder (I+P frame bitstream in svn)
+- add support for using MPlayer's video filters in ffmpeg
+- reverse engineer RV30/RV40
+- finish implementation of WMV2 j-picture
+- H264 encoder
+- per MB ratecontrol (so VCD and such do work better)
+- replace/rewrite libavcodec/fdctref.c
+- write a script which iteratively changes all functions between always_inline and noinline and benchmarks the result to find the best set of inlined functions
+- set up roundup bugtracker somewhere with (newBug, reproduced, analyzed, fixed, worksForMe, duplicate, wontFix, invalid, needMoreInfo, newPatch, ok, applied, rejected, needChanges, newRequest, implemented, wontImplement, invalidReq) states and a checked integer
+- convert all the non SIMD asm into small asm vs. C testcases and submit them to the gcc devels so they can improve gcc
diff --git a/contrib/ffmpeg/doc/faq.texi b/contrib/ffmpeg/doc/faq.texi
new file mode 100644
index 000000000..9f1e8ec2d
--- /dev/null
+++ b/contrib/ffmpeg/doc/faq.texi
@@ -0,0 +1,312 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle FFmpeg FAQ
+@titlepage
+@sp 7
+@center @titlefont{FFmpeg FAQ}
+@sp 3
+@end titlepage
+
+
+@chapter General Problems
+
+@section I cannot read this file although this format seems to be supported by ffmpeg.
+
+Even if ffmpeg can read the file format, it may not support all its
+codecs. Please consult the supported codec list in the ffmpeg
+documentation.
+
+@section How do I encode JPEGs to another format ?
+
+If the JPEGs are named img1.jpg, img2.jpg, img3.jpg,..., use:
+
+@example
+ ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
+@end example
+
+@samp{%d} is replaced by the image number.
+
+@file{img%03d.jpg} generates @file{img001.jpg}, @file{img002.jpg}, etc...
+
+The same system is used for the other image formats.
+
+@section How do I encode a movie to single pictures ?
+
+Use:
+
+@example
+ ffmpeg -i movie.mpg movie%d.jpg
+@end example
+
+The @file{movie.mpg} used as input will be converted to
+@file{movie1.jpg}, @file{movie2.jpg}, etc...
+
+Instead of relying on file format self-recognition, you may also use
+@table @option
+@item -vcodec ppm
+@item -vcodec png
+@item -vcodec mjpeg
+@end table
+to force the encoding.
+
+Applying that to the previous example:
+@example
+ ffmpeg -i movie.mpg -f image2 -vcodec mjpeg menu%d.jpg
+@end example
+
+Beware that there is no "jpeg" codec. Use "mjpeg" instead.
+
+@section FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it ?
+
+No. FFmpeg only supports open source codecs. Windows DLLs are not
+portable, and they are bloated and often slow.
+
+@section I get "Unsupported codec (id=86043) for input stream #0.1". What is the problem ?
+
+This is the QCELP codec; FFmpeg currently has no support for it. Try MEncoder/MPlayer, which might work.
+
+@section Why do I see a slight quality degradation with multithreaded MPEG* encoding ?
+
+For multithreaded MPEG* encoding, the encoded slices must be independent,
+otherwise thread n would practically have to wait for n-1 to finish, so it's
+quite logical that there is a small reduction of quality. This is not a bug.
+
+@section How can I read from the standard input or write to the standard output ?
+
+Use @file{-} as filename.
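+
+For example, to read an MPEG stream from standard input (a sketch; the input
+format may need to be forced with @option{-f} for headerless data):
+
+@example
+ ffmpeg -i - output.avi < input.mpg
+@end example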
+
+@section Why does ffmpeg not decode audio in VOB files ?
+
+The audio is AC3 (a.k.a. A/52). AC3 decoding is an optional component in ffmpeg
+as the component that handles AC3 decoding (liba52) is currently released under
+the GPL. If you have liba52 installed on your system, enable AC3 decoding
+with @code{./configure --enable-a52}. Take care: by
+enabling AC3, you automatically change the license of libavcodec from
+LGPL to GPL.
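+
+As a sketch (liba52 is GPL, so @code{--enable-gpl} will typically be required
+as well; check @code{./configure --help} for the exact option names):
+
+@example
+ ./configure --enable-a52 --enable-gpl
+ make
+ ./ffmpeg -i input.vob -vn audio.wav
+@end example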
+
+@section Which codecs are supported by Windows ?
+
+Windows does not support standard formats like MPEG very well, unless you
+install some additional codecs.
+
+The following list of video codecs should work on most Windows systems:
+@table @option
+@item msmpeg4v2
+.avi/.asf
+@item msmpeg4
+.asf only
+@item wmv1
+.asf only
+@item wmv2
+.asf only
+@item mpeg4
+only if you have some MPEG-4 codec installed like ffdshow or XviD
+@item mpeg1
+.mpg only
+@end table
+Note that ASF files often have .wmv or .wma extensions in Windows. It should also
+be mentioned that Microsoft claims a patent on the ASF format, and may sue
+or threaten users who create ASF files with non-Microsoft software. It is
+strongly advised to avoid ASF where possible.
+
+The following list of audio codecs should work on most Windows systems:
+@table @option
+@item adpcm_ima_wav
+@item adpcm_ms
+@item pcm
+@item mp3
+if some MP3 codec like LAME is installed
+@end table
+
+@section Why does the chrominance data seem to be sampled at a different time from the luminance data on bt8x8 captures on Linux?
+
+This is a well-known bug in the bt8x8 driver. For 2.4.26 there is a patch at
+(@url{http://mplayerhq.hu/~michael/bttv-420-2.4.26.patch}). This may also
+apply cleanly to other 2.4-series kernels.
+
+@section How do I avoid the ugly aliasing artifacts in bt8x8 captures on Linux?
+
+Pass 'combfilter=1 lumafilter=1' to the bttv driver. Note though that 'combfilter=1'
+will cause somewhat too strong filtering. A fix is to apply (@url{http://mplayerhq.hu/~michael/bttv-comb-2.4.26.patch})
+or (@url{http://mplayerhq.hu/~michael/bttv-comb-2.6.6.patch})
+and pass 'combfilter=2'.
+
+@section I have a problem with an old version of ffmpeg; where should I report it?
+Nowhere. Upgrade to the latest release or, if there is no recent release, upgrade
+to Subversion HEAD. You could also try to report it. Maybe you will get lucky and
+become the first person in history to get an answer different from "upgrade
+to Subversion HEAD".
+
+@section -f jpeg doesn't work.
+
+Try '-f image2 test%d.jpg'.
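+
+In full, a command along these lines should produce numbered JPEGs:
+
+@example
+ ffmpeg -i input.avi -f image2 test%d.jpg
+@end example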
+
+@section Why can I not change the framerate?
+
+Some codecs, like MPEG-1/2, only allow a small number of fixed framerates.
+Choose a different codec with the -vcodec command line option.
+
+@section ffmpeg does not work; what is wrong?
+
+Try a 'make distclean' in the ffmpeg source directory. If this does not help, see
+(@url{http://ffmpeg.org/bugreports.php}).
+
+@section How do I encode XviD or DivX video with ffmpeg?
+
+Both XviD and DivX (version 4+) are implementations of the ISO MPEG-4
+standard (note that there are many other coding formats that use this
+same standard). Thus, use '-vcodec mpeg4' to encode these formats. The
+default fourcc stored in an MPEG-4-coded file will be 'FMP4'. If you want
+a different fourcc, use the '-vtag' option. E.g., '-vtag xvid' will
+force the fourcc 'xvid' to be stored as the video fourcc rather than the
+default.
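+
+For example (filenames and bitrate are placeholders):
+
+@example
+ ffmpeg -i input.avi -vcodec mpeg4 -vtag xvid -b 800k output.avi
+@end example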
+
+@section How do I encode videos which play on the iPod?
+
+@table @option
+@item needed stuff
+-acodec aac -vcodec mpeg4 width<=320 height<=240
+@item working stuff
+4mv, title
+@item non-working stuff
+B-frames
+@item example command line
+ffmpeg -i input -acodec aac -ab 128 -vcodec mpeg4 -b 1200kb -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -s 320x180 -title X output.mp4
+@end table
+
+@section How do I encode videos which play on the PSP?
+
+@table @option
+@item needed stuff
+-acodec aac -vcodec mpeg4 width*height<=76800 width%16=0 height%16=0 -ar 24000 -r 30000/1001 or 15000/1001 -f psp
+@item working stuff
+4mv, title
+@item non-working stuff
+B-frames
+@item example command line
+ffmpeg -i input -acodec aac -ab 128 -vcodec mpeg4 -b 1200kb -ar 24000 -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -s 368x192 -r 30000/1001 -title X -f psp output.mp4
+@item needed stuff for H.264
+-acodec aac -vcodec h264 width*height<=76800 width%16=0? height%16=0? -ar 48000 -coder 1 -r 30000/1001 or 15000/1001 -f psp
+@item working stuff for H.264
+title, loop filter
+@item non-working stuff for H.264
+CAVLC
+@item example command line
+ffmpeg -i input -acodec aac -ab 128 -vcodec h264 -b 1200kb -ar 48000 -mbd 2 -coder 1 -cmp 2 -subcmp 2 -s 368x192 -r 30000/1001 -title X -f psp -flags loop -trellis 2 -partitions parti4x4+parti8x8+partp4x4+partp8x8+partb8x8 output.mp4
+@end table
+
+@section How can I read DirectShow files?
+
+If you have built FFmpeg with @code{./configure --enable-avisynth}
+(only possible on MinGW/Cygwin platforms),
+then you may use any file that DirectShow can read as input.
+(Be aware that this feature has been recently added,
+so you will need to help yourself in case of problems.)
+
+Just create an "input.avs" text file with this single line ...
+@example
+ DirectShowSource("C:\path to your file\yourfile.asf")
+@end example
+... and then feed that text file to FFmpeg:
+@example
+ ffmpeg -i input.avs
+@end example
+
+For ANY other help on Avisynth, please visit @url{http://www.avisynth.org/}.
+
+@chapter Development
+
+@section When will the next FFmpeg version be released? / Why are FFmpeg releases so few and far between?
+
+Like most open source projects FFmpeg suffers from a certain lack of
+manpower. For this reason the developers have to prioritize the work
+they do, and putting out releases is not at the top of the list; fixing
+bugs and reviewing patches takes precedence. Please don't complain or
+request more timely and/or frequent releases unless you are willing to
+help out creating them.
+
+@section Why doesn't FFmpeg support feature [xyz]?
+
+Because no one has taken on that task yet. FFmpeg development is
+driven by the tasks that are important to the individual developers.
+If there is a feature that is important to you, the best way to get
+it implemented is to undertake the task yourself.
+
+
+@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat ?
+
+Yes. Read the Developers Guide of the FFmpeg documentation. Alternatively,
+examine the source code for one of the many open source projects that
+already incorporate ffmpeg at (@url{projects.php}).
+
+@section Can you support my C compiler XXX ?
+
+No. Only GCC is supported. GCC is ported to most systems available and there
+is no need to pollute the source code with @code{#ifdef}s
+related to the compiler.
+
+@section Can I use FFmpeg or libavcodec under Windows ?
+
+Yes, but the MinGW tools @emph{must} be used to compile FFmpeg. You
+can link the resulting DLLs with any other Windows program. Read the
+@emph{Native Windows Compilation} and @emph{Visual C++ compatibility}
+sections in the FFmpeg documentation to find more information.
+
+@section Can you add automake, libtool or autoconf support ?
+
+No. These tools are too bloated and they complicate the build. Moreover,
+since only @samp{gcc} is supported they would add little advantage in
+terms of portability.
+
+@section Why not rewrite ffmpeg in object-oriented C++ ?
+
+ffmpeg is already organized in a highly modular manner and does not need to
+be rewritten in a formal object language. Further, many of the developers
+favor straight C; it works for them. For more arguments on this matter,
+read "Programming Religion" at (@url{http://lkml.org/faq/lkmlfaq-15.html}).
+
+@section Why are the ffmpeg programs devoid of debugging symbols ?
+
+The build process creates ffmpeg_g, ffplay_g, etc. which contain full debug
+information. Those binaries are stripped to create ffmpeg, ffplay, etc. If
+you need the debug information, use the *_g versions.
+
+@section I do not like the LGPL, can I contribute code under the GPL instead ?
+
+Yes, as long as the code is optional and can easily and cleanly be placed
+under #ifdef CONFIG_GPL without breaking anything. So for example a new codec
+or filter would be OK under GPL while a bugfix to LGPL code would not.
+
+@section I want to compile xyz.c alone but my compiler produced many errors.
+
+Common code is in its own files in libav* and is used by the individual
+codecs. They will not work without the common parts; you have to compile
+the whole libav*. If you wish, disable some parts with configure switches.
+You can also try to hack it and remove more, but if you had problems fixing
+the compilation failure then you are probably not qualified for this.
+
+@section Visual C++ produces many errors.
+
+Visual C++ is not compliant with the C standard and does not support
+the inline assembly used in FFmpeg.
+If you wish - for whatever weird reason - to use Visual C++ for your
+project then you can link the Visual C++ code with libav* as long as
+you compile the latter with a working C compiler. For more information, see
+the @emph{Visual C++ compatibility} section in the FFmpeg documentation.
+
+There have been efforts to make FFmpeg compatible with Visual C++ in the
+past. However, they have all been rejected as too intrusive, especially
+since MinGW does the job perfectly adequately. None of the core developers
+work with Visual C++ and thus this item is low priority. Should you find
+the silver bullet that solves this problem, feel free to shoot it at us.
+
+@section I have a file in memory / an API different from *open/*read/ libc; how do I use it with libavformat ?
+
+You have to implement a URLProtocol, see libavformat/file.c in FFmpeg
+and libmpdemux/demux_lavf.c in MPlayer sources.
+
+@section I get "No compatible shell script interpreter found." in MSys.
+
+The standard MSys bash (2.04) is broken. You need to install 2.05 or later.
+
+@bye
diff --git a/contrib/ffmpeg/doc/ffmpeg-doc.texi b/contrib/ffmpeg/doc/ffmpeg-doc.texi
new file mode 100644
index 000000000..2d814c0fb
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffmpeg-doc.texi
@@ -0,0 +1,1607 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle FFmpeg Documentation
+@titlepage
+@sp 7
+@center @titlefont{FFmpeg Documentation}
+@sp 3
+@end titlepage
+
+
+@chapter Introduction
+
+FFmpeg is a very fast video and audio converter. It can also grab from
+a live audio/video source.
+
+The command line interface is designed to be intuitive, in the sense
+that FFmpeg tries to figure out all parameters that can possibly be
+derived automatically. You usually only have to specify the target
+bitrate you want.
+
+FFmpeg can also convert from any sample rate to any other, and resize
+video on the fly with a high quality polyphase filter.
+
+@chapter Quick Start
+
+@c man begin EXAMPLES
+@section Video and Audio grabbing
+
+FFmpeg can use a video4linux compatible video source and any Open Sound
+System audio source:
+
+@example
+ffmpeg /tmp/out.mpg
+@end example
+
+Note that you must activate the right video source and channel before
+launching FFmpeg, using any TV viewer such as xawtv
+(@url{http://bytesex.org/xawtv/}) by Gerd Knorr. You also
+have to set the audio recording levels correctly with a
+standard mixer.
+
+@section Video and Audio file format conversion
+
+* FFmpeg can use any supported file format and protocol as input:
+
+Examples:
+
+* You can use YUV files as input:
+
+@example
+ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
+@end example
+
+It will use the files:
+@example
+/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
+/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
+@end example
+
+The Y files use twice the resolution of the U and V files. They are
+raw files, without header. They can be generated by all decent video
+decoders. You must specify the size of the image with the @option{-s} option
+if FFmpeg cannot guess it.
+
+* You can input from a raw YUV420P file:
+
+@example
+ffmpeg -i /tmp/test.yuv /tmp/out.avi
+@end example
+
+test.yuv is a file containing raw YUV planar data. Each frame is composed
+of the Y plane followed by the U and V planes at half vertical and
+horizontal resolution.
+
+* You can output to a raw YUV420P file:
+
+@example
+ffmpeg -i mydivx.avi hugefile.yuv
+@end example
+
+* You can set several input files and output files:
+
+@example
+ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
+@end example
+
+Converts the audio file a.wav and the raw YUV video file a.yuv
+to MPEG file a.mpg.
+
+* You can also do audio and video conversions at the same time:
+
+@example
+ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
+@end example
+
+Converts a.wav to MPEG audio at 22050Hz sample rate.
+
+* You can encode to several formats at the same time and define a
+mapping from input stream to output streams:
+
+@example
+ffmpeg -i /tmp/a.wav -ab 64 /tmp/a.mp2 -ab 128 /tmp/b.mp2 -map 0:0 -map 0:0
+@end example
+
+Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. '-map
+file:index' specifies which input stream is used for each output
+stream, in the order of the definition of output streams.
+
+* You can transcode decrypted VOBs
+
+@example
+ffmpeg -i snatch_1.vob -f avi -vcodec mpeg4 -b 800k -g 300 -bf 2 -acodec mp3 -ab 128 snatch.avi
+@end example
+
+This is a typical DVD ripping example; the input is a VOB file, the
+output an AVI file with MPEG-4 video and MP3 audio. Note that in this
+command we use B-frames so the MPEG-4 stream is DivX5 compatible, and
+GOP size is 300 which means one intra frame every 10 seconds for 29.97fps
+input video. Furthermore, the audio stream is MP3-encoded so you need
+to enable LAME support by passing @code{--enable-mp3lame} to configure.
+The mapping is particularly useful for DVD transcoding
+to get the desired audio language.
+
+NOTE: To see the supported input formats, use @code{ffmpeg -formats}.
+@c man end
+
+@chapter Invocation
+
+@section Syntax
+
+The generic syntax is:
+
+@example
+@c man begin SYNOPSIS
+ffmpeg [[infile options][@option{-i} @var{infile}]]... @{[outfile options] @var{outfile}@}...
+@c man end
+@end example
+@c man begin DESCRIPTION
+If no input file is given, audio/video grabbing is done.
+
+As a general rule, options are applied to the next specified
+file. Therefore, order is important, and you can have the same
+option on the command line multiple times. Each occurrence is
+then applied to the next input or output file.
+
+* To set the video bitrate of the output file to 64kbit/s:
+@example
+ffmpeg -i input.avi -b 64k output.avi
+@end example
+
+* To force the frame rate of the input and output file to 24 fps:
+@example
+ffmpeg -r 24 -i input.avi output.avi
+@end example
+
+* To force the frame rate of the output file to 24 fps:
+@example
+ffmpeg -i input.avi -r 24 output.avi
+@end example
+
+* To force the frame rate of input file to 1 fps and the output file to 24 fps:
+@example
+ffmpeg -r 1 -i input.avi -r 24 output.avi
+@end example
+
+The format option may be needed for raw input files.
+
+By default, FFmpeg tries to convert as losslessly as possible: It
+uses the same audio and video parameters for the outputs as the ones
+specified for the inputs.
+@c man end
+
+@c man begin OPTIONS
+@section Main options
+
+@table @option
+@item -L
+Show license.
+
+@item -h
+Show help.
+
+@item -version
+Show version.
+
+@item -formats
+Show available formats, codecs, protocols, ...
+
+@item -f fmt
+Force format.
+
+@item -i filename
+Input filename.
+
+@item -y
+Overwrite output files.
+
+@item -t duration
+Set the recording time in seconds.
+@code{hh:mm:ss[.xxx]} syntax is also supported.
+
+@item -fs limit_size
+Set the file size limit.
+
+@item -ss position
+Seek to the given time position in seconds.
+@code{hh:mm:ss[.xxx]} syntax is also supported (see the example after this table).
+
+@item -itsoffset offset
+Set the input time offset in seconds.
+@code{[-]hh:mm:ss[.xxx]} syntax is also supported.
+This option affects all the input files that follow it.
+The offset is added to the timestamps of the input files.
+Specifying a positive offset means that the corresponding
+streams are delayed by 'offset' seconds.
+
+@item -title string
+Set the title.
+
+@item -timestamp time
+Set the timestamp.
+
+@item -author string
+Set the author.
+
+@item -copyright string
+Set the copyright.
+
+@item -comment string
+Set the comment.
+
+@item -album string
+Set the album.
+
+@item -track number
+Set the track.
+
+@item -year number
+Set the year.
+
+@item -v verbose
+Control amount of logging.
+
+@item -target type
+Specify target file type ("vcd", "svcd", "dvd", "dv", "dv50", "pal-vcd",
+"ntsc-svcd", ... ). All the format options (bitrate, codecs,
+buffer sizes) are then set automatically. You can just type:
+
+@example
+ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
+@end example
+
+Nevertheless you can specify additional options as long as you know
+they do not conflict with the standard, as in:
+
+@example
+ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
+@end example
+
+@item -dframes number
+Set the number of data frames to record.
+
+@item -scodec codec
+Force subtitle codec ('copy' to copy stream).
+
+@item -newsubtitle
+Add a new subtitle stream to the current output stream.
+
+@item -slang code
+Set the ISO 639 language code (3 letters) of the current subtitle stream.
+
+@end table
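+
+For instance, combining @option{-ss} and @option{-t} to extract a one-minute
+excerpt starting 90 seconds into the input (a sketch; filenames are
+placeholders):
+
+@example
+ffmpeg -i input.avi -ss 00:01:30 -t 00:01:00 excerpt.avi
+@end example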
+
+@section Video Options
+
+@table @option
+@item -b bitrate
+Set the video bitrate in bit/s (default = 200 kb/s).
+@item -vframes number
+Set the number of video frames to record.
+@item -r fps
+Set frame rate (Hz value, fraction or abbreviation), (default = 25).
+@item -s size
+Set frame size. The format is @samp{wxh} (default = 160x128).
+The following abbreviations are recognized:
+@table @samp
+@item sqcif
+128x96
+@item qcif
+176x144
+@item cif
+352x288
+@item 4cif
+704x576
+@end table
+
+@item -aspect aspect
+Set aspect ratio (4:3, 16:9 or 1.3333, 1.7777).
+@item -croptop size
+Set top crop band size (in pixels).
+@item -cropbottom size
+Set bottom crop band size (in pixels).
+@item -cropleft size
+Set left crop band size (in pixels).
+@item -cropright size
+Set right crop band size (in pixels).
+@item -padtop size
+Set top pad band size (in pixels).
+@item -padbottom size
+Set bottom pad band size (in pixels).
+@item -padleft size
+Set left pad band size (in pixels).
+@item -padright size
+Set right pad band size (in pixels).
+@item -padcolor (hex color)
+Set color of padded bands. The value for padcolor is expressed
+as a six digit hexadecimal number where the first two digits
+represent red, the middle two digits green and last two digits
+blue (default = 000000 (black)).
+@item -vn
+Disable video recording.
+@item -bt tolerance
+Set video bitrate tolerance (in bit/s).
+@item -maxrate bitrate
+Set max video bitrate (in bit/s).
+@item -minrate bitrate
+Set min video bitrate (in bit/s).
+@item -bufsize size
+Set rate control buffer size (in bits).
+@item -vcodec codec
+Force video codec to @var{codec}. Use the @code{copy} special value to
+specify that the raw codec data must be copied as is.
+@item -sameq
+Use same video quality as source (implies VBR).
+
+@item -pass n
+Select the pass number (1 or 2). It is used for two-pass
+encoding: the statistics of the video are recorded in the first
+pass and the video is generated at the exact requested bitrate
+in the second pass.
+
+@item -passlogfile file
+Set two pass logfile name to @var{file}.
+
+@item -newvideo
+Add a new video stream to the current output stream.
+
+@end table
+
+@section Advanced Video Options
+
+@table @option
+@item -pix_fmt format
+Set pixel format.
+@item -g gop_size
+Set the group of pictures size.
+@item -intra
+Use only intra frames.
+@item -vdt n
+Discard threshold.
+@item -qscale q
+Use fixed video quantizer scale (VBR).
+@item -qmin q
+minimum video quantizer scale (VBR)
+@item -qmax q
+maximum video quantizer scale (VBR)
+@item -qdiff q
+maximum difference between the quantizer scales (VBR)
+@item -qblur blur
+video quantizer scale blur (VBR)
+@item -qcomp compression
+video quantizer scale compression (VBR)
+
+@item -lmin lambda
+minimum video lagrange factor (VBR)
+@item -lmax lambda
+max video lagrange factor (VBR)
+@item -mblmin lambda
+minimum macroblock quantizer scale (VBR)
+@item -mblmax lambda
+maximum macroblock quantizer scale (VBR)
+
+These four options (lmin, lmax, mblmin, mblmax) use 'lambda' units,
+but you may use the QP2LAMBDA constant to easily convert from 'q' units:
+@example
+ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
+@end example
+
+@item -rc_init_cplx complexity
+initial complexity for single pass encoding
+@item -b_qfactor factor
+qp factor between P- and B-frames
+@item -i_qfactor factor
+qp factor between P- and I-frames
+@item -b_qoffset offset
+qp offset between P- and B-frames
+@item -i_qoffset offset
+qp offset between P- and I-frames
+@item -rc_eq equation
+Set rate control equation (@pxref{FFmpeg formula
+evaluator}) (default = @code{tex^qComp}).
+@item -rc_override override
+rate control override for specific intervals
+@item -me method
+Set motion estimation method to @var{method}.
+Available methods are (from lowest to best quality):
+@table @samp
+@item zero
+Try just the (0, 0) vector.
+@item phods
+@item log
+@item x1
+@item epzs
+(default method)
+@item full
+exhaustive search (slow and marginally better than epzs)
+@end table
+
+@item -dct_algo algo
+Set DCT algorithm to @var{algo}. Available values are:
+@table @samp
+@item 0
+FF_DCT_AUTO (default)
+@item 1
+FF_DCT_FASTINT
+@item 2
+FF_DCT_INT
+@item 3
+FF_DCT_MMX
+@item 4
+FF_DCT_MLIB
+@item 5
+FF_DCT_ALTIVEC
+@end table
+
+@item -idct_algo algo
+Set IDCT algorithm to @var{algo}. Available values are:
+@table @samp
+@item 0
+FF_IDCT_AUTO (default)
+@item 1
+FF_IDCT_INT
+@item 2
+FF_IDCT_SIMPLE
+@item 3
+FF_IDCT_SIMPLEMMX
+@item 4
+FF_IDCT_LIBMPEG2MMX
+@item 5
+FF_IDCT_PS2
+@item 6
+FF_IDCT_MLIB
+@item 7
+FF_IDCT_ARM
+@item 8
+FF_IDCT_ALTIVEC
+@item 9
+FF_IDCT_SH4
+@item 10
+FF_IDCT_SIMPLEARM
+@end table
+
+@item -er n
+Set error resilience to @var{n}.
+@table @samp
+@item 1
+FF_ER_CAREFUL (default)
+@item 2
+FF_ER_COMPLIANT
+@item 3
+FF_ER_AGGRESSIVE
+@item 4
+FF_ER_VERY_AGGRESSIVE
+@end table
+
+@item -ec bit_mask
+Set error concealment to @var{bit_mask}. @var{bit_mask} is a bit mask of
+the following values:
+@table @samp
+@item 1
+FF_EC_GUESS_MVS (default = enabled)
+@item 2
+FF_EC_DEBLOCK (default = enabled)
+@end table
+
+@item -bf frames
+Use 'frames' B-frames (supported for MPEG-1, MPEG-2 and MPEG-4).
+@item -mbd mode
+macroblock decision
+@table @samp
+@item 0
+FF_MB_DECISION_SIMPLE: Use mb_cmp (cannot change it yet in FFmpeg).
+@item 1
+FF_MB_DECISION_BITS: Choose the one which needs the fewest bits.
+@item 2
+FF_MB_DECISION_RD: rate distortion
+@end table
+
+@item -4mv
+Use four motion vectors per macroblock (MPEG-4 only).
+@item -part
+Use data partitioning (MPEG-4 only).
+@item -bug param
+Work around encoder bugs that are not auto-detected.
+@item -strict strictness
+How strictly to follow the standards.
+@item -aic
+Enable advanced intra coding (H.263+).
+@item -umv
+Enable unlimited motion vectors (H.263+).
+
+@item -deinterlace
+Deinterlace pictures.
+@item -ilme
+Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
+Use this option if your input file is interlaced and you want
+to keep the interlaced format for minimum losses.
+The alternative is to deinterlace the input stream with
+@option{-deinterlace}, but deinterlacing introduces losses.
+@item -psnr
+Calculate PSNR of compressed frames.
+@item -vstats
+Dump video coding statistics to @file{vstats_HHMMSS.log}.
+@item -vhook module
+Insert video processing @var{module}. @var{module} contains the module
+name and its parameters separated by spaces.
+@item -top n
+Set which field is first: top=1, bottom=0, auto=-1.
+@item -dc precision
+Set intra DC precision.
+@item -vtag fourcc/tag
+Force video tag/fourcc.
+@item -qphist
+Show QP histogram.
+@item -vbsf bitstream filter
+Bitstream filters available are "dump_extra", "remove_extra", "noise".
+@end table
+
+@section Audio Options
+
+@table @option
+@item -aframes number
+Set the number of audio frames to record.
+@item -ar freq
+Set the audio sampling frequency (default = 44100 Hz).
+@item -ab bitrate
+Set the audio bitrate in kbit/s (default = 64).
+@item -ac channels
+Set the number of audio channels (default = 1).
+@item -an
+Disable audio recording.
+@item -acodec codec
+Force audio codec to @var{codec}. Use the @code{copy} special value to
+specify that the raw codec data must be copied as is.
+@item -newaudio
+Add a new audio track to the output file. If you want to specify parameters,
+do so before @code{-newaudio} (@code{-acodec}, @code{-ab}, etc..).
+
+Mapping will be done automatically if the number of output streams is equal to
+the number of input streams; otherwise it will pick the first one that matches. You
+can override the mapping using @code{-map} as usual.
+
+Example:
+@example
+ffmpeg -i file.mpg -vcodec copy -acodec ac3 -ab 384 test.mpg -acodec mp2 -ab 192 -newaudio
+@end example
+@item -alang code
+Set the ISO 639 language code (3 letters) of the current audio stream.
+@end table
+
+@section Advanced Audio options:
+
+@table @option
+@item -atag fourcc/tag
+Force audio tag/fourcc.
+@item -absf bitstream filter
+Bitstream filters available are "dump_extra", "remove_extra", "noise", "mp3comp", "mp3decomp".
+@end table
+
+@section Subtitle options:
+
+@table @option
+@item -scodec codec
+Force subtitle codec ('copy' to copy stream).
+@item -newsubtitle
+Add a new subtitle stream to the current output stream.
+@item -slang code
+Set the ISO 639 language code (3 letters) of the current subtitle stream.
+@end table
+
+@section Audio/Video grab options
+
+@table @option
+@item -vd device
+Set video grab device (e.g. @file{/dev/video0}).
+@item -vc channel
+Set video grab channel (DV1394 only).
+@item -tvstd standard
+Set television standard (NTSC, PAL (SECAM)).
+@item -dv1394
+Set DV1394 grab.
+@item -ad device
+Set audio device (e.g. @file{/dev/dsp}).
+@item -grab format
+Request grabbing using the given format.
+@item -gd device
+Set grab device.
+@end table
+
+@section Advanced options
+
+@table @option
+@item -map input stream id[:input stream id]
+Set stream mapping from input streams to output streams.
+Just enumerate the input streams in the order you want them in the output.
+[input stream id] sets the (input) stream to sync against.
+@item -map_meta_data outfile:infile
+Set meta data information of outfile from infile.
+@item -debug
+Print specific debug info.
+@item -benchmark
+Add timings for benchmarking.
+@item -dump
+Dump each input packet.
+@item -hex
+When dumping packets, also dump the payload.
+@item -bitexact
+Only use bit exact algorithms (for codec testing).
+@item -ps size
+Set packet size in bits.
+@item -re
+Read input at native frame rate. Mainly used to simulate a grab device.
+@item -loop_input
+Loop over the input stream. Currently it works only for image
+streams. This option is used for automatic FFserver testing.
+@item -loop_output number_of_times
+Repeatedly loop output for formats that support looping such as animated GIF
+(0 will loop the output infinitely).
+@item -threads count
+Thread count.
+@item -vsync parameter
+Video sync method. Video will be stretched/squeezed to match the timestamps;
+this is done by duplicating and dropping frames. With -map you can select from
+which stream the timestamps should be taken. You can leave either video or
+audio unchanged and sync the remaining stream(s) to the unchanged one.
+@item -async samples_per_second
+Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps;
+the parameter is the maximum number of samples per second by which the audio is changed.
+-async 1 is a special case where only the start of the audio stream is corrected
+without any later correction.
+@end table
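+
+For example, to correct only the initial audio offset and leave later
+timestamps untouched, as described for -async 1 above (a sketch):
+
+@example
+ffmpeg -i input.avi -async 1 output.avi
+@end example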
+
+@node FFmpeg formula evaluator
+@section FFmpeg formula evaluator
+
+When evaluating a rate control string, FFmpeg uses an internal formula
+evaluator.
+
+The following binary operators are available: @code{+}, @code{-},
+@code{*}, @code{/}, @code{^}.
+
+The following unary operators are available: @code{+}, @code{-},
+@code{(...)}.
+
+The following functions are available:
+@table @var
+@item sinh(x)
+@item cosh(x)
+@item tanh(x)
+@item sin(x)
+@item cos(x)
+@item tan(x)
+@item exp(x)
+@item log(x)
+@item squish(x)
+@item gauss(x)
+@item abs(x)
+@item max(x, y)
+@item min(x, y)
+@item gt(x, y)
+@item lt(x, y)
+@item eq(x, y)
+@item bits2qp(bits)
+@item qp2bits(qp)
+@end table
+
+The following constants are available:
+@table @var
+@item PI
+@item E
+@item iTex
+@item pTex
+@item tex
+@item mv
+@item fCode
+@item iCount
+@item mcVar
+@item var
+@item isI
+@item isP
+@item isB
+@item avgQP
+@item qComp
+@item avgIITex
+@item avgPITex
+@item avgPPTex
+@item avgBPTex
+@item avgTex
+@end table
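+
+As an illustration, the default equation mentioned above can also be passed
+explicitly (a sketch; the quotes keep the shell from touching the expression):
+
+@example
+ffmpeg -i input.avi -b 1000k -rc_eq 'tex^qComp' output.avi
+@end example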
+
+@c man end
+
+@ignore
+
+@setfilename ffmpeg
+@settitle FFmpeg video converter
+
+@c man begin SEEALSO
+ffserver(1), ffplay(1) and the HTML documentation of @file{ffmpeg}.
+@c man end
+
+@c man begin AUTHOR
+Fabrice Bellard
+@c man end
+
+@end ignore
+
+@section Protocols
+
+The filename can be @file{-} to read from standard input or to write
+to standard output.
+
+FFmpeg also handles many protocols specified with a URL syntax.
+
+Use 'ffmpeg -formats' to see a list of the supported protocols.
+
+The protocol @code{http:} is currently used only to communicate with
+FFserver (see the FFserver documentation). When FFmpeg becomes a
+video player, it will also be used for streaming :-)
+
+@chapter Tips
+
+@itemize
+@item For streaming at very low bitrates, use a low frame rate
+and a small GOP size. This is especially true for RealVideo where
+the Linux player does not seem to be very fast, so it can miss
+frames. An example is:
+
+@example
+ffmpeg -g 3 -r 3 -t 10 -b 50k -s qcif -f rv10 /tmp/b.rm
+@end example
+
+@item The parameter 'q' which is displayed while encoding is the current
+quantizer. The value 1 indicates that a very good quality could
+be achieved. The value 31 indicates the worst quality. If q=31 appears
+too often, it means that the encoder cannot compress enough to meet
+your bitrate. You must either increase the bitrate, decrease the
+frame rate or decrease the frame size.
+
+@item If your computer is not fast enough, you can speed up the
+compression at the expense of the compression ratio. You can use
+'-me zero' to speed up motion estimation, and '-intra' to disable
+motion estimation completely (you have only I-frames, which means it
+is about as good as JPEG compression).
+
+@item To have very low audio bitrates, reduce the sampling frequency
+(down to 22050 Hz for MPEG audio, 22050 or 11025 Hz for AC3).
+
+@item To have a constant quality (but a variable bitrate), use the option
+'-qscale n' where 'n' is between 1 (excellent quality) and 31 (worst
+quality); see the example after this list.
+
+@item When converting video files, you can use the '-sameq' option which
+uses the same quality factor in the encoder as in the decoder.
+It allows almost lossless encoding.
+
+@end itemize
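+
+For instance, a constant-quality encode at a mid-range quantizer (a sketch;
+adjust the value to taste):
+
+@example
+ffmpeg -i input.avi -qscale 5 output.avi
+@end example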
+
+
+@chapter External Libraries
+
+FFmpeg can be hooked up with a number of external libraries to add support
+for more formats.
+
+@section AMR
+
+AMR comes in two different flavors, WB and NB. FFmpeg can make use of the
+AMR WB (floating-point mode) and the AMR NB (both floating-point and
+fixed-point mode) reference decoders and encoders.
+
+@itemize
+
+@item For AMR WB floating-point download TS26.204 V5.1.0 from
+@url{http://www.3gpp.org/ftp/Specs/archive/26_series/26.204/26204-510.zip}
+and extract the source to @file{libavcodec/amrwb_float/}.
+
+@item For AMR NB floating-point download TS26.104 REL-5 V5.1.0 from
+@url{http://www.3gpp.org/ftp/Specs/archive/26_series/26.104/26104-510.zip}
+and extract the source to @file{libavcodec/amr_float/}.
+If you try this on Alpha, you may need to change @code{Word32} to
+@code{int} in @file{amr/typedef.h}.
+
+@item For AMR NB fixed-point download TS26.073 REL-5 V5.1.0 from
+@url{http://www.3gpp.org/ftp/Specs/archive/26_series/26.073/26073-510.zip}
+and extract the source to @file{libavcodec/amr}.
+You must also add @code{-DMMS_IO} and remove @code{-pedantic-errors}
+to/from @code{CFLAGS} in @file{libavcodec/amr/makefile}, i.e.
+``@code{CFLAGS = -Wall -I. \$(CFLAGS_\$(MODE)) -D\$(VAD) -DMMS_IO}''.
+
+@end itemize
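+
+Once the sources are in place, enable the corresponding codecs at configure
+time, for example (a sketch; check @code{./configure --help} for the exact
+option spelling):
+
+@example
+./configure --enable-amr_nb --enable-amr_wb
+@end example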
+
+
+@chapter Supported File Formats and Codecs
+
+You can use the @code{-formats} option to have an exhaustive list.
+
+@section File Formats
+
+FFmpeg supports the following file formats through the @code{libavformat}
+library:
+
+@multitable @columnfractions .4 .1 .1 .4
+@item Supported File Format @tab Encoding @tab Decoding @tab Comments
+@item MPEG audio @tab X @tab X
+@item MPEG-1 systems @tab X @tab X
+@tab muxed audio and video
+@item MPEG-2 PS @tab X @tab X
+@tab also known as @code{VOB} file
+@item MPEG-2 TS @tab @tab X
+@tab also known as DVB Transport Stream
+@item ASF@tab X @tab X
+@item AVI@tab X @tab X
+@item WAV@tab X @tab X
+@item Macromedia Flash@tab X @tab X
+@tab Only embedded audio is decoded.
+@item FLV @tab X @tab X
+@tab Macromedia Flash video files
+@item Real Audio and Video @tab X @tab X
+@item Raw AC3 @tab X @tab X
+@item Raw MJPEG @tab X @tab X
+@item Raw MPEG video @tab X @tab X
+@item Raw PCM8/16 bits, mulaw/Alaw@tab X @tab X
+@item Raw CRI ADX audio @tab X @tab X
+@item Raw Shorten audio @tab @tab X
+@item SUN AU format @tab X @tab X
+@item NUT @tab X @tab X @tab NUT Open Container Format
+@item QuickTime @tab X @tab X
+@item MPEG-4 @tab X @tab X
+@tab MPEG-4 is a variant of QuickTime.
+@item Raw MPEG4 video @tab X @tab X
+@item DV @tab X @tab X
+@item 4xm @tab @tab X
+@tab 4X Technologies format, used in some games.
+@item Playstation STR @tab @tab X
+@item Id RoQ @tab @tab X
+@tab Used in Quake III, Jedi Knight 2, other computer games.
+@item Interplay MVE @tab @tab X
+@tab Format used in various Interplay computer games.
+@item WC3 Movie @tab @tab X
+@tab Multimedia format used in Origin's Wing Commander III computer game.
+@item Sega FILM/CPK @tab @tab X
+@tab Used in many Sega Saturn console games.
+@item Westwood Studios VQA/AUD @tab @tab X
+@tab Multimedia formats used in Westwood Studios games.
+@item Id Cinematic (.cin) @tab @tab X
+@tab Used in Quake II.
+@item FLIC format @tab @tab X
+@tab .fli/.flc files
+@item Sierra VMD @tab @tab X
+@tab Used in Sierra CD-ROM games.
+@item Sierra Online @tab @tab X
+@tab .sol files used in Sierra Online games.
+@item Matroska @tab @tab X
+@item Electronic Arts Multimedia @tab @tab X
+@tab Used in various EA games; files have extensions like WVE and UV2.
+@item Nullsoft Video (NSV) format @tab @tab X
+@item ADTS AAC audio @tab X @tab X
+@item Creative VOC @tab X @tab X @tab Created for the Sound Blaster Pro.
+@item American Laser Games MM @tab @tab X
+@tab Multimedia format used in games like Mad Dog McCree
+@item AVS @tab @tab X
+@tab Multimedia format used by the Creature Shock game.
+@item Smacker @tab @tab X
+@tab Multimedia format used by many games.
+@item GXF @tab X @tab X
+@tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley playout servers.
+@item CIN @tab @tab X
+@tab Multimedia format used by Delphine Software games.
+@item MXF @tab @tab X
+@tab Material eXchange Format SMPTE 377M, used by D-Cinema, broadcast industry.
+@item SEQ @tab @tab X
+@tab Tiertex .seq files used in the DOS CDROM version of the game Flashback.
+@end multitable
+
+@code{X} means that encoding (resp. decoding) is supported.
+
+@section Image Formats
+
+FFmpeg can read and write images for each frame of a video sequence. The
+following image formats are supported:
+
+@multitable @columnfractions .4 .1 .1 .4
+@item Supported Image Format @tab Encoding @tab Decoding @tab Comments
+@item PGM, PPM @tab X @tab X
+@item PAM @tab X @tab X @tab PAM is a PNM extension with alpha support.
+@item PGMYUV @tab X @tab X @tab PGM with U and V components in YUV 4:2:0
+@item JPEG @tab X @tab X @tab Progressive JPEG is not supported.
+@item .Y.U.V @tab X @tab X @tab one raw file per component
+@item animated GIF @tab X @tab X @tab Only uncompressed GIFs are generated.
+@item PNG @tab X @tab X @tab 2 bit and 4 bit/pixel not supported yet.
+@item Targa @tab @tab X @tab Targa (.TGA) image format.
+@item TIFF @tab @tab X @tab Only 24 bit/pixel images are supported.
+@item SGI @tab X @tab X @tab SGI RGB image format
+@end multitable
+
+@code{X} means that encoding (resp. decoding) is supported.
+
+@section Video Codecs
+
+@multitable @columnfractions .4 .1 .1 .4
+@item Supported Codec @tab Encoding @tab Decoding @tab Comments
+@item MPEG-1 video @tab X @tab X
+@item MPEG-2 video @tab X @tab X
+@item MPEG-4 @tab X @tab X
+@item MSMPEG4 V1 @tab X @tab X
+@item MSMPEG4 V2 @tab X @tab X
+@item MSMPEG4 V3 @tab X @tab X
+@item WMV7 @tab X @tab X
+@item WMV8 @tab X @tab X @tab not completely working
+@item WMV9 @tab @tab X @tab not completely working
+@item VC1 @tab @tab X
+@item H.261 @tab X @tab X
+@item H.263(+) @tab X @tab X @tab also known as RealVideo 1.0
+@item H.264 @tab @tab X
+@item RealVideo 1.0 @tab X @tab X
+@item RealVideo 2.0 @tab X @tab X
+@item MJPEG @tab X @tab X
+@item lossless MJPEG @tab X @tab X
+@item JPEG-LS @tab X @tab X @tab fourcc: MJLS, lossless and near-lossless are supported
+@item Apple MJPEG-B @tab @tab X
+@item Sunplus MJPEG @tab @tab X @tab fourcc: SP5X
+@item DV @tab X @tab X
+@item HuffYUV @tab X @tab X
+@item FFmpeg Video 1 @tab X @tab X @tab experimental lossless codec (fourcc: FFV1)
+@item FFmpeg Snow @tab X @tab X @tab experimental wavelet codec (fourcc: SNOW)
+@item Asus v1 @tab X @tab X @tab fourcc: ASV1
+@item Asus v2 @tab X @tab X @tab fourcc: ASV2
+@item Creative YUV @tab @tab X @tab fourcc: CYUV
+@item Sorenson Video 1 @tab X @tab X @tab fourcc: SVQ1
+@item Sorenson Video 3 @tab @tab X @tab fourcc: SVQ3
+@item On2 VP3 @tab @tab X @tab still experimental
+@item On2 VP5 @tab @tab X @tab fourcc: VP50
+@item On2 VP6 @tab @tab X @tab fourcc: VP62
+@item Theora @tab @tab X @tab still experimental
+@item Intel Indeo 3 @tab @tab X
+@item FLV @tab X @tab X @tab Sorenson H.263 used in Flash
+@item Flash Screen Video @tab @tab X @tab fourcc: FSV1
+@item ATI VCR1 @tab @tab X @tab fourcc: VCR1
+@item ATI VCR2 @tab @tab X @tab fourcc: VCR2
+@item Cirrus Logic AccuPak @tab @tab X @tab fourcc: CLJR
+@item 4X Video @tab @tab X @tab Used in certain computer games.
+@item Sony Playstation MDEC @tab @tab X
+@item Id RoQ @tab @tab X @tab Used in Quake III, Jedi Knight 2, other computer games.
+@item Xan/WC3 @tab @tab X @tab Used in Wing Commander III .MVE files.
+@item Interplay Video @tab @tab X @tab Used in Interplay .MVE files.
+@item Apple Animation @tab @tab X @tab fourcc: 'rle '
+@item Apple Graphics @tab @tab X @tab fourcc: 'smc '
+@item Apple Video @tab @tab X @tab fourcc: rpza
+@item Apple QuickDraw @tab @tab X @tab fourcc: qdrw
+@item Cinepak @tab @tab X
+@item Microsoft RLE @tab @tab X
+@item Microsoft Video-1 @tab @tab X
+@item Westwood VQA @tab @tab X
+@item Id Cinematic Video @tab @tab X @tab Used in Quake II.
+@item Planar RGB @tab @tab X @tab fourcc: 8BPS
+@item FLIC video @tab @tab X
+@item Duck TrueMotion v1 @tab @tab X @tab fourcc: DUCK
+@item Duck TrueMotion v2 @tab @tab X @tab fourcc: TM20
+@item VMD Video @tab @tab X @tab Used in Sierra VMD files.
+@item MSZH @tab @tab X @tab Part of LCL
+@item ZLIB @tab X @tab X @tab Part of LCL, encoder experimental
+@item TechSmith Camtasia @tab @tab X @tab fourcc: TSCC
+@item IBM Ultimotion @tab @tab X @tab fourcc: ULTI
+@item Miro VideoXL @tab @tab X @tab fourcc: VIXL
+@item QPEG @tab @tab X @tab fourccs: QPEG, Q1.0, Q1.1
+@item LOCO @tab @tab X @tab
+@item Winnov WNV1 @tab @tab X @tab
+@item Autodesk Animator Studio Codec @tab @tab X @tab fourcc: AASC
+@item Fraps FPS1 @tab @tab X @tab
+@item CamStudio @tab @tab X @tab fourcc: CSCD
+@item American Laser Games Video @tab @tab X @tab Used in games like Mad Dog McCree
+@item ZMBV @tab @tab X @tab
+@item AVS Video @tab @tab X @tab Video encoding used by the Creature Shock game.
+@item Smacker Video @tab @tab X @tab Video encoding used in Smacker.
+@item RTjpeg @tab @tab X @tab Video encoding used in NuppelVideo files.
+@item KMVC @tab @tab X @tab Codec used in Worms games.
+@item VMware Video @tab @tab X @tab Codec used in videos captured by VMware.
+@item Cin Video @tab @tab X @tab Codec used in Delphine Software games.
+@item Tiertex Seq Video @tab @tab X @tab Codec used in DOS CDROM FlashBack game.
+@end multitable
+
+@code{X} means that encoding (resp. decoding) is supported.
+
+@section Audio Codecs
+
+@multitable @columnfractions .4 .1 .1 .4
+@item Supported Codec @tab Encoding @tab Decoding @tab Comments
+@item MPEG audio layer 2 @tab IX @tab IX
+@item MPEG audio layer 1/3 @tab IX @tab IX
+@tab MP3 encoding is supported through the external library LAME.
+@item AC3 @tab IX @tab IX
+@tab liba52 is used internally for decoding.
+@item Vorbis @tab X @tab X
+@item WMA V1/V2 @tab @tab X
+@item AAC @tab X @tab X
+@tab Supported through the external library libfaac/libfaad.
+@item Microsoft ADPCM @tab X @tab X
+@item MS IMA ADPCM @tab X @tab X
+@item QT IMA ADPCM @tab @tab X
+@item 4X IMA ADPCM @tab @tab X
+@item G.726 ADPCM @tab X @tab X
+@item Duck DK3 IMA ADPCM @tab @tab X
+@tab Used in some Sega Saturn console games.
+@item Duck DK4 IMA ADPCM @tab @tab X
+@tab Used in some Sega Saturn console games.
+@item Westwood Studios IMA ADPCM @tab @tab X
+@tab Used in Westwood Studios games like Command and Conquer.
+@item SMJPEG IMA ADPCM @tab @tab X
+@tab Used in certain Loki game ports.
+@item CD-ROM XA ADPCM @tab @tab X
+@item CRI ADX ADPCM @tab X @tab X
+@tab Used in Sega Dreamcast games.
+@item Electronic Arts ADPCM @tab @tab X
+@tab Used in various EA titles.
+@item Creative ADPCM @tab @tab X
+@tab 16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2
+@item RA144 @tab @tab X
+@tab Real 14400 bit/s codec
+@item RA288 @tab @tab X
+@tab Real 28800 bit/s codec
+@item RADnet @tab X @tab IX
+@tab Real low bitrate AC3 codec, liba52 is used for decoding.
+@item AMR-NB @tab X @tab X
+@tab Supported through an external library.
+@item AMR-WB @tab X @tab X
+@tab Supported through an external library.
+@item DV audio @tab @tab X
+@item Id RoQ DPCM @tab @tab X
+@tab Used in Quake III, Jedi Knight 2, other computer games.
+@item Interplay MVE DPCM @tab @tab X
+@tab Used in various Interplay computer games.
+@item Xan DPCM @tab @tab X
+@tab Used in Origin's Wing Commander IV AVI files.
+@item Sierra Online DPCM @tab @tab X
+@tab Used in Sierra Online game audio files.
+@item Apple MACE 3 @tab @tab X
+@item Apple MACE 6 @tab @tab X
+@item FLAC lossless audio @tab @tab X
+@item Shorten lossless audio @tab @tab X
+@item Apple lossless audio @tab @tab X
+@tab QuickTime fourcc 'alac'
+@item FFmpeg Sonic @tab X @tab X
+@tab experimental lossy/lossless codec
+@item Qdesign QDM2 @tab @tab X
+@tab there are still some distortions
+@item Real COOK @tab @tab X
+@tab All versions except 5.1 are supported
+@item DSP Group TrueSpeech @tab @tab X
+@item True Audio (TTA) @tab @tab X
+@item Smacker Audio @tab @tab X
+@item WavPack Audio @tab @tab X
+@item Cin Audio @tab @tab X
+@tab Codec used in Delphine Software games.
+@item Intel Music Coder @tab @tab X
+@end multitable
+
+@code{X} means that encoding (resp. decoding) is supported.
+
+@code{I} means that an integer-only version is available, too (ensures high
+performance on systems without hardware floating point support).
+
+@chapter Platform Specific information
+
+@section Linux
+
+FFmpeg should be compiled with at least GCC 2.95.3. GCC 3.2 is the
+preferred compiler now for FFmpeg. All future optimizations will depend on
+features only found in GCC 3.2.
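+
+For example, a plain native build (the install prefix is only an
+illustration) usually boils down to:
+
+@example
+./configure --prefix=/usr/local
+make
+make install
+@end example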
+
+@section BSD
+
+BSD make will not build FFmpeg; you need to install and use GNU Make
+(@file{gmake}).
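+
+A minimal build then looks like (the install step is optional):
+
+@example
+./configure
+gmake
+gmake install
+@end example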
+
+@section Windows
+
+@subsection Native Windows compilation
+
+@itemize
+@item Install the current versions of MSYS and MinGW from
+@url{http://www.mingw.org/}. You can find detailed installation
+instructions in the download section and the FAQ.
+
+@item If you want to test FFplay, also download
+the MinGW development library of SDL 1.2.x
+(@file{SDL-devel-1.2.x-mingw32.tar.gz}) from
+@url{http://www.libsdl.org}. Unpack it in a temporary directory, and
+unpack the archive @file{i386-mingw32msvc.tar.gz} in the MinGW tool
+directory. Edit the @file{sdl-config} script so that it gives the
+correct SDL directory when invoked.
+
+@item Extract the current version of FFmpeg.
+
+@item Start the MSYS shell (file @file{msys.bat}).
+
+@item Change to the FFmpeg directory and follow
+ the instructions for compiling FFmpeg (file
+@file{INSTALL}). Usually, launching @file{./configure} and @file{make}
+suffices. If you have problems using SDL, verify that
+@file{sdl-config} can be launched from the MSYS command line.
+
+@item You can install FFmpeg in @file{Program Files/FFmpeg} by typing
+@file{make install}. Don't forget to copy @file{SDL.dll} to the place
+you launch @file{ffplay} from.
+
+@end itemize
+
+Notes:
+@itemize
+
+@item The target @file{make wininstaller} can be used to create a
+Nullsoft based Windows installer for FFmpeg and FFplay. @file{SDL.dll}
+must be copied to the FFmpeg directory in order to build the
+installer.
+
+@item By using @code{./configure --enable-shared} when configuring FFmpeg,
+you can build @file{avcodec.dll} and @file{avformat.dll}. With
+@code{make install} you install the FFmpeg DLLs and the associated
+headers in @file{Program Files/FFmpeg}.
+
+@item Visual C++ compatibility: If you used @code{./configure --enable-shared}
+when configuring FFmpeg, FFmpeg tries to use the Microsoft Visual
+C++ @code{lib} tool to build @code{avcodec.lib} and
+@code{avformat.lib}. With these libraries you can link your Visual C++
+code directly with the FFmpeg DLLs (see below).
+
+@end itemize
+
+@subsection Visual C++ compatibility
+
+FFmpeg will not compile under Visual C++ -- and it has too many
+dependencies on the GCC compiler to make a port viable. However,
+if you want to use the FFmpeg libraries in your own applications,
+you can still compile those applications using Visual C++. An
+important restriction to this is that you have to use the
+dynamically linked versions of the FFmpeg libraries (i.e. the
+DLLs), and you have to make sure that Visual-C++-compatible
+import libraries are created during the FFmpeg build process.
+
+This description of how to use the FFmpeg libraries with Visual C++ is
+based on Visual C++ 2005 Express Edition Beta 2. If you have a different
+version, you might have to modify the procedures slightly.
+
+Here are the step-by-step instructions for building the FFmpeg libraries
+so they can be used with Visual C++:
+
+@enumerate
+
+@item Install Visual C++ (if you haven't done so already).
+
+@item Install MinGW and MSYS as described above.
+
+@item Add a call to @file{vcvars32.bat} (which sets up the environment
+variables for the Visual C++ tools) as the first line of
+@file{msys.bat}. The standard location for @file{vcvars32.bat} is
+@file{C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat},
+and the standard location for @file{msys.bat} is
+@file{C:\msys\1.0\msys.bat}. If this corresponds to your setup, add the
+following line as the first line of @file{msys.bat}:
+
+@code{call "C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat"}
+
+@item Start the MSYS shell (file @file{msys.bat}) and type @code{link.exe}.
+If you get a help message with the command line options of @code{link.exe},
+this means your environment variables are set up correctly, the
+Microsoft linker is on the path and will be used by FFmpeg to
+create Visual-C++-compatible import libraries.
+
+@item Extract the current version of FFmpeg and change to the FFmpeg directory.
+
+@item Type the command
+@code{./configure --enable-shared --disable-static --enable-memalign-hack}
+to configure and, if that didn't produce any errors,
+type @code{make} to build FFmpeg.
+
+@item The subdirectories @file{libavformat}, @file{libavcodec}, and
+@file{libavutil} should now contain the files @file{avformat.dll},
+@file{avformat.lib}, @file{avcodec.dll}, @file{avcodec.lib},
+@file{avutil.dll}, and @file{avutil.lib}, respectively. Copy the three
+DLLs to your System32 directory (typically @file{C:\Windows\System32}).
+
+@end enumerate
+
+And here is how to use these libraries with Visual C++:
+
+@enumerate
+
+@item Create a new console application ("File / New / Project") and then
+select "Win32 Console Application". On the appropriate page of the
+Application Wizard, uncheck the "Precompiled headers" option.
+
+@item Write the source code for your application, or, for testing, just
+copy the code from an existing sample application into the source file
+that Visual C++ has already created for you. (Note that your source
+file has to have a @code{.cpp} extension; otherwise, Visual C++ won't
+compile the FFmpeg headers correctly because in C mode, it doesn't
+recognize the @code{inline} keyword.) For example, you can copy
+@file{output_example.c} from the FFmpeg distribution (but you will
+have to make minor modifications so the code will compile under
+C++, see below).
+
+@item Open the "Project / Properties" dialog box. In the "Configuration"
+combo box, select "All Configurations" so that the changes you make will
+affect both debug and release builds. In the tree view on the left hand
+side, select "C/C++ / General", then edit the "Additional Include
+Directories" setting to contain the complete paths to the
+@file{libavformat}, @file{libavcodec}, and @file{libavutil}
+subdirectories of your FFmpeg directory. Note that the directories have
+to be separated using semicolons. Now select "Linker / General" from the
+tree view and edit the "Additional Library Directories" setting to
+contain the same three directories.
+
+@item Still in the "Project / Properties" dialog box, select "Linker / Input"
+from the tree view, then add the files @file{avformat.lib},
+@file{avcodec.lib}, and @file{avutil.lib} to the end of the "Additional
+Dependencies". Note that the names of the libraries have to be separated
+using spaces.
+
+@item Now, select "C/C++ / Code Generation" from the tree view. Select
+"Debug" in the "Configuration" combo box. Make sure that "Runtime
+Library" is set to "Multi-threaded Debug DLL". Then, select "Release" in
+the "Configuration" combo box and make sure that "Runtime Library" is
+set to "Multi-threaded DLL".
+
+@item Click "OK" to close the "Project / Properties" dialog box and build
+the application. Hopefully, it should compile and run cleanly. If you
+used @file{output_example.c} as your sample application, you will get a
+few compiler errors, but they are easy to fix. The first type of error
+occurs because Visual C++ doesn't allow an @code{int} to be converted to
+an @code{enum} without a cast. To solve the problem, insert the required
+casts (this error occurs once for a @code{CodecID} and once for a
+@code{CodecType}). The second type of error occurs because C++ requires
+the return value of @code{malloc} to be cast to the exact type of the
+pointer it is being assigned to. Visual C++ will complain that, for
+example, @code{(void *)} is being assigned to @code{(uint8_t *)} without
+an explicit cast. So insert an explicit cast in these places to silence
+the compiler. The third type of error occurs because the @code{snprintf}
+library function is called @code{_snprintf} under Visual C++. So just
+add an underscore to fix the problem. With these changes,
+@file{output_example.c} should compile under Visual C++, and the
+resulting executable should produce valid video files. (A sketch of
+these three fixes is shown after this list.)
+
+@end enumerate
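+
+The following sketch illustrates the three fixes; the variable names are
+only placeholders:
+
+@example
+/* enum values need explicit casts in C++ */
+c->codec_id   = (CodecID)  requested_codec_id;
+c->codec_type = (CodecType)requested_codec_type;
+
+/* C++ requires a cast on the result of malloc() */
+uint8_t *video_outbuf = (uint8_t *)malloc(video_outbuf_size);
+
+/* Visual C++ calls snprintf _snprintf */
+_snprintf(filename, sizeof(filename), "%s-%d.ppm", prefix, frame_number);
+@end example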
+
+@subsection Cross compilation for Windows with Linux
+
+You must use the MinGW cross compilation tools available at
+@url{http://www.mingw.org/}.
+
+Then configure FFmpeg with the following options:
+@example
+./configure --enable-mingw32 --cross-prefix=i386-mingw32msvc-
+@end example
+(you can change the cross-prefix according to the prefix chosen for the
+MinGW tools).
+
+Then you can easily test FFmpeg with Wine
+(@url{http://www.winehq.com/}).
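+
+For instance (the file names are placeholders), a quick smoke test under
+Wine might look like:
+
+@example
+wine ./ffmpeg.exe -i input.avi output.mpg
+@end example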
+
+@subsection Compilation under Cygwin
+
+Cygwin works very much like Unix.
+
+Just install your Cygwin with all the "Base" packages, plus the
+following "Devel" ones:
+@example
+binutils, gcc-core, make, subversion
+@end example
+
+Do not install binutils-20060709-1 (they are buggy on shared builds);
+use binutils-20050610-1 instead.
+
+Then run
+
+@example
+./configure --enable-static --disable-shared
+@end example
+
+to make a static build or
+
+@example
+./configure --enable-shared --disable-static
+@end example
+
+to build shared libraries.
+
+If you want to build FFmpeg with additional libraries, download Cygwin
+"Devel" packages for Ogg and Vorbis from any Cygwin packages repository
+and/or SDL, xvid, faac, faad2 packages from Cygwin Ports,
+(@url{http://cygwinports.dotsrc.org/}).
+
+@subsection Crosscompilation for Windows under Cygwin
+
+With Cygwin you can create Windows binaries that don't need the cygwin1.dll.
+
+Just install your Cygwin as explained before, plus these additional
+"Devel" packages:
+@example
+gcc-mingw-core, mingw-runtime, mingw-zlib
+@end example
+
+and add some special flags to your configure invocation.
+
+For a static build run
+@example
+./configure --enable-mingw32 --enable-memalign-hack --enable-static --disable-shared --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
+@end example
+
+and for a build with shared libraries
+@example
+./configure --enable-mingw32 --enable-memalign-hack --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
+@end example
+
+@section BeOS
+
+The configure script should guess the configuration itself.
+Networking support is currently not finished.
+errno issues fixed by Andrew Bachmann.
+
+Old stuff:
+
+François Revol - revol at free dot fr - April 2002
+
+The configure script should guess the configuration itself,
+however I still didn't test building on the net_server version of BeOS.
+
+FFserver is broken (needs poll() implementation).
+
+There are still issues with errno codes, which are negative in BeOS and
+which FFmpeg negates when returning. This ends up turning errors into
+valid results and then leads to crashes.
+(To be fixed)
+
+@chapter Developers Guide
+
+@section API
+@itemize @bullet
+@item libavcodec is the library containing the codecs (both encoding and
+decoding). Look at @file{libavcodec/apiexample.c} to see how to use it; a
+condensed sketch of the decoding calls is also shown after this list.
+
+@item libavformat is the library containing the file format handling (mux and
+demux code for several formats). Look at @file{ffplay.c} to use it in a
+player. See @file{output_example.c} to use it to generate audio or video
+streams.
+
+@end itemize
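+
+A very condensed sketch of the decoding calls, modeled on
+@file{apiexample.c} (the function name and the @code{inbuf}/@code{size}
+buffer are only placeholders, and all error handling is omitted):
+
+@example
+#include "avcodec.h"
+
+static void decode_sketch(uint8_t *inbuf, int size)
+@{
+    AVCodec *codec;
+    AVCodecContext *c;
+    AVFrame *picture;
+    int got_picture;
+
+    avcodec_init();
+    avcodec_register_all();
+
+    codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
+    c = avcodec_alloc_context();
+    picture = avcodec_alloc_frame();
+    avcodec_open(c, codec);
+
+    /* one chunk of the bitstream, as delivered by a demuxer */
+    avcodec_decode_video(c, picture, &got_picture, inbuf, size);
+
+    avcodec_close(c);
+    av_free(c);
+    av_free(picture);
+@}
+@end example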
+
+@section Integrating libavcodec or libavformat in your program
+
+You can integrate all the source code of the libraries to link them
+statically to avoid any version problem. All you need is to provide a
+'config.mak' and a 'config.h' in the parent directory. See the defines
+generated by ./configure to understand what is needed.
+
+You can use libavcodec or libavformat in your commercial program, but
+@emph{any patch you make must be published}. The best way to proceed is
+to send your patches to the FFmpeg mailing list.
+
+@node Coding Rules
+@section Coding Rules
+
+FFmpeg is programmed in the ISO C90 language with a few additional
+features from ISO C99, namely:
+@itemize @bullet
+@item
+the @samp{inline} keyword;
+@item
+@samp{//} comments;
+@item
+designated struct initializers (@samp{struct s x = @{ .i = 17 @};})
+@item
+compound literals (@samp{x = (struct s) @{ 17, 23 @};})
+@end itemize
+
+These features are supported by all compilers we care about, so we won't
+accept patches to remove their use unless they absolutely don't impair
+clarity and performance.
+
+All code must compile with GCC 2.95 and GCC 3.3. Currently, FFmpeg also
+compiles with several other compilers, such as the Compaq ccc compiler
+or Sun Studio 9, and we would like to keep it that way unless it would
+be exceedingly involved. To ensure compatibility, please don't use any
+additional C99 features or GCC extensions. Especially watch out for:
+@itemize @bullet
+@item
+mixing statements and declarations;
+@item
+@samp{long long} (use @samp{int64_t} instead);
+@item
+@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
+@item
+GCC statement expressions (@samp{(x = (@{ int y = 4; y; @}))}).
+@end itemize
+
+Indent size is 4.
+The presentation is the one specified by 'indent -i4 -kr -nut'.
+The TAB character is forbidden outside of Makefiles as is any
+form of trailing whitespace. Commits containing either will be
+rejected by the Subversion repository.
+
+The main priority in FFmpeg is simplicity and small code size (= fewer
+bugs).
+
+Comments: Use the JavaDoc/Doxygen
+format (see examples below) so that code documentation
+can be generated automatically. All nontrivial functions should have a comment
+above them explaining what the function does, even if it's just one sentence.
+All structures and their member variables should be documented, too.
+@example
+/**
+ * @@file mpeg.c
+ * MPEG codec.
+ * @@author ...
+ */
+
+/**
+ * Summary sentence.
+ * more text ...
+ * ...
+ */
+typedef struct Foobar@{
+ int var1; /**< var1 description */
+ int var2; ///< var2 description
+ /** var3 description */
+ int var3;
+@} Foobar;
+
+/**
+ * Summary sentence.
+ * more text ...
+ * ...
+ * @@param my_parameter description of my_parameter
+ * @@return return value description
+ */
+int myfunc(int my_parameter)
+...
+@end example
+
+fprintf and printf are forbidden in libavformat and libavcodec,
+please use av_log() instead.
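+
+For example (the context pointer and the message are just an illustration):
+
+@example
+av_log(avctx, AV_LOG_ERROR, "unsupported frame size %dx%d\n", width, height);
+@end example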
+
+@section Development Policy
+
+@enumerate
+@item
+ You must not commit code which breaks FFmpeg! (Meaning unfinished but
+ enabled code which breaks compilation or compiles but does not work or
+ breaks the regression tests)
+ You can commit unfinished stuff (for testing etc), but it must be disabled
+ (#ifdef etc) by default so it does not interfere with other developers'
+ work.
+@item
+ You don't have to over-test things. If it works for you, and you think it
+ should work for others, then commit. If your code has problems
+ (portability, triggers compiler bugs, unusual environment etc) they will be
+ reported and eventually fixed.
+@item
+ Do not commit unrelated changes together, split them into self-contained
+ pieces.
+@item
+ Do not change behavior of the program (renaming options etc) without
+ first discussing it on the ffmpeg-devel mailing list. Do not remove
+ functionality from the code. Just improve!
+
+ Note: Redundant code can be removed.
+@item
+ Do not commit changes to the build system (Makefiles, configure script)
+ which change behavior, defaults etc, without asking first. The same
+ applies to compiler warning fixes, trivial looking fixes and to code
+ maintained by other developers. We usually have a reason for doing things
+ the way we do. Send your changes as patches to the ffmpeg-devel mailing
+ list, and if the code maintainers say OK, you may commit. This does not
+ apply to files you wrote and/or maintain.
+@item
+ We refuse source indentation and other cosmetic changes if they are mixed
+ with functional changes; such commits will be rejected and removed. Every
+ developer has his own indentation style and you should not change it. Of
+ course, if you (re)write something, you can use your own style, even though
+ we would prefer if the indentation throughout FFmpeg were consistent (many
+ projects force a given indentation style - we don't). If you really need to
+ make indentation changes (try to avoid this), separate them strictly from
+ real changes.
+
+ NOTE: If you had to put if()@{ .. @} over a large (> 5 lines) chunk of code,
+ then either do NOT change the indentation of the inner part (do not move it
+ to the right), or do so in a separate commit.
+@item
+ Always fill out the commit log message. Describe in a few lines what you
+ changed and why. You can refer to mailing list postings if you fix a
+ particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
+@item
+ If you apply a patch by someone else, include the name and email address in
+ the log message. Since the ffmpeg-cvslog mailing list is publicly
+ archived you should add some SPAM protection to the email address. Send an
+ answer to ffmpeg-devel (or wherever you got the patch from) saying that
+ you applied the patch.
+@item
+ Do NOT commit to code actively maintained by others without permission.
+ Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
+ timeframe (12h for build failures and security fixes, 3 days for small
+ changes, 1 week for big patches), then commit your patch if you think it is OK.
+ Also note, the maintainer can simply ask for more time to review!
+@item
+ Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
+ are sent there and reviewed by all the other developers. Bugs and possible
+ improvements or general questions regarding commits are discussed there. We
+ expect you to react if problems with your code are uncovered.
+@item
+ Update the documentation if you change behavior or add features. If you are
+ unsure how best to do this, send a patch to ffmpeg-devel, the documentation
+ maintainer(s) will review and commit your stuff.
+@item
+ Never write to unallocated memory, never write over the end of arrays,
+ always check values read from some untrusted source before using them
+ as array index or other risky things.
+@item
+ Remember to check if you need to bump versions for the specific libav
+ parts (libavutil, libavcodec, libavformat) you are changing. You need
+ to change the version integer and the version string (an illustration
+ of these macros is shown after this list).
+ Incrementing the first component means no backward compatibility with
+ previous versions (e.g. removal of a function from the public API).
+ Incrementing the second component means a backward compatible change
+ (e.g. addition of a function to the public API).
+ Incrementing the third component means a noteworthy binary compatible
+ change (e.g. an encoder bug fix that matters for the decoder).
+@item
+ If you add a new codec, remember to update the changelog, add it to
+ the supported codecs table in the documentation and bump the second
+ component of the @file{libavcodec} version number appropriately. If
+ it has a fourcc, add it to @file{libavformat/avienc.c}, even if it
+ is only a decoder.
+@end enumerate
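+
+As an illustration of the version bump above, the relevant macros in
+@file{avcodec.h} look roughly like this (names and numbers are shown only
+as an example; check the actual header):
+
+@example
+#define LIBAVCODEC_VERSION_INT ((51<<16)+(41<<8)+0)
+#define LIBAVCODEC_VERSION     51.41.0
+#define LIBAVCODEC_BUILD       LIBAVCODEC_VERSION_INT
+@end example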
+
+We think our rules are not too hard. If you have comments, contact us.
+
+Note, these rules are mostly borrowed from the MPlayer project.
+
+@section Submitting patches
+
+First, read the Coding Rules (@pxref{Coding Rules}) above if you have not done so yet.
+
+When you submit your patch, try to send a unified diff (diff '-up'
+option). I cannot read other diffs :-)
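+
+For example (the file names are placeholders):
+
+@example
+diff -up ffmpeg.orig/libavcodec/foo.c ffmpeg/libavcodec/foo.c > foo.patch
+@end example
+
+@code{svn diff} also produces unified diffs by default.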
+
+Also please do not submit patches which contain several unrelated changes.
+Split them into individual self-contained patches; this makes reviewing
+them much easier.
+
+Run the regression tests before submitting a patch so that you can
+verify that there are no big problems.
+
+Patches should be posted as base64 encoded attachments (or any other
+encoding which ensures that the patch won't be trashed during
+transmission) to the ffmpeg-devel mailing list, see
+@url{http://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-devel}
+
+It also helps quite a bit if you tell us what the patch does (for example
+'replaces lrint by lrintf'), and why (for example '*BSD isn't C99 compliant
+and has no lrint()')
+
+We reply to all submitted patches and either apply or reject with some
+explanation why, but sometimes we are quite busy so it can take a week or two.
+
+@section Regression tests
+
+Before submitting a patch (or committing to the repository), you should at least
+test that you did not break anything.
+
+The regression tests build a synthetic video stream and a synthetic
+audio stream. These are then encoded and decoded with all codecs or
+formats. The CRC (or MD5) of each generated file is recorded in a
+result file. A 'diff' is launched to compare the reference results and
+the result file.
+
+The regression tests then go on to test the FFserver code with a
+limited set of streams. It is important that this step runs correctly
+as well.
+
+Run 'make test' to test all the codecs and formats.
+
+Run 'make fulltest' to test all the codecs, formats and FFserver.
+
+[Of course, some patches may change the results of the regression tests. In
+this case, the reference results of the regression tests shall be modified
+accordingly].
+
+@bye
diff --git a/contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt b/contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt
new file mode 100644
index 000000000..2eb4ee71a
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt
@@ -0,0 +1,172 @@
+FFmpeg & evaluating performance on the PowerPC Architecture HOWTO
+
+(c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
+
+
+
+I - Introduction
+
+The PowerPC architecture and its SIMD extension AltiVec offer some
+interesting tools to evaluate performance and improve the code.
+This document tries to explain how to use those tools with FFmpeg.
+
+The architecture itself offers two ways to evaluate the performance of
+a given piece of code:
+
+1) The Time Base Registers (TBL)
+2) The Performance Monitor Counter Registers (PMC)
+
+The first ones are always available, always active, but they're not very
+accurate: the registers increment by one every four *bus* cycles. On
+my 667 MHz tiBook (ppc7450), this means once every twenty *processor*
+cycles. So we won't use that.
+
+The PMC are much more useful: not only can they report cycle-accurate
+timing, but they can also be used to monitor many other parameters,
+such as the number of AltiVec stalls for every kind of instruction,
+or instruction cache misses. The downside is that not all processors
+support the PMC (all G3, all G4 and the 970 do support them), and
+they're inactive by default - you need to activate them with a
+dedicated tool. Also, the number of available PMC depends on the
+processor: the various 604 have 2, the various 75x (aka G3) have 4,
+and the various 74xx (aka G4) have 6.
+
+*WARNING*: The PowerPC 970 is not very well documented, and its PMC
+registers are 64 bits wide. To properly notify the code, you *must*
+tune for the 970 (using --tune=970), or the code will assume 32 bit
+registers.
+
+
+II - Enabling FFmpeg PowerPC performance support
+
+This needs to be done by hand. First, you need to configure FFmpeg as
+usual, but add the "--powerpc-perf-enable" option. For instance:
+
+#####
+./configure --prefix=/usr/local/ffmpeg-svn --cc=gcc-3.3 --tune=7450 --powerpc-perf-enable
+#####
+
+This will configure FFmpeg to install inside /usr/local/ffmpeg-svn,
+compiling with gcc-3.3 (you should try to use this one or a newer
+gcc), and tuning for the PowerPC 7450 (i.e. the newer G4; as a rule of
+thumb, those at 550 MHz and more). It will also enable the PMC.
+
+You may also edit the file "config.h" to enable the following line:
+
+#####
+// #define ALTIVEC_USE_REFERENCE_C_CODE 1
+#####
+
+If you enable this line, then the code will not make use of AltiVec,
+but will use the reference C code instead. This is useful to compare
+performance between two versions of the code.
+
+Also, the number of enabled PMC is defined in "libavcodec/ppc/dsputil_ppc.h":
+
+#####
+#define POWERPC_NUM_PMC_ENABLED 4
+#####
+
+If you have a G4 CPU, you can enable all 6 PMC. DO NOT enable more
+PMC than available on your CPU!
+
+Then, simply compile FFmpeg as usual (make && make install).
+
+
+
+III - Using FFmpeg PowerPC performance support
+
+This FFmpeg can be used exactly as usual. But before exiting, FFmpeg
+will dump a per-function report that looks like this:
+
+#####
+PowerPC performance report
+ Values are from the PMC registers, and represent whatever the
+ registers are set to record.
+ Function "gmc1_altivec" (pmc1):
+ min: 231
+ max: 1339867
+ avg: 558.25 (255302)
+ Function "gmc1_altivec" (pmc2):
+ min: 93
+ max: 2164
+ avg: 267.31 (255302)
+ Function "gmc1_altivec" (pmc3):
+ min: 72
+ max: 1987
+ avg: 276.20 (255302)
+(...)
+#####
+
+In this example, PMC1 was set to record CPU cycles, PMC2 was set to
+record AltiVec Permute Stall Cycles, and PMC3 was set to record AltiVec
+Issue Stalls.
+
+The function "gmc1_altivec" was monitored 255302 times, and the
+minimum execution time was 231 processor cycles. The max and average
+aren't much use, as it's very likely the OS interrupted execution for
+reasons of its own :-(
+
+With the exact same settings and source file, but using the reference C
+code we get:
+
+#####
+PowerPC performance report
+ Values are from the PMC registers, and represent whatever the
+ registers are set to record.
+ Function "gmc1_altivec" (pmc1):
+ min: 592
+ max: 2532235
+ avg: 962.88 (255302)
+ Function "gmc1_altivec" (pmc2):
+ min: 0
+ max: 33
+ avg: 0.00 (255302)
+ Function "gmc1_altivec" (pmc3):
+ min: 0
+ max: 350
+ avg: 0.03 (255302)
+(...)
+#####
+
+592 cycles, so the fastest AltiVec execution is about 2.5x faster than
+the fastest C execution in this example. It's not perfect but it's not
+bad (well I wrote this function so I can't say otherwise :-).
+
+Once you have that kind of report, you can try to improve things by
+finding what goes wrong and fixing it; in the example above, one
+should try to diminish the number of AltiVec stalls, as this *may*
+improve performance.
+
+
+
+IV) Enabling the PMC in Mac OS X
+
+This is easy. Use "MONster" and "monster". Those tools come from
+Apple's CHUD package, and can be found hidden in the developer web
+site & FTP site. "MONster" is the graphical application, use it to
+generate a config file specifying what each register should
+monitor. Then use the command-line application "monster" to use that
+config file, and enjoy the results.
+
+Note that "MONster" can be used for many other things, but it's
+documented by Apple, it's not my subject.
+
+If you are using CHUD 4.4.2 or later, you'll notice that MONster is
+no longer available. It has been superseded by Shark, where
+configuration of PMCs is available as a plugin.
+
+
+
+V) Enabling the PMC on Linux
+
+On Linux you may use oprofile from http://oprofile.sf.net. Depending on the
+version and the CPU, you may need to apply a patch[1] to access a set of the
+possible counters from the userspace application. You can always define them
+using the kernel interface /dev/oprofile/* .
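+
+A typical session (the input file is a placeholder, and this sketch assumes
+the unstripped ffmpeg_g binary produced by the build; use
+"opcontrol --list-events" to see what your CPU offers) could look like:
+
+#####
+opcontrol --init
+opcontrol --no-vmlinux
+opcontrol --start
+./ffmpeg_g -i input.avi -y /tmp/out.avi
+opcontrol --dump
+opreport -l ./ffmpeg_g
+opcontrol --shutdown
+#####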
+
+[1] http://dev.gentoo.org/~lu_zero/development/oprofile-g4-20060423.patch
+
+--
+Romain Dolbeau <romain@dolbeau.org>
+Luca Barbato <lu_zero@gentoo.org>
diff --git a/contrib/ffmpeg/doc/ffplay-doc.texi b/contrib/ffmpeg/doc/ffplay-doc.texi
new file mode 100644
index 000000000..db08eb38f
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffplay-doc.texi
@@ -0,0 +1,104 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle FFplay Documentation
+@titlepage
+@sp 7
+@center @titlefont{FFplay Documentation}
+@sp 3
+@end titlepage
+
+
+@chapter Introduction
+
+@c man begin DESCRIPTION
+FFplay is a very simple and portable media player using the FFmpeg
+libraries and the SDL library. It is mostly used as a testbed for the
+various FFmpeg APIs.
+@c man end
+
+@chapter Invocation
+
+@section Syntax
+@example
+@c man begin SYNOPSIS
+ffplay [options] @file{input_file}
+@c man end
+@end example
+
+@c man begin OPTIONS
+@section Main options
+
+@table @option
+@item -h
+show help
+@item -x width
+force displayed width
+@item -y height
+force displayed height
+@item -an
+disable audio
+@item -vn
+disable video
+@item -nodisp
+disable graphical display
+@item -f fmt
+force format
+@end table
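+
+For example (the input file is just a placeholder), forcing a 320x240
+window with audio disabled:
+
+@example
+ffplay -x 320 -y 240 -an input.avi
+@end example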
+
+@section Advanced options
+@table @option
+@item -stats
+Show the stream duration, the codec parameters, the current position in
+the stream and the audio/video synchronisation drift.
+@item -rtp_tcp
+Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful
+if you are streaming with the RTSP protocol.
+@item -sync type
+Set the master clock to audio (@code{type=audio}), video
+(@code{type=video}) or external (@code{type=ext}). Default is audio. The
+master clock is used to control audio-video synchronization. Most media
+players use audio as master clock, but in some cases (streaming or high
+quality broadcast) it is necessary to change that. This option is mainly
+used for debugging purposes.
+@end table
+
+@section While playing
+
+@table @key
+@item q, ESC
+quit
+
+@item f
+toggle full screen
+
+@item p, SPC
+pause
+
+@item a
+cycle audio channel
+
+@item v
+cycle video channel
+
+@item w
+show audio waves
+@end table
+
+@c man end
+
+@ignore
+
+@setfilename ffplay
+@settitle FFplay media player
+
+@c man begin SEEALSO
+ffmpeg(1), ffserver(1) and the HTML documentation of @file{ffmpeg}.
+@c man end
+
+@c man begin AUTHOR
+Fabrice Bellard
+@c man end
+
+@end ignore
+
+@bye
diff --git a/contrib/ffmpeg/doc/ffserver-doc.texi b/contrib/ffmpeg/doc/ffserver-doc.texi
new file mode 100644
index 000000000..ed67bb6c0
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffserver-doc.texi
@@ -0,0 +1,224 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle FFserver Documentation
+@titlepage
+@sp 7
+@center @titlefont{FFserver Documentation}
+@sp 3
+@end titlepage
+
+
+@chapter Introduction
+
+@c man begin DESCRIPTION
+FFserver is a streaming server for both audio and video. It supports
+several live feeds, streaming from files and time shifting on live feeds
+(you can seek to positions in the past on each live feed, provided you
+specify a big enough feed storage in ffserver.conf).
+
+This documentation covers only the streaming aspects of ffserver /
+ffmpeg. All questions about parameters for ffmpeg, codec questions,
+etc. are not covered here. Read @file{ffmpeg-doc.html} for more
+information.
+@c man end
+
+@chapter QuickStart
+
+[Contributed by Philip Gladstone, philip-ffserver at gladstonefamily dot net]
+
+@section What can this do?
+
+When properly configured and running, you can capture video and audio in real
+time from a suitable capture card, and stream it out over the Internet to
+either Windows Media Player or RealAudio player (with some restrictions).
+
+It can also stream from files, though that is currently broken. Very often, a
+web server can be used to serve up the files just as well.
+
+It can stream prerecorded video from .ffm files, though it is somewhat tricky
+to make it work correctly.
+
+@section What do I need?
+
+I use Linux on a 900MHz Duron with a cheapo Bt848 based TV capture card. I'm
+using stock Linux 2.4.17 with the stock drivers. [Actually that isn't true,
+I needed some special drivers for my motherboard-based sound card.]
+
+I understand that FreeBSD systems work just fine as well.
+
+@section How do I make it work?
+
+First, build the kit. It *really* helps to have installed LAME first. Then when
+you run the ffserver ./configure, make sure that you have the --enable-mp3lame
+flag turned on.
+
+LAME is important as it allows for streaming audio to Windows Media Player.
+Don't ask why the other audio types do not work.
+
+As a simple test, just run the following two command lines (assuming that you
+have a V4L video capture card):
+
+@example
+./ffserver -f doc/ffserver.conf &
+./ffmpeg http://localhost:8090/feed1.ffm
+@end example
+
+At this point you should be able to go to your Windows machine and fire up
+Windows Media Player (WMP). Go to Open URL and enter
+
+@example
+ http://<linuxbox>:8090/test.asf
+@end example
+
+You should (after a short delay) see video and hear audio.
+
+WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to
+transfer the entire file before starting to play.
+The same is true of AVI files.
+
+@section What happens next?
+
+You should edit the ffserver.conf file to suit your needs (in terms of
+frame rates etc). Then install ffserver and ffmpeg, write a script to start
+them up, and off you go.
+
+@section Troubleshooting
+
+@subsection I don't hear any audio, but video is fine.
+
+Maybe you didn't install LAME, or got your ./configure statement wrong. Check
+the ffmpeg output to see if a line referring to MP3 is present. If not, then
+your configuration was incorrect. If it is, then maybe your wiring is not
+set up correctly. Maybe the sound card is not getting data from the right
+input source. Maybe you have a really awful audio interface (like I do)
+that only captures in stereo and also requires that one channel be flipped.
+If you are one of these people, then export 'AUDIO_FLIP_LEFT=1' before
+starting ffmpeg.
+
+@subsection The audio and video lose sync after a while.
+
+Yes, they do.
+
+@subsection After a long while, the video update rate goes way down in WMP.
+
+Yes, it does. Who knows why?
+
+@subsection WMP 6.4 behaves differently to WMP 7.
+
+Yes, it does. Any thoughts on this would be gratefully received. These
+differences extend to embedding WMP into a web page. [There are two
+object IDs that you can use: The old one, which does not play well, and
+the new one, which does (both tested on the same system). However,
+I suspect that the new one is not available unless you have installed WMP 7].
+
+@section What else can it do?
+
+You can replay video from .ffm files that was recorded earlier.
+However, there are a number of caveats, including the fact that the
+ffserver parameters must match the original parameters used to record the
+file. If they do not, then ffserver deletes the file before recording into it.
+(Now that I write this, it seems broken).
+
+You can fiddle with many of the codec choices and encoding parameters, and
+there are a bunch more parameters that you cannot control. Post a message
+to the mailing list if there are some 'must have' parameters. Look in
+ffserver.conf for a list of the currently available controls.
+
+It will automatically generate the ASX or RAM files that are often used
+in browsers. These files are actually redirections to the underlying ASF
+or RM file. The reason for this is that the browser often fetches the
+entire file before starting up the external viewer. The redirection files
+are very small and can be transferred quickly. [The stream itself is
+often 'infinite' and thus the browser tries to download it and never
+finishes.]
+
+@section Tips
+
+* When you connect to a live stream, most players (WMP, RA, etc) want to
+buffer a certain number of seconds of material so that they can display the
+signal continuously. However, ffserver (by default) starts sending data
+in realtime. This means that there is a pause of a few seconds while the
+buffering is being done by the player. The good news is that this can be
+cured by adding a '?buffer=5' to the end of the URL. This means that the
+stream should start 5 seconds in the past -- and so the first 5 seconds
+of the stream are sent as fast as the network will allow. It will then
+slow down to real time. This noticeably improves the startup experience.
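+
+For example, using the test stream from the QuickStart section:
+
+@example
+http://<linuxbox>:8090/test.asf?buffer=5
+@end example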
+
+You can also add a 'Preroll 15' statement into the ffserver.conf that will
+add the 15 second prebuffering on all requests that do not otherwise
+specify a time. In addition, ffserver will skip frames until a key_frame
+is found. This further reduces the startup delay by not transferring data
+that will be discarded.
+
+* You may want to adjust the MaxBandwidth in the ffserver.conf to limit
+the amount of bandwidth consumed by live streams.
+
+@section Why does the ?buffer / Preroll stop working after a time?
+
+It turns out that (on my machine at least) the number of frames successfully
+grabbed is marginally less than the number that ought to be grabbed. This
+means that the timestamp in the encoded data stream gets behind realtime.
+This means that if you say 'Preroll 10', then when the stream gets 10
+or more seconds behind, there is no Preroll left.
+
+Fixing this requires a change in the internals of how timestamps are
+handled.
+
+@section Does the @code{?date=} stuff work?
+
+Yes (subject to the limitation outlined above). Also note that whenever you
+start ffserver, it deletes the ffm file (if any parameters have changed),
+thus wiping out what you had recorded before.
+
+The format of the @code{?date=xxxxxx} is fairly flexible. You should use one
+of the following formats (the 'T' is literal):
+
+@example
+* YYYY-MM-DDTHH:MM:SS (localtime)
+* YYYY-MM-DDTHH:MM:SSZ (UTC)
+@end example
+
+You can omit the YYYY-MM-DD, and then it refers to the current day. However
+note that @samp{?date=16:00:00} refers to 16:00 on the current day -- this
+may be in the future and so is unlikely to be useful.
+
+You use this by adding the ?date= to the end of the URL for the stream.
+For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
+
+@chapter Invocation
+@section Syntax
+@example
+@c man begin SYNOPSIS
+ffserver [options]
+@c man end
+@end example
+
+@section Options
+@c man begin OPTIONS
+@table @option
+@item -L
+Print the license.
+@item -h
+Print the help.
+@item -f configfile
+Use @file{configfile} instead of @file{/etc/ffserver.conf}.
+@end table
+@c man end
+
+@ignore
+
+@setfilename ffserver
+@settitle FFserver video server
+
+@c man begin SEEALSO
+ffmpeg(1), ffplay(1), the @file{ffmpeg/doc/ffserver.conf} example and
+the HTML documentation of @file{ffmpeg}.
+@c man end
+
+@c man begin AUTHOR
+Fabrice Bellard
+@c man end
+
+@end ignore
+
+@bye
diff --git a/contrib/ffmpeg/doc/ffserver.conf b/contrib/ffmpeg/doc/ffserver.conf
new file mode 100644
index 000000000..a3b3ff412
--- /dev/null
+++ b/contrib/ffmpeg/doc/ffserver.conf
@@ -0,0 +1,349 @@
+# Port on which the server is listening. You must select a different
+# port from your standard HTTP web server if it is running on the same
+# computer.
+Port 8090
+
+# Address on which the server is bound. Only useful if you have
+# several network interfaces.
+BindAddress 0.0.0.0
+
+# Number of simultaneous requests that can be handled. Since FFServer
+# is very fast, it is more likely that you will want to leave this high
+# and use MaxBandwidth, below.
+MaxClients 1000
+
+# This is the maximum amount of kbit/sec that you are prepared to
+# consume when streaming to clients.
+MaxBandwidth 1000
+
+# Access log file (uses standard Apache log file format)
+# '-' is the standard output.
+CustomLog -
+
+# Suppress that if you want to launch ffserver as a daemon.
+NoDaemon
+
+
+##################################################################
+# Definition of the live feeds. Each live feed contains one video
+# and/or audio sequence coming from an ffmpeg encoder or another
+# ffserver. This sequence may be encoded simultaneously with several
+# codecs at several resolutions.
+
+<Feed feed1.ffm>
+
+# You must use 'ffmpeg' to send a live feed to ffserver. In this
+# example, you can type:
+#
+# ffmpeg http://localhost:8090/feed1.ffm
+
+# ffserver can also do time shifting. It means that it can stream any
+# previously recorded live stream. The request should contain:
+# "http://xxxx?date=[YYYY-MM-DDT][[HH:]MM:]SS[.m...]".You must specify
+# a path where the feed is stored on disk. You also specify the
+# maximum size of the feed, where zero means unlimited. Default:
+# File=/tmp/feed_name.ffm FileMaxSize=5M
+File /tmp/feed1.ffm
+FileMaxSize 200K
+
+# You could specify
+# ReadOnlyFile /saved/specialvideo.ffm
+# This marks the file as readonly and it will not be deleted or updated.
+
+# Specify launch in order to start ffmpeg automatically.
+# First ffmpeg must be defined with an appropriate path if needed,
+# after that options can follow, but avoid adding the http:// field
+#Launch ffmpeg
+
+# Only allow connections from localhost to the feed.
+ACL allow 127.0.0.1
+
+</Feed>
+
+
+##################################################################
+# Now you can define each stream which will be generated from the
+# original audio and video stream. Each format has a filename (here
+# 'test1.mpg'). FFServer will send this stream when answering a
+# request containing this filename.
+
+<Stream test1.mpg>
+
+# coming from live feed 'feed1'
+Feed feed1.ffm
+
+# Format of the stream : you can choose among:
+# mpeg : MPEG-1 multiplexed video and audio
+# mpegvideo : only MPEG-1 video
+# mp2 : MPEG-2 audio (use AudioCodec to select layer 2 and 3 codec)
+# ogg : Ogg format (Vorbis audio codec)
+# rm : RealNetworks-compatible stream. Multiplexed audio and video.
+# ra : RealNetworks-compatible stream. Audio only.
+# mpjpeg : Multipart JPEG (works with Netscape without any plugin)
+# jpeg : Generate a single JPEG image.
+# asf : ASF compatible streaming (Windows Media Player format).
+# swf : Macromedia Flash compatible stream
+# avi : AVI format (MPEG-4 video, MPEG audio sound)
+# master : special ffmpeg stream used to duplicate a server
+Format mpeg
+
+# Bitrate for the audio stream. Codecs usually support only a few
+# different bitrates.
+AudioBitRate 32
+
+# Number of audio channels: 1 = mono, 2 = stereo
+AudioChannels 1
+
+# Sampling frequency for audio. When using low bitrates, you should
+# lower this frequency to 22050 or 11025. The supported frequencies
+# depend on the selected audio codec.
+AudioSampleRate 44100
+
+# Bitrate for the video stream
+VideoBitRate 64
+
+# Ratecontrol buffer size
+VideoBufferSize 40
+
+# Number of frames per second
+VideoFrameRate 3
+
+# Size of the video frame: WxH (default: 160x128)
+# The following abbreviations are defined: sqcif, qcif, cif, 4cif
+VideoSize 160x128
+
+# Transmit only intra frames (useful for low bitrates, but kills frame rate).
+#VideoIntraOnly
+
+# If non-intra only, an intra frame is transmitted every VideoGopSize
+# frames. Video synchronization can only begin at an intra frame.
+VideoGopSize 12
+
+# More MPEG-4 parameters
+# VideoHighQuality
+# Video4MotionVector
+
+# Choose your codecs:
+#AudioCodec mp2
+#VideoCodec mpeg1video
+
+# Suppress audio
+#NoAudio
+
+# Suppress video
+#NoVideo
+
+#VideoQMin 3
+#VideoQMax 31
+
+# Set this to the number of seconds backwards in time to start. Note that
+# most players will buffer 5-10 seconds of video, and also you need to allow
+# for a keyframe to appear in the data stream.
+#Preroll 15
+
+# ACL:
+
+# You can allow ranges of addresses (or single addresses)
+#ACL ALLOW <first address> <last address>
+
+# You can deny ranges of addresses (or single addresses)
+#ACL DENY <first address> <last address>
+
+# You can repeat the ACL allow/deny as often as you like. It is on a per
+# stream basis. The first match defines the action. If there are no matches,
+# then the default is the inverse of the last ACL statement.
+#
+# Thus 'ACL allow localhost' only allows access from localhost.
+# 'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and
+# allow everybody else.
+
+</Stream>
+
+
+##################################################################
+# Example streams
+
+
+# Multipart JPEG
+
+#<Stream test.mjpg>
+#Feed feed1.ffm
+#Format mpjpeg
+#VideoFrameRate 2
+#VideoIntraOnly
+#NoAudio
+#Strict -1
+#</Stream>
+
+
+# Single JPEG
+
+#<Stream test.jpg>
+#Feed feed1.ffm
+#Format jpeg
+#VideoFrameRate 2
+#VideoIntraOnly
+##VideoSize 352x240
+#NoAudio
+#Strict -1
+#</Stream>
+
+
+# Flash
+
+#<Stream test.swf>
+#Feed feed1.ffm
+#Format swf
+#VideoFrameRate 2
+#VideoIntraOnly
+#NoAudio
+#</Stream>
+
+
+# ASF compatible
+
+<Stream test.asf>
+Feed feed1.ffm
+Format asf
+VideoFrameRate 15
+VideoSize 352x240
+VideoBitRate 256
+VideoBufferSize 40
+VideoGopSize 30
+AudioBitRate 64
+StartSendOnKey
+</Stream>
+
+
+# MP3 audio
+
+#<Stream test.mp3>
+#Feed feed1.ffm
+#Format mp2
+#AudioCodec mp3
+#AudioBitRate 64
+#AudioChannels 1
+#AudioSampleRate 44100
+#NoVideo
+#</Stream>
+
+
+# Ogg Vorbis audio
+
+#<Stream test.ogg>
+#Feed feed1.ffm
+#Title "Stream title"
+#AudioBitRate 64
+#AudioChannels 2
+#AudioSampleRate 44100
+#NoVideo
+#</Stream>
+
+
+# Real with audio only at 32 kbits
+
+#<Stream test.ra>
+#Feed feed1.ffm
+#Format rm
+#AudioBitRate 32
+#NoVideo
+#NoAudio
+#</Stream>
+
+
+# Real with audio and video at 64 kbits
+
+#<Stream test.rm>
+#Feed feed1.ffm
+#Format rm
+#AudioBitRate 32
+#VideoBitRate 128
+#VideoFrameRate 25
+#VideoGopSize 25
+#NoAudio
+#</Stream>
+
+
+##################################################################
+# A stream coming from a file: you only need to set the input
+# filename and optionally a new format. Supported conversions:
+# AVI -> ASF
+
+#<Stream file.rm>
+#File "/usr/local/httpd/htdocs/tlive.rm"
+#NoAudio
+#</Stream>
+
+#<Stream file.asf>
+#File "/usr/local/httpd/htdocs/test.asf"
+#NoAudio
+#Author "Me"
+#Copyright "Super MegaCorp"
+#Title "Test stream from disk"
+#Comment "Test comment"
+#</Stream>
+
+
+##################################################################
+# RTSP examples
+#
+# You can access this stream with the RTSP URL:
+# rtsp://localhost:5454/test1-rtsp.mpg
+#
+# A non-standard RTSP redirector is also created. Its URL is:
+# http://localhost:8090/test1-rtsp.rtsp
+
+#<Stream test1-rtsp.mpg>
+#Format rtp
+#File "/usr/local/httpd/htdocs/test1.mpg"
+#</Stream>
+
+
+##################################################################
+# SDP/multicast examples
+#
+# If you want to send your stream in multicast, you must set the
+# multicast address with MulticastAddress. The port and the TTL can
+# also be set.
+#
+# An SDP file is automatically generated by ffserver by adding the
+# 'sdp' extension to the stream name (here
+# http://localhost:8090/test1-sdp.sdp). You should usually give this
+# file to your player to play the stream.
+#
+# The 'NoLoop' option can be used to avoid looping when the stream is
+# terminated.
+
+#<Stream test1-sdp.mpg>
+#Format rtp
+#File "/usr/local/httpd/htdocs/test1.mpg"
+#MulticastAddress 224.124.0.1
+#MulticastPort 5000
+#MulticastTTL 16
+#NoLoop
+#</Stream>
+
+
+##################################################################
+# Special streams
+
+# Server status
+
+<Stream stat.html>
+Format status
+
+# Only allow local people to get the status
+ACL allow localhost
+ACL allow 192.168.0.0 192.168.255.255
+
+#FaviconURL http://pond1.gladstonefamily.net:8080/favicon.ico
+</Stream>
+
+
+# Redirect index.html to the appropriate site
+
+<Redirect index.html>
+URL http://www.ffmpeg.org/
+</Redirect>
+
+
diff --git a/contrib/ffmpeg/doc/hooks.texi b/contrib/ffmpeg/doc/hooks.texi
new file mode 100644
index 000000000..15013547c
--- /dev/null
+++ b/contrib/ffmpeg/doc/hooks.texi
@@ -0,0 +1,113 @@
+\input texinfo @c -*- texinfo -*-
+
+@settitle Video Hook Documentation
+@titlepage
+@sp 7
+@center @titlefont{Video Hook Documentation}
+@sp 3
+@end titlepage
+
+
+@chapter Introduction
+
+
+The video hook functionality is designed (mostly) for live video. It allows
+the video to be modified or examined between the decoder and the encoder.
+
+Any number of hook modules can be placed inline, and they are run in the
+order that they were specified on the ffmpeg command line.
+
+Three modules are provided and are described below. They are all intended to
+be used as a base for your own modules.
+
+Modules are loaded using the -vhook option to ffmpeg. The value of this parameter
+is a space separated list of arguments. The first is the module name, and the rest
+are passed as arguments to the Configure function of the module.
+
+@section null.c
+
+This does nothing. Actually it converts the input image to RGB24 and then converts
+it back again. This is meant as a sample that you can use to test your setup.
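+
+For example (assuming the module was built as @file{vhook/null.so}):
+
+@example
+ffmpeg -i input.avi -vhook /path/to/vhook/null.so output.avi
+@end example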
+
+@section fish.c
+
+This implements a 'fish detector'. Essentially it converts the image into HSV
+space and tests whether more than a certain percentage of the pixels fall into
+a specific HSV cuboid. If so, then the image is saved into a file for processing
+by other bits of code.
+
+Why use HSV? It turns out that HSV cuboids represent a more compact range of
+colors than would an RGB cuboid.
+
+@section imlib2.c
+
+This module implements a text overlay for a video image. Currently it
+supports a fixed overlay or reading the text from a file. The string
+is passed through strftime so that it is easy to imprint the date and
+time onto the image.
+
+You may also overlay an image (even semi-transparent) like TV stations do.
+You may move either the text or the image around your video to create
+scrolling credits, for example.
+
+Text fonts are looked up using the FONTPATH environment variable.
+
+Options:
+@multitable @columnfractions .2 .8
+@item @option{-c <color>} @tab The color of the text
+@item @option{-F <fontname>} @tab The font face and size
+@item @option{-t <text>} @tab The text
+@item @option{-f <filename>} @tab The filename to read text from
+@item @option{-x <expression>} @tab X coordinate of text or image
+@item @option{-y <expression>} @tab Y coordinate of text or image
+@item @option{-i <filename>} @tab The filename to read an image from
+@end multitable
+
+Expressions are functions of these variables:
+@multitable @columnfractions .2 .8
+@item @var{N} @tab frame number (starting at zero)
+@item @var{H} @tab frame height
+@item @var{W} @tab frame width
+@item @var{h} @tab image height
+@item @var{w} @tab image width
+@item @var{X} @tab previous x coordinate of text or image
+@item @var{Y} @tab previous y coordinate of text or image
+@end multitable
+
+You may also use the constants @var{PI} and @var{E}, as well as the math
+functions available in the FFmpeg formula evaluator (@url{ffmpeg-doc.html#SEC13}),
+except @var{bits2qp(bits)} and @var{qp2bits(qp)}.
+
+Usage examples:
+
+@example
+ # Remember to set the path to your fonts
+ FONTPATH="/cygdrive/c/WINDOWS/Fonts/"
+ FONTPATH="$FONTPATH:/usr/share/imlib2/data/fonts/"
+ FONTPATH="$FONTPATH:/usr/X11R6/lib/X11/fonts/TTF/"
+ export FONTPATH
+
+ # Bulb dancing in a Lissajous pattern
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -x W*(0.5+0.25*sin(N/47*PI))-w/2 -y H*(0.5+0.50*cos(N/97*PI))-h/2 -i /usr/share/imlib2/data/images/bulb.png' \
+ -acodec copy -sameq output.avi
+
+ # Text scrolling
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -c red -F Vera.ttf/20 -x 150+0.5*N -y 70+0.25*N -t Hello' \
+ -acodec copy -sameq output.avi
+@end example
+
+@section ppm.c
+
+It's basically a launch point for a PPM pipe, so you can use any
+executable (or script) which consumes a PPM on stdin and produces a PPM
+on stdout (and flushes each frame).
+
+Usage example:
+
+@example
+ffmpeg -i input -vhook "/path/to/ppm.so some-ppm-filter args" output
+@end example
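+
+As a hand-written sketch only (not shipped with FFmpeg), a trivial
+passthrough filter obeying this protocol could look like the following;
+it assumes plain binary P6 headers without comment lines:
+
+@example
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(void)
+{
+    int w, h, maxval;
+    /* each frame: "P6", width, height, maxval, one whitespace, raw pixels */
+    while (scanf("P6 %d %d %d", &w, &h, &maxval) == 3) {
+        size_t size = (size_t)w * h * 3;
+        unsigned char *buf = malloc(size);
+        getchar();                    /* the single whitespace after maxval */
+        if (!buf || fread(buf, 1, size, stdin) != size)
+            return 1;
+        /* a real filter would modify buf here */
+        printf("P6\n%d %d\n%d\n", w, h, maxval);
+        fwrite(buf, 1, size, stdout);
+        fflush(stdout);               /* flush once per frame */
+        free(buf);
+    }
+    return 0;
+}
+@end example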
+
+@bye
diff --git a/contrib/ffmpeg/doc/optimization.txt b/contrib/ffmpeg/doc/optimization.txt
new file mode 100644
index 000000000..26c5ae64c
--- /dev/null
+++ b/contrib/ffmpeg/doc/optimization.txt
@@ -0,0 +1,158 @@
+Optimization tips (for libavcodec):
+
+What to optimize:
+If you plan to do non-x86 architecture-specific optimizations (normally SIMD),
+then take a look in the i386/ directory, as the most important functions are
+already optimized for MMX.
+
+If you want to do x86 optimizations then you can either try to fine-tune the
+code in the i386 directory or find some other functions in the C source to
+optimize, but there aren't many left.
+
+Understanding these overoptimized functions:
+As many functions tend to be a bit difficult to understand because of
+optimizations, it can be hard to optimize them further or to write
+architecture-specific versions. It is recommended to look at older
+revisions of the interesting files (for a web frontend try ViewVC at
+http://svn.mplayerhq.hu/ffmpeg/trunk/).
+Alternatively, look into the other architecture-specific versions in
+the i386/, ppc/ and alpha/ subdirectories. Even if you don't fully
+comprehend the instructions, they can still help you understand the
+functions and how they can be optimized.
+
+NOTE: If you still don't understand some function, ask at our mailing list!!!
+(http://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-devel)
+
+
+
+WTF is that function good for ....:
+The primary purpose of this list is to avoid wasting time optimizing
+functions which are rarely used.
+
+put(_no_rnd)_pixels{,_x2,_y2,_xy2}
+ Used in motion compensation (en/decoding).
+
+avg_pixels{,_x2,_y2,_xy2}
+ Used in motion compensation of B-frames.
+ These are less important than the put*pixels functions.
+
+avg_no_rnd_pixels*
+ unused
+
+pix_abs16x16{,_x2,_y2,_xy2}
+ Used in motion estimation (encoding) with SAD.
+
+pix_abs8x8{,_x2,_y2,_xy2}
+ Used in motion estimation (encoding) with SAD of MPEG-4 4MV only.
+ These are less important than the pix_abs16x16* functions.
+
+put_mspel8_mc* / wmv2_mspel8*
+ Used only in WMV2.
+ it is not recommended that you waste your time with these, as WMV2
+ is an ugly and relatively useless codec.
+
+mpeg4_qpel* / *qpel_mc*
+ Used in MPEG-4 qpel motion compensation (encoding & decoding).
+ The qpel8 functions are used only for 4mv,
+ the avg_* functions are used only for B-frames.
+ Optimizing them should have a significant impact on qpel
+ encoding & decoding.
+
+qpel{8,16}_mc??_old_c / *pixels{8,16}_l4
+ Just used to work around a bug in an old libavcodec encoder version.
+ Don't optimize them.
+
+tpel_mc_func {put,avg}_tpel_pixels_tab
+ Used only for SVQ3, so only optimize them if you need fast SVQ3 decoding.
+
+add_bytes/diff_bytes
+ For huffyuv only, optimize if you want a faster ffhuffyuv codec.
+
+get_pixels / diff_pixels
+ Used for encoding, easy.
+
+clear_blocks
+ easiest to optimize
+
+gmc
+ Used for MPEG-4 gmc.
+ Optimizing this should have a significant effect on the gmc decoding
+ speed but it's very likely impossible to write in SIMD.
+
+gmc1
+ Used for chroma blocks in MPEG-4 gmc with 1 warp point
+ (there are 4 luma & 2 chroma blocks per macroblock, so
+ only 1/3 of the gmc blocks use this, the other 2/3
+ use the normal put_pixel* code, but only if there is
+ just 1 warp point).
+ Note: DivX5 gmc always uses just 1 warp point.
+
+pix_sum
+ Used for encoding.
+
+hadamard8_diff / sse / sad == pix_norm1 / dct_sad / quant_psnr / rd / bit
+ Specific compare functions used in encoding; which of these are used
+ depends upon the command-line switches.
+ Don't waste your time with dct_sad & quant_psnr, they aren't
+ really useful.
+
+put_pixels_clamped / add_pixels_clamped
+ Used for en/decoding in the IDCT, easy.
+ Note: some optimized IDCTs include the add/put clamped code, in which
+ case put_pixels_clamped / add_pixels_clamped will be unused.
+
+idct/fdct
+ idct (encoding & decoding)
+ fdct (encoding)
+ difficult to optimize
+
+dct_quantize_trellis
+ Used for encoding with trellis quantization.
+ difficult to optimize
+
+dct_quantize
+ Used for encoding.
+
+dct_unquantize_mpeg1
+ Used in MPEG-1 en/decoding.
+
+dct_unquantize_mpeg2
+ Used in MPEG-2 en/decoding.
+
+dct_unquantize_h263
+ Used in MPEG-4/H.263 en/decoding.
+
+FIXME remaining functions?
+BTW, most of these functions are in dsputil.c/.h, some are in mpegvideo.c/.h.
+
+
+
+Alignment:
+Some instructions on some architectures have strict alignment restrictions,
+for example most SSE/SSE2 instructions on x86.
+The minimum guaranteed alignment is written in the .h files, for example:
+ void (*put_pixels_clamped)(const DCTELEM *block/*align 16*/, UINT8 *pixels/*align 8*/, int line_size);
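+
+As a hand-written illustration (this is not code from libavcodec; DCTELEM is
+assumed here to be int16_t and the attribute is a GCC extension), a caller
+could satisfy such an "align 16" requirement like this:
+
+ #include <stdint.h>
+
+ typedef int16_t DCTELEM;
+
+ /* 16-byte aligned block, safe to pass to functions documented as align 16 */
+ static DCTELEM block[64] __attribute__((aligned(16)));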
+
+
+
+Links:
+http://www.aggregate.org/MAGIC/
+
+x86-specific:
+http://developer.intel.com/design/pentium4/manuals/248966.htm
+
+The IA-32 Intel Architecture Software Developer's Manual, Volume 2:
+Instruction Set Reference
+http://developer.intel.com/design/pentium4/manuals/245471.htm
+
+http://www.agner.org/assem/
+
+AMD Athlon Processor x86 Code Optimization Guide:
+http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/22007.pdf
+
+GCC asm links:
+official doc but quite ugly
+http://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html
+
+a bit old (note "+" is valid for input-output, even though the next disagrees)
+http://www.cs.virginia.edu/~clc5q/gcc-inline-asm.pdf
diff --git a/contrib/ffmpeg/doc/soc.txt b/contrib/ffmpeg/doc/soc.txt
new file mode 100644
index 000000000..8b4a86db8
--- /dev/null
+++ b/contrib/ffmpeg/doc/soc.txt
@@ -0,0 +1,24 @@
+Google Summer of Code and similar project guidelines
+
+Summer of Code is a project by Google in which students are paid to implement
+some nice new features for various participating open source projects ...
+
+This text is a collection of things to take care of for the next SoC, as
+it is a little late for this year's SoC (2006).
+
+The Goal:
+Our goal with respect to SoC is, and of course must be, exactly one thing:
+to improve FFmpeg. To reach this goal, code must
+* conform to the svn policy and patch submission guidelines
+* improve FFmpeg somehow (faster, smaller, "better",
+  more codecs supported, fewer bugs, cleaner, ...)
+
+For mentors and other developers to help students reach that goal, it is
+essential that changes to their codebase are publicly visible, clean and
+easily reviewable. That again leads us to:
+* use of a revision control system like svn
+* separation of cosmetic from non-cosmetic changes (this was almost entirely
+  ignored by mentors and students in SoC 2006, which might lead to a surprise
+  when the code is reviewed at the end before a possible inclusion in FFmpeg;
+  individual changes were generally not reviewable due to cosmetics)
+* frequent commits, so that comments can be provided early
diff --git a/contrib/ffmpeg/doc/texi2pod.pl b/contrib/ffmpeg/doc/texi2pod.pl
new file mode 100755
index 000000000..c414ffcc6
--- /dev/null
+++ b/contrib/ffmpeg/doc/texi2pod.pl
@@ -0,0 +1,427 @@
+#! /usr/bin/perl -w
+
+# Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
+
+# This file is part of GNU CC.
+
+# GNU CC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# GNU CC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with GNU CC; see the file COPYING. If not, write to
+# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301 USA
+
+# This does trivial (and I mean _trivial_) conversion of Texinfo
+# markup to Perl POD format. It's intended to be used to extract
+# something suitable for a manpage from a Texinfo document.
+
+$output = 0;
+$skipping = 0;
+%sects = ();
+$section = "";
+@icstack = ();
+@endwstack = ();
+@skstack = ();
+@instack = ();
+$shift = "";
+%defs = ();
+$fnno = 1;
+$inf = "";
+$ibase = "";
+
+while ($_ = shift) {
+ if (/^-D(.*)$/) {
+ if ($1 ne "") {
+ $flag = $1;
+ } else {
+ $flag = shift;
+ }
+ $value = "";
+ ($flag, $value) = ($flag =~ /^([^=]+)(?:=(.+))?/);
+ die "no flag specified for -D\n"
+ unless $flag ne "";
+ die "flags may only contain letters, digits, hyphens, dashes and underscores\n"
+ unless $flag =~ /^[a-zA-Z0-9_-]+$/;
+ $defs{$flag} = $value;
+ } elsif (/^-/) {
+ usage();
+ } else {
+ $in = $_, next unless defined $in;
+ $out = $_, next unless defined $out;
+ usage();
+ }
+}
+
+if (defined $in) {
+ $inf = gensym();
+ open($inf, "<$in") or die "opening \"$in\": $!\n";
+ $ibase = $1 if $in =~ m|^(.+)/[^/]+$|;
+} else {
+ $inf = \*STDIN;
+}
+
+if (defined $out) {
+ open(STDOUT, ">$out") or die "opening \"$out\": $!\n";
+}
+
+while(defined $inf) {
+while(<$inf>) {
+ # Certain commands are discarded without further processing.
+ /^\@(?:
+ [a-z]+index # @*index: useful only in complete manual
+ |need # @need: useful only in printed manual
+ |(?:end\s+)?group # @group .. @end group: ditto
+ |page # @page: ditto
+ |node # @node: useful only in .info file
+ |(?:end\s+)?ifnottex # @ifnottex .. @end ifnottex: use contents
+ )\b/x and next;
+
+ chomp;
+
+ # Look for filename and title markers.
+ /^\@setfilename\s+([^.]+)/ and $fn = $1, next;
+ /^\@settitle\s+([^.]+)/ and $tl = postprocess($1), next;
+
+ # Identify a man title but keep only the one we are interested in.
+ /^\@c\s+man\s+title\s+([A-Za-z0-9-]+)\s+(.+)/ and do {
+ if (exists $defs{$1}) {
+ $fn = $1;
+ $tl = postprocess($2);
+ }
+ next;
+ };
+
+ # Look for blocks surrounded by @c man begin SECTION ... @c man end.
+ # This really oughta be @ifman ... @end ifman and the like, but such
+ # would require rev'ing all other Texinfo translators.
+ /^\@c\s+man\s+begin\s+([A-Z]+)\s+([A-Za-z0-9-]+)/ and do {
+ $output = 1 if exists $defs{$2};
+ $sect = $1;
+ next;
+ };
+ /^\@c\s+man\s+begin\s+([A-Z]+)/ and $sect = $1, $output = 1, next;
+ /^\@c\s+man\s+end/ and do {
+ $sects{$sect} = "" unless exists $sects{$sect};
+ $sects{$sect} .= postprocess($section);
+ $section = "";
+ $output = 0;
+ next;
+ };
+
+ # handle variables
+ /^\@set\s+([a-zA-Z0-9_-]+)\s*(.*)$/ and do {
+ $defs{$1} = $2;
+ next;
+ };
+ /^\@clear\s+([a-zA-Z0-9_-]+)/ and do {
+ delete $defs{$1};
+ next;
+ };
+
+ next unless $output;
+
+ # Discard comments. (Can't do it above, because then we'd never see
+ # @c man lines.)
+ /^\@c\b/ and next;
+
+ # End-block handler goes up here because it needs to operate even
+ # if we are skipping.
+ /^\@end\s+([a-z]+)/ and do {
+ # Ignore @end foo, where foo is not an operation which may
+ # cause us to skip, if we are presently skipping.
+ my $ended = $1;
+ next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex)$/;
+
+ die "\@end $ended without \@$ended at line $.\n" unless defined $endw;
+ die "\@$endw ended by \@end $ended at line $.\n" unless $ended eq $endw;
+
+ $endw = pop @endwstack;
+
+ if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex)$/) {
+ $skipping = pop @skstack;
+ next;
+ } elsif ($ended =~ /^(?:example|smallexample|display)$/) {
+ $shift = "";
+ $_ = ""; # need a paragraph break
+ } elsif ($ended =~ /^(?:itemize|enumerate|[fv]?table)$/) {
+ $_ = "\n=back\n";
+ $ic = pop @icstack;
+ } else {
+ die "unknown command \@end $ended at line $.\n";
+ }
+ };
+
+ # We must handle commands which can cause skipping even while we
+ # are skipping, otherwise we will not process nested conditionals
+ # correctly.
+ /^\@ifset\s+([a-zA-Z0-9_-]+)/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = "ifset";
+ $skipping = 1 unless exists $defs{$1};
+ next;
+ };
+
+ /^\@ifclear\s+([a-zA-Z0-9_-]+)/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = "ifclear";
+ $skipping = 1 if exists $defs{$1};
+ next;
+ };
+
+ /^\@(ignore|menu|iftex)\b/ and do {
+ push @endwstack, $endw;
+ push @skstack, $skipping;
+ $endw = $1;
+ $skipping = 1;
+ next;
+ };
+
+ next if $skipping;
+
+ # Character entities. First the ones that can be replaced by raw text
+ # or discarded outright:
+ s/\@copyright\{\}/(c)/g;
+ s/\@dots\{\}/.../g;
+ s/\@enddots\{\}/..../g;
+ s/\@([.!? ])/$1/g;
+ s/\@[:-]//g;
+ s/\@bullet(?:\{\})?/*/g;
+ s/\@TeX\{\}/TeX/g;
+ s/\@pounds\{\}/\#/g;
+ s/\@minus(?:\{\})?/-/g;
+ s/\\,/,/g;
+
+ # Now the ones that have to be replaced by special escapes
+ # (which will be turned back into text by unmunge())
+ s/&/&amp;/g;
+ s/\@\{/&lbrace;/g;
+ s/\@\}/&rbrace;/g;
+ s/\@\@/&at;/g;
+
+ # Inside a verbatim block, handle @var specially.
+ if ($shift ne "") {
+ s/\@var\{([^\}]*)\}/<$1>/g;
+ }
+
+ # POD doesn't interpret E<> inside a verbatim block.
+ if ($shift eq "") {
+ s/</&lt;/g;
+ s/>/&gt;/g;
+ } else {
+ s/</&LT;/g;
+ s/>/&GT;/g;
+ }
+
+ # Single line command handlers.
+
+ /^\@include\s+(.+)$/ and do {
+ push @instack, $inf;
+ $inf = gensym();
+
+ # Try cwd and $ibase.
+ open($inf, "<" . $1)
+ or open($inf, "<" . $ibase . "/" . $1)
+ or die "cannot open $1 or $ibase/$1: $!\n";
+ next;
+ };
+
+ /^\@(?:section|unnumbered|unnumberedsec|center)\s+(.+)$/
+ and $_ = "\n=head2 $1\n";
+ /^\@subsection\s+(.+)$/
+ and $_ = "\n=head3 $1\n";
+
+ # Block command handlers:
+ /^\@itemize\s+(\@[a-z]+|\*|-)/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ $ic = $1;
+ $_ = "\n=over 4\n";
+ $endw = "itemize";
+ };
+
+ /^\@enumerate(?:\s+([a-zA-Z0-9]+))?/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ if (defined $1) {
+ $ic = $1 . ".";
+ } else {
+ $ic = "1.";
+ }
+ $_ = "\n=over 4\n";
+ $endw = "enumerate";
+ };
+
+ /^\@([fv]?table)\s+(\@[a-z]+)/ and do {
+ push @endwstack, $endw;
+ push @icstack, $ic;
+ $endw = $1;
+ $ic = $2;
+ $ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env)/B/;
+ $ic =~ s/\@(?:code|kbd)/C/;
+ $ic =~ s/\@(?:dfn|var|emph|cite|i)/I/;
+ $ic =~ s/\@(?:file)/F/;
+ $_ = "\n=over 4\n";
+ };
+
+ /^\@((?:small)?example|display)/ and do {
+ push @endwstack, $endw;
+ $endw = $1;
+ $shift = "\t";
+ $_ = ""; # need a paragraph break
+ };
+
+ /^\@itemx?\s*(.+)?$/ and do {
+ if (defined $1) {
+ # Entity escapes prevent munging by the <> processing below.
+ $_ = "\n=item $ic\&LT;$1\&GT;\n";
+ } else {
+ $_ = "\n=item $ic\n";
+ $ic =~ y/A-Ya-y/B-Zb-z/;
+ $ic =~ s/(\d+)/$1 + 1/eg;
+ }
+ };
+
+ $section .= $shift.$_."\n";
+}
+# End of current file.
+close($inf);
+$inf = pop @instack;
+}
+
+die "No filename or title\n" unless defined $fn && defined $tl;
+
+$sects{NAME} = "$fn \- $tl\n";
+$sects{FOOTNOTES} .= "=back\n" if exists $sects{FOOTNOTES};
+
+for $sect (qw(NAME SYNOPSIS DESCRIPTION OPTIONS EXAMPLES ENVIRONMENT FILES
+ BUGS NOTES FOOTNOTES SEEALSO AUTHOR COPYRIGHT)) {
+ if(exists $sects{$sect}) {
+ $head = $sect;
+ $head =~ s/SEEALSO/SEE ALSO/;
+ print "=head1 $head\n\n";
+ print scalar unmunge ($sects{$sect});
+ print "\n";
+ }
+}
+
+sub usage
+{
+ die "usage: $0 [-D toggle...] [infile [outfile]]\n";
+}
+
+sub postprocess
+{
+ local $_ = $_[0];
+
+ # @value{foo} is replaced by whatever 'foo' is defined as.
+ while (m/(\@value\{([a-zA-Z0-9_-]+)\})/g) {
+ if (! exists $defs{$2}) {
+ print STDERR "Option $2 not defined\n";
+ s/\Q$1\E//;
+ } else {
+ $value = $defs{$2};
+ s/\Q$1\E/$value/;
+ }
+ }
+
+ # Formatting commands.
+ # Temporary escape for @r.
+ s/\@r\{([^\}]*)\}/R<$1>/g;
+ s/\@(?:dfn|var|emph|cite|i)\{([^\}]*)\}/I<$1>/g;
+ s/\@(?:code|kbd)\{([^\}]*)\}/C<$1>/g;
+ s/\@(?:gccoptlist|samp|strong|key|option|env|command|b)\{([^\}]*)\}/B<$1>/g;
+ s/\@sc\{([^\}]*)\}/\U$1/g;
+ s/\@file\{([^\}]*)\}/F<$1>/g;
+ s/\@w\{([^\}]*)\}/S<$1>/g;
+ s/\@(?:dmn|math)\{([^\}]*)\}/$1/g;
+
+ # Cross references are thrown away, as are @noindent and @refill.
+ # (@noindent is impossible in .pod, and @refill is unnecessary.)
+ # @* is also impossible in .pod; we discard it and any newline that
+ # follows it. Similarly, our macro @gol must be discarded.
+
+ s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g;
+ s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g;
+ s/;\s+\@pxref\{(?:[^\}]*)\}//g;
+ s/\@noindent\s*//g;
+ s/\@refill//g;
+ s/\@gol//g;
+ s/\@\*\s*\n?//g;
+
+ # @uref can take one, two, or three arguments, with different
+ # semantics each time. @url and @email are just like @uref with
+ # one argument, for our purposes.
+ s/\@(?:uref|url|email)\{([^\},]*)\}/&lt;B<$1>&gt;/g;
+ s/\@uref\{([^\},]*),([^\},]*)\}/$2 (C<$1>)/g;
+ s/\@uref\{([^\},]*),([^\},]*),([^\},]*)\}/$3/g;
+
+ # Turn B<blah I<blah> blah> into B<blah> I<blah> B<blah> to
+ # match Texinfo semantics of @emph inside @samp. Also handle @r
+ # inside bold.
+ s/&LT;/</g;
+ s/&GT;/>/g;
+ 1 while s/B<((?:[^<>]|I<[^<>]*>)*)R<([^>]*)>/B<$1>${2}B</g;
+ 1 while (s/B<([^<>]*)I<([^>]+)>/B<$1>I<$2>B</g);
+ 1 while (s/I<([^<>]*)B<([^>]+)>/I<$1>B<$2>I</g);
+ s/[BI]<>//g;
+ s/([BI])<(\s+)([^>]+)>/$2$1<$3>/g;
+ s/([BI])<([^>]+?)(\s+)>/$1<$2>$3/g;
+
+ # Extract footnotes. This has to be done after all other
+ # processing because otherwise the regexp will choke on formatting
+ # inside @footnote.
+ while (/\@footnote/g) {
+ s/\@footnote\{([^\}]+)\}/[$fnno]/;
+ add_footnote($1, $fnno);
+ $fnno++;
+ }
+
+ return $_;
+}
+
+sub unmunge
+{
+ # Replace escaped symbols with their equivalents.
+ local $_ = $_[0];
+
+ s/&lt;/E<lt>/g;
+ s/&gt;/E<gt>/g;
+ s/&lbrace;/\{/g;
+ s/&rbrace;/\}/g;
+ s/&at;/\@/g;
+ s/&amp;/&/g;
+ return $_;
+}
+
+sub add_footnote
+{
+ unless (exists $sects{FOOTNOTES}) {
+ $sects{FOOTNOTES} = "\n=over 4\n\n";
+ }
+
+ $sects{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++;
+ $sects{FOOTNOTES} .= $_[0];
+ $sects{FOOTNOTES} .= "\n\n";
+}
+
+# stolen from Symbol.pm
+{
+ my $genseq = 0;
+ sub gensym
+ {
+ my $name = "GEN" . $genseq++;
+ my $ref = \*{$name};
+ delete $::{$name};
+ return $ref;
+ }
+}
diff --git a/contrib/ffmpeg/ffinstall.nsi b/contrib/ffmpeg/ffinstall.nsi
new file mode 100644
index 000000000..f483b0174
--- /dev/null
+++ b/contrib/ffmpeg/ffinstall.nsi
@@ -0,0 +1,75 @@
+;NSIS Script For FFmpeg
+
+;Title Of Your Application
+Name "FFmpeg"
+CompletedText "FFmpeg install completed! Enjoy your meal!"
+
+; do a CRC check
+CRCCheck On
+
+; output file name
+OutFile "FFinstall.exe"
+
+; license page introduction
+LicenseText "You must agree to this license before installing."
+
+; license data
+LicenseData ".\COPYING"
+
+; the default installation directory
+InstallDir "$PROGRAMFILES\FFmpeg"
+
+;The text to prompt the user to enter a directory
+DirText "Please select the folder below"
+
+Section "Install"
+ ;Install Files
+ SetOutPath $INSTDIR
+ SetCompress Auto
+ SetOverwrite IfNewer
+ File ".\ffmpeg.exe"
+ File ".\SDL.dll"
+ File ".\ffplay.exe"
+ File ".\COPYING"
+ File ".\CREDITS"
+
+ ; documentation
+ SetOutPath $INSTDIR\doc
+ File ".\doc\faq.html"
+ File ".\doc\ffmpeg-doc.html"
+ File ".\doc\ffplay-doc.html"
+
+ ; Write the uninstall keys for Windows
+ WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\FFmpeg" "DisplayName" "FFmpeg (remove only)"
+ WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\FFmpeg" "UninstallString" "$INSTDIR\Uninst.exe"
+WriteUninstaller "Uninst.exe"
+SectionEnd
+
+Section "Shortcuts"
+ ;Add Shortcuts
+SectionEnd
+
+UninstallText "This will uninstall FFmpeg from your system"
+
+Section Uninstall
+ ; delete files
+ Delete "$INSTDIR\ffmpeg.exe"
+ Delete "$INSTDIR\SDL.dll"
+ Delete "$INSTDIR\ffplay.exe"
+ Delete "$INSTDIR\COPYING"
+ Delete "$INSTDIR\CREDITS"
+
+ ; delete documentation
+ Delete "$INSTDIR\doc\faq.html"
+  Delete "$INSTDIR\doc\ffmpeg-doc.html"
+ Delete "$INSTDIR\doc\ffplay-doc.html"
+
+ RMDir /r $INSTDIR\doc
+
+  ; delete uninstaller and uninstall registry entries
+ Delete "$INSTDIR\Uninst.exe"
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\FFmpeg"
+ DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\FFmpeg"
+ RMDir "$INSTDIR"
+SectionEnd
+
diff --git a/contrib/ffmpeg/ffmpeg.c b/contrib/ffmpeg/ffmpeg.c
new file mode 100644
index 000000000..ab0b689a6
--- /dev/null
+++ b/contrib/ffmpeg/ffmpeg.c
@@ -0,0 +1,3973 @@
+/*
+ * FFmpeg main
+ * Copyright (c) 2000-2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#define HAVE_AV_CONFIG_H
+#include <signal.h>
+#include <limits.h>
+#include "avformat.h"
+#include "swscale.h"
+#include "framehook.h"
+#include "dsputil.h"
+#include "opt.h"
+#include "fifo.h"
+
+#ifdef __MINGW32__
+#include <conio.h>
+#else
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <termios.h>
+#include <sys/resource.h>
+#endif
+#ifdef CONFIG_OS2
+#include <sys/types.h>
+#include <sys/select.h>
+#include <stdlib.h>
+#endif
+#undef time //needed because HAVE_AV_CONFIG_H is defined on top
+#include <time.h>
+
+#include "version.h"
+#include "cmdutils.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#if !defined(INFINITY) && defined(HUGE_VAL)
+#define INFINITY HUGE_VAL
+#endif
+
+/* select an input stream for an output stream */
+typedef struct AVStreamMap {
+ int file_index;
+ int stream_index;
+ int sync_file_index;
+ int sync_stream_index;
+} AVStreamMap;
+
+/** select an input file for an output file */
+typedef struct AVMetaDataMap {
+ int out_file;
+ int in_file;
+} AVMetaDataMap;
+
+extern const OptionDef options[];
+
+static void show_help(void);
+static void show_license(void);
+static int opt_default(const char *opt, const char *arg);
+
+#define MAX_FILES 20
+
+static AVFormatContext *input_files[MAX_FILES];
+static int64_t input_files_ts_offset[MAX_FILES];
+static int nb_input_files = 0;
+
+static AVFormatContext *output_files[MAX_FILES];
+static int nb_output_files = 0;
+
+static AVStreamMap stream_maps[MAX_FILES];
+static int nb_stream_maps;
+
+static AVMetaDataMap meta_data_maps[MAX_FILES];
+static int nb_meta_data_maps;
+
+static AVInputFormat *file_iformat;
+static AVOutputFormat *file_oformat;
+static int frame_width = 0;
+static int frame_height = 0;
+static float frame_aspect_ratio = 0;
+static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
+static int frame_padtop = 0;
+static int frame_padbottom = 0;
+static int frame_padleft = 0;
+static int frame_padright = 0;
+static int padcolor[3] = {16,128,128}; /* default to black */
+static int frame_topBand = 0;
+static int frame_bottomBand = 0;
+static int frame_leftBand = 0;
+static int frame_rightBand = 0;
+static int max_frames[4] = {INT_MAX, INT_MAX, INT_MAX, INT_MAX};
+static int frame_rate = 25;
+static int frame_rate_base = 1;
+static float video_qscale = 0;
+static int video_qdiff = 3;
+static uint16_t *intra_matrix = NULL;
+static uint16_t *inter_matrix = NULL;
+#if 0 //experimental, (can be removed)
+static float video_rc_qsquish=1.0;
+static float video_rc_qmod_amp=0;
+static int video_rc_qmod_freq=0;
+#endif
+static char *video_rc_override_string=NULL;
+static char *video_rc_eq="tex^qComp";
+static int me_method = ME_EPZS;
+static int video_disable = 0;
+static int video_discard = 0;
+static int video_codec_id = CODEC_ID_NONE;
+static int video_codec_tag = 0;
+static int same_quality = 0;
+static int do_deinterlace = 0;
+static int packet_size = 0;
+static int strict = 0;
+static int top_field_first = -1;
+static int me_threshold = 0;
+static int intra_dc_precision = 8;
+static int loop_input = 0;
+static int loop_output = AVFMT_NOOUTPUTLOOP;
+static int qp_hist = 0;
+
+static int intra_only = 0;
+static int audio_sample_rate = 44100;
+static int audio_bit_rate = 64000;
+#define QSCALE_NONE -99999
+static float audio_qscale = QSCALE_NONE;
+static int audio_disable = 0;
+static int audio_channels = 1;
+static int audio_codec_id = CODEC_ID_NONE;
+static int audio_codec_tag = 0;
+static char *audio_language = NULL;
+
+static int subtitle_codec_id = CODEC_ID_NONE;
+static char *subtitle_language = NULL;
+
+static float mux_preload= 0.5;
+static float mux_max_delay= 0.7;
+
+static int64_t recording_time = 0;
+static int64_t start_time = 0;
+static int64_t rec_timestamp = 0;
+static int64_t input_ts_offset = 0;
+static int file_overwrite = 0;
+static char *str_title = NULL;
+static char *str_author = NULL;
+static char *str_copyright = NULL;
+static char *str_comment = NULL;
+static char *str_album = NULL;
+static int do_benchmark = 0;
+static int do_hex_dump = 0;
+static int do_pkt_dump = 0;
+static int do_psnr = 0;
+static int do_vstats = 0;
+static int do_pass = 0;
+static char *pass_logfilename = NULL;
+static int audio_stream_copy = 0;
+static int video_stream_copy = 0;
+static int subtitle_stream_copy = 0;
+static int video_sync_method= 1;
+static int audio_sync_method= 0;
+static int copy_ts= 0;
+static int opt_shortest = 0; //
+static int video_global_header = 0;
+
+static int rate_emu = 0;
+
+#ifdef CONFIG_BKTR
+static const char *video_grab_format = "bktr";
+#else
+#ifdef CONFIG_VIDEO4LINUX2
+static const char *video_grab_format = "video4linux2";
+#else
+static const char *video_grab_format = "video4linux";
+#endif
+#endif
+static char *video_device = NULL;
+static char *grab_device = NULL;
+static int video_channel = 0;
+static char *video_standard = "ntsc";
+
+static const char *audio_grab_format = "audio_device";
+static char *audio_device = NULL;
+static int audio_volume = 256;
+
+static int using_stdin = 0;
+static int using_vhook = 0;
+static int verbose = 1;
+static int thread_count= 1;
+static int q_pressed = 0;
+static int64_t video_size = 0;
+static int64_t audio_size = 0;
+static int64_t extra_size = 0;
+static int nb_frames_dup = 0;
+static int nb_frames_drop = 0;
+static int input_sync;
+static int limit_filesize = 0; //
+
+static int pgmyuv_compatibility_hack=0;
+static int dts_delta_threshold = 10;
+
+static int sws_flags = SWS_BICUBIC;
+
+const char **opt_names=NULL;
+int opt_name_count=0;
+AVCodecContext *avctx_opts;
+AVFormatContext *avformat_opts;
+
+static AVBitStreamFilterContext *video_bitstream_filters=NULL;
+static AVBitStreamFilterContext *audio_bitstream_filters=NULL;
+static AVBitStreamFilterContext *bitstream_filters[MAX_FILES][MAX_STREAMS];
+
+#define DEFAULT_PASS_LOGFILENAME "ffmpeg2pass"
+
+struct AVInputStream;
+
+typedef struct AVOutputStream {
+ int file_index; /* file index */
+ int index; /* stream index in the output file */
+ int source_index; /* AVInputStream index */
+ AVStream *st; /* stream in the output file */
+ int encoding_needed; /* true if encoding needed for this stream */
+ int frame_number;
+ /* input pts and corresponding output pts
+ for A/V sync */
+ //double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
+ struct AVInputStream *sync_ist; /* input stream to sync against */
+ int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
+ /* video only */
+ int video_resample;
+ AVFrame pict_tmp; /* temporary image for resampling */
+ struct SwsContext *img_resample_ctx; /* for image resampling */
+ int resample_height;
+
+ int video_crop;
+ int topBand; /* cropping area sizes */
+ int leftBand;
+
+ int video_pad;
+ int padtop; /* padding area sizes */
+ int padbottom;
+ int padleft;
+ int padright;
+
+ /* audio only */
+ int audio_resample;
+ ReSampleContext *resample; /* for audio resampling */
+ AVFifoBuffer fifo; /* for compression: one audio fifo per codec */
+ FILE *logfile;
+} AVOutputStream;
+
+typedef struct AVInputStream {
+ int file_index;
+ int index;
+ AVStream *st;
+ int discard; /* true if stream data should be discarded */
+ int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
+ int64_t sample_index; /* current sample */
+
+ int64_t start; /* time when read started */
+ unsigned long frame; /* current frame */
+ int64_t next_pts; /* synthetic pts for cases where pkt.pts
+ is not defined */
+ int64_t pts; /* current pts */
+ int is_start; /* is 1 at the start and after a discontinuity */
+} AVInputStream;
+
+typedef struct AVInputFile {
+ int eof_reached; /* true if eof reached */
+ int ist_index; /* index of first stream in ist_table */
+ int buffer_size; /* current total buffer size */
+ int buffer_size_max; /* buffer size at which we consider we can stop
+ buffering */
+ int nb_streams; /* nb streams we are aware of */
+} AVInputFile;
+
+#ifndef __MINGW32__
+
+/* init terminal so that we can grab keys */
+static struct termios oldtty;
+#endif
+
+static void term_exit(void)
+{
+#ifndef __MINGW32__
+ tcsetattr (0, TCSANOW, &oldtty);
+#endif
+}
+
+static volatile sig_atomic_t received_sigterm = 0;
+
+static void
+sigterm_handler(int sig)
+{
+ received_sigterm = sig;
+ term_exit();
+}
+
+static void term_init(void)
+{
+#ifndef __MINGW32__
+ struct termios tty;
+
+ tcgetattr (0, &tty);
+ oldtty = tty;
+
+ tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
+ |INLCR|IGNCR|ICRNL|IXON);
+ tty.c_oflag |= OPOST;
+ tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
+ tty.c_cflag &= ~(CSIZE|PARENB);
+ tty.c_cflag |= CS8;
+ tty.c_cc[VMIN] = 1;
+ tty.c_cc[VTIME] = 0;
+
+ tcsetattr (0, TCSANOW, &tty);
+ signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
+#endif
+
+ signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
+ signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
+ /*
+ register a function to be called at normal program termination
+ */
+ atexit(term_exit);
+#ifdef CONFIG_BEOS_NETSERVER
+ fcntl(0, F_SETFL, fcntl(0, F_GETFL) | O_NONBLOCK);
+#endif
+}
+
+/* read a key without blocking */
+static int read_key(void)
+{
+#ifdef __MINGW32__
+ if(kbhit())
+ return(getch());
+#else
+ int n = 1;
+ unsigned char ch;
+#ifndef CONFIG_BEOS_NETSERVER
+ struct timeval tv;
+ fd_set rfds;
+
+ FD_ZERO(&rfds);
+ FD_SET(0, &rfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ n = select(1, &rfds, NULL, NULL, &tv);
+#endif
+ if (n > 0) {
+ n = read(0, &ch, 1);
+ if (n == 1)
+ return ch;
+
+ return n;
+ }
+#endif
+ return -1;
+}
+
+static int decode_interrupt_cb(void)
+{
+ return q_pressed || (q_pressed = read_key() == 'q');
+}
+
+static int read_ffserver_streams(AVFormatContext *s, const char *filename)
+{
+ int i, err;
+ AVFormatContext *ic;
+
+ err = av_open_input_file(&ic, filename, NULL, FFM_PACKET_SIZE, NULL);
+ if (err < 0)
+ return err;
+ /* copy stream format */
+ s->nb_streams = ic->nb_streams;
+ for(i=0;i<ic->nb_streams;i++) {
+ AVStream *st;
+
+ // FIXME: a more elegant solution is needed
+ st = av_mallocz(sizeof(AVStream));
+ memcpy(st, ic->streams[i], sizeof(AVStream));
+ st->codec = avcodec_alloc_context();
+ memcpy(st->codec, ic->streams[i]->codec, sizeof(AVCodecContext));
+ s->streams[i] = st;
+ }
+
+ av_close_input_file(ic);
+ return 0;
+}
+
+static double
+get_sync_ipts(const AVOutputStream *ost)
+{
+ const AVInputStream *ist = ost->sync_ist;
+ return (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/AV_TIME_BASE;
+}
+
+static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
+ while(bsfc){
+ AVPacket new_pkt= *pkt;
+ int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
+ &new_pkt.data, &new_pkt.size,
+ pkt->data, pkt->size,
+ pkt->flags & PKT_FLAG_KEY);
+ if(a){
+ av_free_packet(pkt);
+ new_pkt.destruct= av_destruct_packet;
+ }
+ *pkt= new_pkt;
+
+ bsfc= bsfc->next;
+ }
+
+ av_interleaved_write_frame(s, pkt);
+}
+
+#define MAX_AUDIO_PACKET_SIZE (128 * 1024)
+
+static void do_audio_out(AVFormatContext *s,
+ AVOutputStream *ost,
+ AVInputStream *ist,
+ unsigned char *buf, int size)
+{
+ uint8_t *buftmp;
+ static uint8_t *audio_buf = NULL;
+ static uint8_t *audio_out = NULL;
+ const int audio_out_size= 4*MAX_AUDIO_PACKET_SIZE;
+
+ int size_out, frame_bytes, ret;
+ AVCodecContext *enc= ost->st->codec;
+
+ /* SC: dynamic allocation of buffers */
+ if (!audio_buf)
+ audio_buf = av_malloc(2*MAX_AUDIO_PACKET_SIZE);
+ if (!audio_out)
+ audio_out = av_malloc(audio_out_size);
+ if (!audio_buf || !audio_out)
+ return; /* Should signal an error ! */
+
+ if(audio_sync_method){
+ double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts
+ - av_fifo_size(&ost->fifo)/(ost->st->codec->channels * 2);
+ double idelta= delta*ist->st->codec->sample_rate / enc->sample_rate;
+ int byte_delta= ((int)idelta)*2*ist->st->codec->channels;
+
+ //FIXME resample delay
+ if(fabs(delta) > 50){
+ if(ist->is_start){
+ if(byte_delta < 0){
+ byte_delta= FFMAX(byte_delta, -size);
+ size += byte_delta;
+ buf -= byte_delta;
+ if(verbose > 2)
+ fprintf(stderr, "discarding %d audio samples\n", (int)-delta);
+ if(!size)
+ return;
+ ist->is_start=0;
+ }else{
+ static uint8_t *input_tmp= NULL;
+ input_tmp= av_realloc(input_tmp, byte_delta + size);
+
+ if(byte_delta + size <= MAX_AUDIO_PACKET_SIZE)
+ ist->is_start=0;
+ else
+ byte_delta= MAX_AUDIO_PACKET_SIZE - size;
+
+ memset(input_tmp, 0, byte_delta);
+ memcpy(input_tmp + byte_delta, buf, size);
+ buf= input_tmp;
+ size += byte_delta;
+ if(verbose > 2)
+ fprintf(stderr, "adding %d audio samples of silence\n", (int)delta);
+ }
+ }else if(audio_sync_method>1){
+ int comp= clip(delta, -audio_sync_method, audio_sync_method);
+ assert(ost->audio_resample);
+ if(verbose > 2)
+ fprintf(stderr, "compensating audio timestamp drift:%f compensation:%d in:%d\n", delta, comp, enc->sample_rate);
+// fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(&ost->fifo)/(ost->st->codec->channels * 2));
+ av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
+ }
+ }
+ }else
+ ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate)
+ - av_fifo_size(&ost->fifo)/(ost->st->codec->channels * 2); //FIXME wrong
+
+ if (ost->audio_resample) {
+ buftmp = audio_buf;
+ size_out = audio_resample(ost->resample,
+ (short *)buftmp, (short *)buf,
+ size / (ist->st->codec->channels * 2));
+ size_out = size_out * enc->channels * 2;
+ } else {
+ buftmp = buf;
+ size_out = size;
+ }
+
+ /* now encode as many frames as possible */
+ if (enc->frame_size > 1) {
+ /* output resampled raw samples */
+ av_fifo_write(&ost->fifo, buftmp, size_out);
+
+ frame_bytes = enc->frame_size * 2 * enc->channels;
+
+ while (av_fifo_read(&ost->fifo, audio_buf, frame_bytes) == 0) {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
+ ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
+ (short *)audio_buf);
+ audio_size += ret;
+ pkt.stream_index= ost->index;
+ pkt.data= audio_out;
+ pkt.size= ret;
+ if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+ pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+ pkt.flags |= PKT_FLAG_KEY;
+ write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
+
+ ost->sync_opts += enc->frame_size;
+ }
+ } else {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
+ ost->sync_opts += size_out / (2 * enc->channels);
+
+ /* output a pcm frame */
+ /* XXX: change encoding codec API to avoid this ? */
+ switch(enc->codec->id) {
+ case CODEC_ID_PCM_S32LE:
+ case CODEC_ID_PCM_S32BE:
+ case CODEC_ID_PCM_U32LE:
+ case CODEC_ID_PCM_U32BE:
+ size_out = size_out << 1;
+ break;
+ case CODEC_ID_PCM_S24LE:
+ case CODEC_ID_PCM_S24BE:
+ case CODEC_ID_PCM_U24LE:
+ case CODEC_ID_PCM_U24BE:
+ case CODEC_ID_PCM_S24DAUD:
+ size_out = size_out / 2 * 3;
+ break;
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S16BE:
+ case CODEC_ID_PCM_U16LE:
+ case CODEC_ID_PCM_U16BE:
+ break;
+ default:
+ size_out = size_out >> 1;
+ break;
+ }
+ ret = avcodec_encode_audio(enc, audio_out, size_out,
+ (short *)buftmp);
+ audio_size += ret;
+ pkt.stream_index= ost->index;
+ pkt.data= audio_out;
+ pkt.size= ret;
+ if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+ pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+ pkt.flags |= PKT_FLAG_KEY;
+ write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
+ }
+}
+
+static void pre_process_video_frame(AVInputStream *ist, AVPicture *picture, void **bufp)
+{
+ AVCodecContext *dec;
+ AVPicture *picture2;
+ AVPicture picture_tmp;
+ uint8_t *buf = 0;
+
+ dec = ist->st->codec;
+
+ /* deinterlace : must be done before any resize */
+ if (do_deinterlace || using_vhook) {
+ int size;
+
+ /* create temporary picture */
+ size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
+ buf = av_malloc(size);
+ if (!buf)
+ return;
+
+ picture2 = &picture_tmp;
+ avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
+
+ if (do_deinterlace){
+ if(avpicture_deinterlace(picture2, picture,
+ dec->pix_fmt, dec->width, dec->height) < 0) {
+ /* if error, do not deinterlace */
+ av_free(buf);
+ buf = NULL;
+ picture2 = picture;
+ }
+ } else {
+ img_copy(picture2, picture, dec->pix_fmt, dec->width, dec->height);
+ }
+ } else {
+ picture2 = picture;
+ }
+
+ frame_hook_process(picture2, dec->pix_fmt, dec->width, dec->height);
+
+ if (picture != picture2)
+ *picture = *picture2;
+ *bufp = buf;
+}
+
+/* we begin to correct av delay at this threshold */
+#define AV_DELAY_MAX 0.100
+
+static void do_subtitle_out(AVFormatContext *s,
+ AVOutputStream *ost,
+ AVInputStream *ist,
+ AVSubtitle *sub,
+ int64_t pts)
+{
+ static uint8_t *subtitle_out = NULL;
+ int subtitle_out_max_size = 65536;
+ int subtitle_out_size, nb, i;
+ AVCodecContext *enc;
+ AVPacket pkt;
+
+ if (pts == AV_NOPTS_VALUE) {
+ fprintf(stderr, "Subtitle packets must have a pts\n");
+ return;
+ }
+
+ enc = ost->st->codec;
+
+ if (!subtitle_out) {
+ subtitle_out = av_malloc(subtitle_out_max_size);
+ }
+
+    /* Note: DVB subtitles need one packet to draw them and another
+       packet to clear them */
+ /* XXX: signal it in the codec context ? */
+ if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
+ nb = 2;
+ else
+ nb = 1;
+
+ for(i = 0; i < nb; i++) {
+ subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
+ subtitle_out_max_size, sub);
+
+ av_init_packet(&pkt);
+ pkt.stream_index = ost->index;
+ pkt.data = subtitle_out;
+ pkt.size = subtitle_out_size;
+ pkt.pts = av_rescale_q(av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q) + input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ost->st->time_base);
+ if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
+ /* XXX: the pts correction is handled here. Maybe handling
+ it in the codec would be better */
+ if (i == 0)
+ pkt.pts += 90 * sub->start_display_time;
+ else
+ pkt.pts += 90 * sub->end_display_time;
+ }
+ write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
+ }
+}
+
+static int bit_buffer_size= 1024*256;
+static uint8_t *bit_buffer= NULL;
+
+static void do_video_out(AVFormatContext *s,
+ AVOutputStream *ost,
+ AVInputStream *ist,
+ AVFrame *in_picture,
+ int *frame_size)
+{
+ int nb_frames, i, ret;
+ AVFrame *final_picture, *formatted_picture, *resampling_dst, *padding_src;
+ AVFrame picture_crop_temp, picture_pad_temp;
+ uint8_t *buf = NULL, *buf1 = NULL;
+ AVCodecContext *enc, *dec;
+
+ avcodec_get_frame_defaults(&picture_crop_temp);
+ avcodec_get_frame_defaults(&picture_pad_temp);
+
+ enc = ost->st->codec;
+ dec = ist->st->codec;
+
+ /* by default, we output a single frame */
+ nb_frames = 1;
+
+ *frame_size = 0;
+
+ if(video_sync_method){
+ double vdelta;
+ vdelta = get_sync_ipts(ost) / av_q2d(enc->time_base) - ost->sync_opts;
+ //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
+ if (vdelta < -1.1)
+ nb_frames = 0;
+ else if (vdelta > 1.1)
+ nb_frames = lrintf(vdelta);
+//fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, ost->sync_ipts, nb_frames);
+ if (nb_frames == 0){
+ ++nb_frames_drop;
+ if (verbose>2)
+ fprintf(stderr, "*** drop!\n");
+ }else if (nb_frames > 1) {
+ nb_frames_dup += nb_frames;
+ if (verbose>2)
+ fprintf(stderr, "*** %d dup!\n", nb_frames-1);
+ }
+ }else
+ ost->sync_opts= lrintf(get_sync_ipts(ost) / av_q2d(enc->time_base));
+
+ nb_frames= FFMIN(nb_frames, max_frames[CODEC_TYPE_VIDEO] - ost->frame_number);
+ if (nb_frames <= 0)
+ return;
+
+ if (ost->video_crop) {
+ if (img_crop((AVPicture *)&picture_crop_temp, (AVPicture *)in_picture, dec->pix_fmt, ost->topBand, ost->leftBand) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "error cropping picture\n");
+ goto the_end;
+ }
+ formatted_picture = &picture_crop_temp;
+ } else {
+ formatted_picture = in_picture;
+ }
+
+ final_picture = formatted_picture;
+ padding_src = formatted_picture;
+ resampling_dst = &ost->pict_tmp;
+ if (ost->video_pad) {
+ final_picture = &ost->pict_tmp;
+ if (ost->video_resample) {
+ if (img_crop((AVPicture *)&picture_pad_temp, (AVPicture *)final_picture, enc->pix_fmt, ost->padtop, ost->padleft) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "error padding picture\n");
+ goto the_end;
+ }
+ resampling_dst = &picture_pad_temp;
+ }
+ }
+
+ if (ost->video_resample) {
+ padding_src = NULL;
+ final_picture = &ost->pict_tmp;
+ sws_scale(ost->img_resample_ctx, formatted_picture->data, formatted_picture->linesize,
+ 0, ost->resample_height, resampling_dst->data, resampling_dst->linesize);
+ }
+
+ if (ost->video_pad) {
+ img_pad((AVPicture*)final_picture, (AVPicture *)padding_src,
+ enc->height, enc->width, enc->pix_fmt,
+ ost->padtop, ost->padbottom, ost->padleft, ost->padright, padcolor);
+ }
+
+ /* duplicates frame if needed */
+ for(i=0;i<nb_frames;i++) {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.stream_index= ost->index;
+
+ if (s->oformat->flags & AVFMT_RAWPICTURE) {
+ /* raw pictures are written as AVPicture structure to
+               avoid any copies. We temporarily support the older
+ method. */
+ AVFrame* old_frame = enc->coded_frame;
+ enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
+ pkt.data= (uint8_t *)final_picture;
+ pkt.size= sizeof(AVPicture);
+ if(dec->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+ pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+ if(dec->coded_frame && dec->coded_frame->key_frame)
+ pkt.flags |= PKT_FLAG_KEY;
+
+ write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
+ enc->coded_frame = old_frame;
+ } else {
+ AVFrame big_picture;
+
+ big_picture= *final_picture;
+ /* better than nothing: use input picture interlaced
+ settings */
+ big_picture.interlaced_frame = in_picture->interlaced_frame;
+ if(avctx_opts->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)){
+ if(top_field_first == -1)
+ big_picture.top_field_first = in_picture->top_field_first;
+ else
+ big_picture.top_field_first = top_field_first;
+ }
+
+ /* handles sameq here. This is not correct because it may
+ not be a global option */
+ if (same_quality) {
+ big_picture.quality = ist->st->quality;
+ }else
+ big_picture.quality = ost->st->quality;
+ if(!me_threshold)
+ big_picture.pict_type = 0;
+// big_picture.pts = AV_NOPTS_VALUE;
+ big_picture.pts= ost->sync_opts;
+// big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
+//av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
+ ret = avcodec_encode_video(enc,
+ bit_buffer, bit_buffer_size,
+ &big_picture);
+ if (ret == -1) {
+ fprintf(stderr, "Video encoding failed\n");
+ exit(1);
+ }
+ //enc->frame_number = enc->real_pict_num;
+ if(ret>0){
+ pkt.data= bit_buffer;
+ pkt.size= ret;
+ if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+ pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+/*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
+ pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
+ pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
+
+ if(enc->coded_frame && enc->coded_frame->key_frame)
+ pkt.flags |= PKT_FLAG_KEY;
+ write_frame(s, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
+ *frame_size = ret;
+ //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
+ // enc->frame_number-1, enc->real_pict_num, ret,
+ // enc->pict_type);
+ /* if two pass, output log */
+ if (ost->logfile && enc->stats_out) {
+ fprintf(ost->logfile, "%s", enc->stats_out);
+ }
+ }
+ }
+ ost->sync_opts++;
+ ost->frame_number++;
+ }
+ the_end:
+ av_free(buf);
+ av_free(buf1);
+}
+
+static double psnr(double d){
+ if(d==0) return INFINITY;
+ return -10.0*log(d)/log(10.0);
+}
+
+static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
+ int frame_size)
+{
+ static FILE *fvstats=NULL;
+ char filename[40];
+ time_t today2;
+ struct tm *today;
+ AVCodecContext *enc;
+ int frame_number;
+ int64_t ti;
+ double ti1, bitrate, avg_bitrate;
+
+ if (!fvstats) {
+ today2 = time(NULL);
+ today = localtime(&today2);
+ snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour,
+ today->tm_min,
+ today->tm_sec);
+ fvstats = fopen(filename,"w");
+ if (!fvstats) {
+ perror("fopen");
+ exit(1);
+ }
+ }
+
+ ti = MAXINT64;
+ enc = ost->st->codec;
+ if (enc->codec_type == CODEC_TYPE_VIDEO) {
+ frame_number = ost->frame_number;
+ fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality/(float)FF_QP2LAMBDA);
+ if (enc->flags&CODEC_FLAG_PSNR)
+ fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
+
+ fprintf(fvstats,"f_size= %6d ", frame_size);
+ /* compute pts value */
+ ti1 = ost->sync_opts * av_q2d(enc->time_base);
+ if (ti1 < 0.01)
+ ti1 = 0.01;
+
+ bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
+ avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
+ fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
+ (double)video_size / 1024, ti1, bitrate, avg_bitrate);
+ fprintf(fvstats,"type= %c\n", av_get_pict_type_char(enc->coded_frame->pict_type));
+ }
+}
+
+static void print_report(AVFormatContext **output_files,
+ AVOutputStream **ost_table, int nb_ostreams,
+ int is_last_report)
+{
+ char buf[1024];
+ AVOutputStream *ost;
+ AVFormatContext *oc, *os;
+ int64_t total_size;
+ AVCodecContext *enc;
+ int frame_number, vid, i;
+ double bitrate, ti1, pts;
+ static int64_t last_time = -1;
+ static int qp_histogram[52];
+
+ if (!is_last_report) {
+ int64_t cur_time;
+ /* display the report every 0.5 seconds */
+ cur_time = av_gettime();
+ if (last_time == -1) {
+ last_time = cur_time;
+ return;
+ }
+ if ((cur_time - last_time) < 500000)
+ return;
+ last_time = cur_time;
+ }
+
+
+ oc = output_files[0];
+
+ total_size = url_ftell(&oc->pb);
+
+ buf[0] = '\0';
+ ti1 = 1e10;
+ vid = 0;
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ os = output_files[ost->file_index];
+ enc = ost->st->codec;
+ if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ",
+ enc->coded_frame->quality/(float)FF_QP2LAMBDA);
+ }
+ if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
+ frame_number = ost->frame_number;
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d q=%3.1f ",
+ frame_number, enc->coded_frame ? enc->coded_frame->quality/(float)FF_QP2LAMBDA : -1);
+ if(is_last_report)
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
+ if(qp_hist && enc->coded_frame){
+ int j;
+ int qp= lrintf(enc->coded_frame->quality/(float)FF_QP2LAMBDA);
+ if(qp>=0 && qp<sizeof(qp_histogram)/sizeof(int))
+ qp_histogram[qp]++;
+ for(j=0; j<32; j++)
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j]+1)/log(2)));
+ }
+ if (enc->flags&CODEC_FLAG_PSNR){
+ int j;
+ double error, error_sum=0;
+ double scale, scale_sum=0;
+ char type[3]= {'Y','U','V'};
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
+ for(j=0; j<3; j++){
+ if(is_last_report){
+ error= enc->error[j];
+ scale= enc->width*enc->height*255.0*255.0*frame_number;
+ }else{
+ error= enc->coded_frame->error[j];
+ scale= enc->width*enc->height*255.0*255.0;
+ }
+ if(j) scale/=4;
+ error_sum += error;
+ scale_sum += scale;
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error/scale));
+ }
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum/scale_sum));
+ }
+ vid = 1;
+ }
+ /* compute min output value */
+ pts = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
+ if ((pts < ti1) && (pts > 0))
+ ti1 = pts;
+ }
+ if (ti1 < 0.01)
+ ti1 = 0.01;
+
+ if (verbose || is_last_report) {
+ bitrate = (double)(total_size * 8) / ti1 / 1000.0;
+
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
+ "size=%8.0fkB time=%0.1f bitrate=%6.1fkbits/s",
+ (double)total_size / 1024, ti1, bitrate);
+
+ if (verbose > 1)
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
+ nb_frames_dup, nb_frames_drop);
+
+ if (verbose >= 0)
+ fprintf(stderr, "%s \r", buf);
+
+ fflush(stderr);
+ }
+
+ if (is_last_report && verbose >= 0){
+ int64_t raw= audio_size + video_size + extra_size;
+ fprintf(stderr, "\n");
+ fprintf(stderr, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
+ video_size/1024.0,
+ audio_size/1024.0,
+ extra_size/1024.0,
+ 100.0*(total_size - raw)/raw
+ );
+ }
+}
+
+/* pkt = NULL means EOF (needed to flush decoder buffers) */
+static int output_packet(AVInputStream *ist, int ist_index,
+ AVOutputStream **ost_table, int nb_ostreams,
+ const AVPacket *pkt)
+{
+ AVFormatContext *os;
+ AVOutputStream *ost;
+ uint8_t *ptr;
+ int len, ret, i;
+ uint8_t *data_buf;
+ int data_size, got_picture;
+ AVFrame picture;
+ void *buffer_to_free;
+ static unsigned int samples_size= 0;
+ static short *samples= NULL;
+ AVSubtitle subtitle, *subtitle_to_free;
+ int got_subtitle;
+
+ if(!pkt){
+ ist->pts= ist->next_pts; // needed for last packet if vsync=0
+ } else if (pkt->dts != AV_NOPTS_VALUE) { //FIXME seems redundant, as libavformat does this too
+ ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
+ } else {
+// assert(ist->pts == ist->next_pts);
+ }
+
+ if (pkt == NULL) {
+ /* EOF handling */
+ ptr = NULL;
+ len = 0;
+ goto handle_eof;
+ }
+
+ len = pkt->size;
+ ptr = pkt->data;
+ while (len > 0) {
+ handle_eof:
+ /* decode the packet if needed */
+ data_buf = NULL; /* fail safe */
+ data_size = 0;
+ subtitle_to_free = NULL;
+ if (ist->decoding_needed) {
+ switch(ist->st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:{
+ if(pkt)
+ samples= av_fast_realloc(samples, &samples_size, FFMAX(pkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE));
+ /* XXX: could avoid copy if PCM 16 bits with same
+ endianness as CPU */
+ ret = avcodec_decode_audio(ist->st->codec, samples, &data_size,
+ ptr, len);
+ if (ret < 0)
+ goto fail_decode;
+ ptr += ret;
+ len -= ret;
+                /* Some bug in the mpeg audio decoder gives
+                   data_size < 0; it seems they are overflows */
+ if (data_size <= 0) {
+ /* no audio frame */
+ continue;
+ }
+ data_buf = (uint8_t *)samples;
+ ist->next_pts += ((int64_t)AV_TIME_BASE/2 * data_size) /
+ (ist->st->codec->sample_rate * ist->st->codec->channels);
+ break;}
+ case CODEC_TYPE_VIDEO:
+ data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
+ /* XXX: allocate picture correctly */
+ avcodec_get_frame_defaults(&picture);
+
+ ret = avcodec_decode_video(ist->st->codec,
+ &picture, &got_picture, ptr, len);
+ ist->st->quality= picture.quality;
+ if (ret < 0)
+ goto fail_decode;
+ if (!got_picture) {
+ /* no picture yet */
+ goto discard_packet;
+ }
+ if (ist->st->codec->time_base.num != 0) {
+ ist->next_pts += ((int64_t)AV_TIME_BASE *
+ ist->st->codec->time_base.num) /
+ ist->st->codec->time_base.den;
+ }
+ len = 0;
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ ret = avcodec_decode_subtitle(ist->st->codec,
+ &subtitle, &got_subtitle, ptr, len);
+ if (ret < 0)
+ goto fail_decode;
+ if (!got_subtitle) {
+ goto discard_packet;
+ }
+ subtitle_to_free = &subtitle;
+ len = 0;
+ break;
+ default:
+ goto fail_decode;
+ }
+ } else {
+ switch(ist->st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
+ (ist->st->codec->sample_rate * ist->st->codec->channels);
+ break;
+ case CODEC_TYPE_VIDEO:
+ if (ist->st->codec->time_base.num != 0) {
+ ist->next_pts += ((int64_t)AV_TIME_BASE *
+ ist->st->codec->time_base.num) /
+ ist->st->codec->time_base.den;
+ }
+ break;
+ }
+ data_buf = ptr;
+ data_size = len;
+ ret = len;
+ len = 0;
+ }
+
+ buffer_to_free = NULL;
+ if (ist->st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ pre_process_video_frame(ist, (AVPicture *)&picture,
+ &buffer_to_free);
+ }
+
+ // preprocess audio (volume)
+ if (ist->st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ if (audio_volume != 256) {
+ short *volp;
+ volp = samples;
+ for(i=0;i<(data_size / sizeof(short));i++) {
+ int v = ((*volp) * audio_volume + 128) >> 8;
+ if (v < -32768) v = -32768;
+ if (v > 32767) v = 32767;
+ *volp++ = v;
+ }
+ }
+ }
+
+ /* frame rate emulation */
+ if (ist->st->codec->rate_emu) {
+ int64_t pts = av_rescale((int64_t) ist->frame * ist->st->codec->time_base.num, 1000000, ist->st->codec->time_base.den);
+ int64_t now = av_gettime() - ist->start;
+ if (pts > now)
+ usleep(pts - now);
+
+ ist->frame++;
+ }
+
+#if 0
+ /* mpeg PTS deordering : if it is a P or I frame, the PTS
+ is the one of the next displayed one */
+ /* XXX: add mpeg4 too ? */
+ if (ist->st->codec->codec_id == CODEC_ID_MPEG1VIDEO) {
+ if (ist->st->codec->pict_type != B_TYPE) {
+ int64_t tmp;
+ tmp = ist->last_ip_pts;
+ ist->last_ip_pts = ist->frac_pts.val;
+ ist->frac_pts.val = tmp;
+ }
+ }
+#endif
+ /* if output time reached then transcode raw format,
+ encode packets and output them */
+ if (start_time == 0 || ist->pts >= start_time)
+ for(i=0;i<nb_ostreams;i++) {
+ int frame_size;
+
+ ost = ost_table[i];
+ if (ost->source_index == ist_index) {
+ os = output_files[ost->file_index];
+
+#if 0
+ printf("%d: got pts=%0.3f %0.3f\n", i,
+ (double)pkt->pts / AV_TIME_BASE,
+ ((double)ist->pts / AV_TIME_BASE) -
+ ((double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den));
+#endif
+ /* set the input output pts pairs */
+ //ost->sync_ipts = (double)(ist->pts + input_files_ts_offset[ist->file_index] - start_time)/ AV_TIME_BASE;
+
+ if (ost->encoding_needed) {
+ switch(ost->st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ do_audio_out(os, ost, ist, data_buf, data_size);
+ break;
+ case CODEC_TYPE_VIDEO:
+ do_video_out(os, ost, ist, &picture, &frame_size);
+ video_size += frame_size;
+ if (do_vstats && frame_size)
+ do_video_stats(os, ost, frame_size);
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ do_subtitle_out(os, ost, ist, &subtitle,
+ pkt->pts);
+ break;
+ default:
+ av_abort();
+ }
+ } else {
+ AVFrame avframe; //FIXME/XXX remove this
+ AVPacket opkt;
+ av_init_packet(&opkt);
+
+ /* no re-encoding needed: output the packet directly */
+ /* force the input stream PTS */
+
+ avcodec_get_frame_defaults(&avframe);
+ ost->st->codec->coded_frame= &avframe;
+ avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
+
+ if(ost->st->codec->codec_type == CODEC_TYPE_AUDIO)
+ audio_size += data_size;
+ else if (ost->st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ video_size += data_size;
+ ost->sync_opts++;
+ }
+
+ opkt.stream_index= ost->index;
+ if(pkt->pts != AV_NOPTS_VALUE)
+ opkt.pts= av_rescale_q(av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q) + input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ost->st->time_base);
+ else
+ opkt.pts= AV_NOPTS_VALUE;
+
+ {
+ int64_t dts;
+ if (pkt->dts == AV_NOPTS_VALUE)
+ dts = ist->next_pts;
+ else
+ dts= av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
+ opkt.dts= av_rescale_q(dts + input_files_ts_offset[ist->file_index], AV_TIME_BASE_Q, ost->st->time_base);
+ }
+ opkt.flags= pkt->flags;
+
+ //FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
+ if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & PKT_FLAG_KEY))
+ opkt.destruct= av_destruct_packet;
+
+ write_frame(os, &opkt, ost->st->codec, bitstream_filters[ost->file_index][pkt->stream_index]);
+ ost->st->codec->frame_number++;
+ ost->frame_number++;
+ av_free_packet(&opkt);
+ }
+ }
+ }
+ av_free(buffer_to_free);
+ /* XXX: allocate the subtitles in the codec ? */
+ if (subtitle_to_free) {
+ if (subtitle_to_free->rects != NULL) {
+ for (i = 0; i < subtitle_to_free->num_rects; i++) {
+ av_free(subtitle_to_free->rects[i].bitmap);
+ av_free(subtitle_to_free->rects[i].rgba_palette);
+ }
+ av_freep(&subtitle_to_free->rects);
+ }
+ subtitle_to_free->num_rects = 0;
+ subtitle_to_free = NULL;
+ }
+ }
+ discard_packet:
+ if (pkt == NULL) {
+ /* EOF handling */
+
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ if (ost->source_index == ist_index) {
+ AVCodecContext *enc= ost->st->codec;
+ os = output_files[ost->file_index];
+
+ if(ost->st->codec->codec_type == CODEC_TYPE_AUDIO && enc->frame_size <=1)
+ continue;
+ if(ost->st->codec->codec_type == CODEC_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
+ continue;
+
+ if (ost->encoding_needed) {
+ for(;;) {
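+ /* flush the encoder at EOF: first encode any buffered samples, then
+ call the encoder with no input until it returns nothing */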
+ AVPacket pkt;
+ int fifo_bytes;
+ av_init_packet(&pkt);
+ pkt.stream_index= ost->index;
+
+ switch(ost->st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ fifo_bytes = av_fifo_size(&ost->fifo);
+ ret = 0;
+ /* encode any samples remaining in fifo */
+ if(fifo_bytes > 0 && enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
+ int fs_tmp = enc->frame_size;
+ enc->frame_size = fifo_bytes / (2 * enc->channels);
+ if(av_fifo_read(&ost->fifo, (uint8_t *)samples, fifo_bytes) == 0) {
+ ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, samples);
+ }
+ enc->frame_size = fs_tmp;
+ }
+ if(ret <= 0) {
+ ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
+ }
+ audio_size += ret;
+ pkt.flags |= PKT_FLAG_KEY;
+ break;
+ case CODEC_TYPE_VIDEO:
+ ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
+ video_size += ret;
+ if(enc->coded_frame && enc->coded_frame->key_frame)
+ pkt.flags |= PKT_FLAG_KEY;
+ if (ost->logfile && enc->stats_out) {
+ fprintf(ost->logfile, "%s", enc->stats_out);
+ }
+ break;
+ default:
+ ret=-1;
+ }
+
+ if(ret<=0)
+ break;
+ pkt.data= bit_buffer;
+ pkt.size= ret;
+ if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
+ pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
+ write_frame(os, &pkt, ost->st->codec, bitstream_filters[ost->file_index][pkt.stream_index]);
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+ fail_decode:
+ return -1;
+}
+
+
+/*
+ * The following code is the main loop of the file converter
+ */
+static int av_encode(AVFormatContext **output_files,
+ int nb_output_files,
+ AVFormatContext **input_files,
+ int nb_input_files,
+ AVStreamMap *stream_maps, int nb_stream_maps)
+{
+ int ret, i, j, k, n, nb_istreams = 0, nb_ostreams = 0;
+ AVFormatContext *is, *os;
+ AVCodecContext *codec, *icodec;
+ AVOutputStream *ost, **ost_table = NULL;
+ AVInputStream *ist, **ist_table = NULL;
+ AVInputFile *file_table;
+ AVFormatContext *stream_no_data;
+ int key;
+
+ file_table= (AVInputFile*) av_mallocz(nb_input_files * sizeof(AVInputFile));
+ if (!file_table)
+ goto fail;
+
+ /* input stream init */
+ j = 0;
+ for(i=0;i<nb_input_files;i++) {
+ is = input_files[i];
+ file_table[i].ist_index = j;
+ file_table[i].nb_streams = is->nb_streams;
+ j += is->nb_streams;
+ }
+ nb_istreams = j;
+
+ ist_table = av_mallocz(nb_istreams * sizeof(AVInputStream *));
+ if (!ist_table)
+ goto fail;
+
+ for(i=0;i<nb_istreams;i++) {
+ ist = av_mallocz(sizeof(AVInputStream));
+ if (!ist)
+ goto fail;
+ ist_table[i] = ist;
+ }
+ j = 0;
+ for(i=0;i<nb_input_files;i++) {
+ is = input_files[i];
+ for(k=0;k<is->nb_streams;k++) {
+ ist = ist_table[j++];
+ ist->st = is->streams[k];
+ ist->file_index = i;
+ ist->index = k;
+ ist->discard = 1; /* the stream is discarded by default
+ (changed later) */
+
+ if (ist->st->codec->rate_emu) {
+ ist->start = av_gettime();
+ ist->frame = 0;
+ }
+ }
+ }
+
+ /* output stream init */
+ nb_ostreams = 0;
+ for(i=0;i<nb_output_files;i++) {
+ os = output_files[i];
+ if (!os->nb_streams) {
+ fprintf(stderr, "Output file does not contain any stream\n");
+ exit(1);
+ }
+ nb_ostreams += os->nb_streams;
+ }
+ if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) {
+ fprintf(stderr, "Number of stream maps must match number of output streams\n");
+ exit(1);
+ }
+
+ /* Sanity check the mapping args -- do the input files & streams exist? */
+ for(i=0;i<nb_stream_maps;i++) {
+ int fi = stream_maps[i].file_index;
+ int si = stream_maps[i].stream_index;
+
+ if (fi < 0 || fi > nb_input_files - 1 ||
+ si < 0 || si > file_table[fi].nb_streams - 1) {
+ fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si);
+ exit(1);
+ }
+ fi = stream_maps[i].sync_file_index;
+ si = stream_maps[i].sync_stream_index;
+ if (fi < 0 || fi > nb_input_files - 1 ||
+ si < 0 || si > file_table[fi].nb_streams - 1) {
+ fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si);
+ exit(1);
+ }
+ }
+
+ ost_table = av_mallocz(sizeof(AVOutputStream *) * nb_ostreams);
+ if (!ost_table)
+ goto fail;
+ for(i=0;i<nb_ostreams;i++) {
+ ost = av_mallocz(sizeof(AVOutputStream));
+ if (!ost)
+ goto fail;
+ ost_table[i] = ost;
+ }
+
+ n = 0;
+ for(k=0;k<nb_output_files;k++) {
+ os = output_files[k];
+ for(i=0;i<os->nb_streams;i++) {
+ int found;
+ ost = ost_table[n++];
+ ost->file_index = k;
+ ost->index = i;
+ ost->st = os->streams[i];
+ if (nb_stream_maps > 0) {
+ ost->source_index = file_table[stream_maps[n-1].file_index].ist_index +
+ stream_maps[n-1].stream_index;
+
+ /* Sanity check that the stream types match */
+ if (ist_table[ost->source_index]->st->codec->codec_type != ost->st->codec->codec_type) {
+ fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n",
+ stream_maps[n-1].file_index, stream_maps[n-1].stream_index,
+ ost->file_index, ost->index);
+ exit(1);
+ }
+
+ } else {
+ /* get corresponding input stream index : we select the first one with the right type */
+ found = 0;
+ for(j=0;j<nb_istreams;j++) {
+ ist = ist_table[j];
+ if (ist->discard &&
+ ist->st->codec->codec_type == ost->st->codec->codec_type) {
+ ost->source_index = j;
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* try again and reuse existing stream */
+ for(j=0;j<nb_istreams;j++) {
+ ist = ist_table[j];
+ if (ist->st->codec->codec_type == ost->st->codec->codec_type) {
+ ost->source_index = j;
+ found = 1;
+ }
+ }
+ if (!found) {
+ fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n",
+ ost->file_index, ost->index);
+ exit(1);
+ }
+ }
+ }
+ ist = ist_table[ost->source_index];
+ ist->discard = 0;
+ ost->sync_ist = (nb_stream_maps > 0) ?
+ ist_table[file_table[stream_maps[n-1].sync_file_index].ist_index +
+ stream_maps[n-1].sync_stream_index] : ist;
+ }
+ }
+
+ /* for each output stream, we compute the right encoding parameters */
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ ist = ist_table[ost->source_index];
+
+ codec = ost->st->codec;
+ icodec = ist->st->codec;
+
+ if (ost->st->stream_copy) {
+ /* if stream_copy is selected, no need to decode or encode */
+ codec->codec_id = icodec->codec_id;
+ codec->codec_type = icodec->codec_type;
+ if(!codec->codec_tag) codec->codec_tag = icodec->codec_tag;
+ codec->bit_rate = icodec->bit_rate;
+ codec->extradata= icodec->extradata;
+ codec->extradata_size= icodec->extradata_size;
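+ /* prefer the codec time base over a sub-millisecond stream time base
+ when the codec's is coarser; otherwise keep the demuxer's time base */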
+ if(av_q2d(icodec->time_base) > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/1000)
+ codec->time_base = icodec->time_base;
+ else
+ codec->time_base = ist->st->time_base;
+ switch(codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ codec->sample_rate = icodec->sample_rate;
+ codec->channels = icodec->channels;
+ codec->frame_size = icodec->frame_size;
+ codec->block_align= icodec->block_align;
+ break;
+ case CODEC_TYPE_VIDEO:
+ codec->pix_fmt = icodec->pix_fmt;
+ codec->width = icodec->width;
+ codec->height = icodec->height;
+ codec->has_b_frames = icodec->has_b_frames;
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ break;
+ default:
+ av_abort();
+ }
+ } else {
+ switch(codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (av_fifo_init(&ost->fifo, 2 * MAX_AUDIO_PACKET_SIZE))
+ goto fail;
+
+ if (codec->channels == icodec->channels &&
+ codec->sample_rate == icodec->sample_rate) {
+ ost->audio_resample = 0;
+ } else {
+ if (codec->channels != icodec->channels &&
+ (icodec->codec_id == CODEC_ID_AC3 ||
+ icodec->codec_id == CODEC_ID_DTS)) {
+ /* Special case: 5.1 AC3 or DTS input with mono or stereo output;
+ request the specific number of channels from the decoder */
+ icodec->channels = codec->channels;
+ if (codec->sample_rate == icodec->sample_rate)
+ ost->audio_resample = 0;
+ else {
+ ost->audio_resample = 1;
+ }
+ } else {
+ ost->audio_resample = 1;
+ }
+ }
+ if(audio_sync_method>1)
+ ost->audio_resample = 1;
+
+ if(ost->audio_resample){
+ ost->resample = audio_resample_init(codec->channels, icodec->channels,
+ codec->sample_rate, icodec->sample_rate);
+ if(!ost->resample){
+ printf("Can't resample. Aborting.\n");
+ av_abort();
+ }
+ }
+ ist->decoding_needed = 1;
+ ost->encoding_needed = 1;
+ break;
+ case CODEC_TYPE_VIDEO:
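+ /* work out whether the output needs cropping, padding or rescaling by
+ comparing the requested geometry and pixel format with the input */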
+ ost->video_crop = ((frame_leftBand + frame_rightBand + frame_topBand + frame_bottomBand) != 0);
+ ost->video_pad = ((frame_padleft + frame_padright + frame_padtop + frame_padbottom) != 0);
+ ost->video_resample = ((codec->width != icodec->width -
+ (frame_leftBand + frame_rightBand) +
+ (frame_padleft + frame_padright)) ||
+ (codec->height != icodec->height -
+ (frame_topBand + frame_bottomBand) +
+ (frame_padtop + frame_padbottom)) ||
+ (codec->pix_fmt != icodec->pix_fmt));
+ if (ost->video_crop) {
+ ost->topBand = frame_topBand;
+ ost->leftBand = frame_leftBand;
+ }
+ if (ost->video_pad) {
+ ost->padtop = frame_padtop;
+ ost->padleft = frame_padleft;
+ ost->padbottom = frame_padbottom;
+ ost->padright = frame_padright;
+ if (!ost->video_resample) {
+ avcodec_get_frame_defaults(&ost->pict_tmp);
+ if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, codec->pix_fmt,
+ codec->width, codec->height ) )
+ goto fail;
+ }
+ }
+ if (ost->video_resample) {
+ avcodec_get_frame_defaults(&ost->pict_tmp);
+ if( avpicture_alloc( (AVPicture*)&ost->pict_tmp, codec->pix_fmt,
+ codec->width, codec->height ) )
+ goto fail;
+
+ ost->img_resample_ctx = sws_getContext(
+ icodec->width - (frame_leftBand + frame_rightBand),
+ icodec->height - (frame_topBand + frame_bottomBand),
+ icodec->pix_fmt,
+ codec->width - (frame_padleft + frame_padright),
+ codec->height - (frame_padtop + frame_padbottom),
+ codec->pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ost->img_resample_ctx == NULL) {
+ fprintf(stderr, "Cannot get resampling context\n");
+ exit(1);
+ }
+ ost->resample_height = icodec->height - (frame_topBand + frame_bottomBand);
+ }
+ ost->encoding_needed = 1;
+ ist->decoding_needed = 1;
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ ost->encoding_needed = 1;
+ ist->decoding_needed = 1;
+ break;
+ default:
+ av_abort();
+ break;
+ }
+ /* two pass mode */
+ if (ost->encoding_needed &&
+ (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
+ char logfilename[1024];
+ FILE *f;
+ int size;
+ char *logbuffer;
+
+ snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
+ pass_logfilename ?
+ pass_logfilename : DEFAULT_PASS_LOGFILENAME, i);
+ if (codec->flags & CODEC_FLAG_PASS1) {
+ f = fopen(logfilename, "w");
+ if (!f) {
+ perror(logfilename);
+ exit(1);
+ }
+ ost->logfile = f;
+ } else {
+ /* read the log file */
+ f = fopen(logfilename, "r");
+ if (!f) {
+ perror(logfilename);
+ exit(1);
+ }
+ fseek(f, 0, SEEK_END);
+ size = ftell(f);
+ fseek(f, 0, SEEK_SET);
+ logbuffer = av_malloc(size + 1);
+ if (!logbuffer) {
+ fprintf(stderr, "Could not allocate log buffer\n");
+ exit(1);
+ }
+ size = fread(logbuffer, 1, size, f);
+ fclose(f);
+ logbuffer[size] = '\0';
+ codec->stats_in = logbuffer;
+ }
+ }
+ }
+ if(codec->codec_type == CODEC_TYPE_VIDEO){
+ int size= codec->width * codec->height;
+ bit_buffer_size= FFMAX(bit_buffer_size, 4*size);
+ }
+ }
+
+ if (!bit_buffer)
+ bit_buffer = av_malloc(bit_buffer_size);
+ if (!bit_buffer)
+ goto fail;
+
+ /* dump the file output parameters - cannot be done earlier in case
+ of stream copy */
+ for(i=0;i<nb_output_files;i++) {
+ dump_format(output_files[i], i, output_files[i]->filename, 1);
+ }
+
+ /* dump the stream mapping */
+ if (verbose >= 0) {
+ fprintf(stderr, "Stream mapping:\n");
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ fprintf(stderr, " Stream #%d.%d -> #%d.%d",
+ ist_table[ost->source_index]->file_index,
+ ist_table[ost->source_index]->index,
+ ost->file_index,
+ ost->index);
+ if (ost->sync_ist != ist_table[ost->source_index])
+ fprintf(stderr, " [sync #%d.%d]",
+ ost->sync_ist->file_index,
+ ost->sync_ist->index);
+ fprintf(stderr, "\n");
+ }
+ }
+
+ /* open each encoder */
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ if (ost->encoding_needed) {
+ AVCodec *codec;
+ codec = avcodec_find_encoder(ost->st->codec->codec_id);
+ if (!codec) {
+ fprintf(stderr, "Unsupported codec for output stream #%d.%d\n",
+ ost->file_index, ost->index);
+ exit(1);
+ }
+ if (avcodec_open(ost->st->codec, codec) < 0) {
+ fprintf(stderr, "Error while opening codec for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height\n",
+ ost->file_index, ost->index);
+ exit(1);
+ }
+ extra_size += ost->st->codec->extradata_size;
+ }
+ }
+
+ /* open each decoder */
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ if (ist->decoding_needed) {
+ AVCodec *codec;
+ codec = avcodec_find_decoder(ist->st->codec->codec_id);
+ if (!codec) {
+ fprintf(stderr, "Unsupported codec (id=%d) for input stream #%d.%d\n",
+ ist->st->codec->codec_id, ist->file_index, ist->index);
+ exit(1);
+ }
+ if (avcodec_open(ist->st->codec, codec) < 0) {
+ fprintf(stderr, "Error while opening codec for input stream #%d.%d\n",
+ ist->file_index, ist->index);
+ exit(1);
+ }
+ //if (ist->st->codec->codec_type == CODEC_TYPE_VIDEO)
+ // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD;
+ }
+ }
+
+ /* init pts */
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ is = input_files[ist->file_index];
+ ist->pts = 0;
+ ist->next_pts = av_rescale_q(ist->st->start_time, ist->st->time_base, AV_TIME_BASE_Q);
+ if(ist->st->start_time == AV_NOPTS_VALUE)
+ ist->next_pts=0;
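+ /* with a user-supplied timestamp offset the first pts cannot be predicted yet */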
+ if(input_files_ts_offset[ist->file_index])
+ ist->next_pts= AV_NOPTS_VALUE;
+ ist->is_start = 1;
+ }
+
+ /* compute buffer size max (should use a complete heuristic) */
+ for(i=0;i<nb_input_files;i++) {
+ file_table[i].buffer_size_max = 2048;
+ }
+
+ /* set meta data information from input file if required */
+ for (i=0;i<nb_meta_data_maps;i++) {
+ AVFormatContext *out_file;
+ AVFormatContext *in_file;
+
+ int out_file_index = meta_data_maps[i].out_file;
+ int in_file_index = meta_data_maps[i].in_file;
+ if ( out_file_index < 0 || out_file_index >= nb_output_files ) {
+ fprintf(stderr, "Invalid output file index %d map_meta_data(%d,%d)\n", out_file_index, out_file_index, in_file_index);
+ ret = -EINVAL;
+ goto fail;
+ }
+ if ( in_file_index < 0 || in_file_index >= nb_input_files ) {
+ fprintf(stderr, "Invalid input file index %d map_meta_data(%d,%d)\n", in_file_index, out_file_index, in_file_index);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ out_file = output_files[out_file_index];
+ in_file = input_files[in_file_index];
+
+ strcpy(out_file->title, in_file->title);
+ strcpy(out_file->author, in_file->author);
+ strcpy(out_file->copyright, in_file->copyright);
+ strcpy(out_file->comment, in_file->comment);
+ strcpy(out_file->album, in_file->album);
+ out_file->year = in_file->year;
+ out_file->track = in_file->track;
+ strcpy(out_file->genre, in_file->genre);
+ }
+
+ /* open files and write file headers */
+ for(i=0;i<nb_output_files;i++) {
+ os = output_files[i];
+ if (av_write_header(os) < 0) {
+ fprintf(stderr, "Could not write header for output file #%d (incorrect codec parameters ?)\n", i);
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if ( !using_stdin && verbose >= 0) {
+ fprintf(stderr, "Press [q] to stop encoding\n");
+ url_set_interrupt_cb(decode_interrupt_cb);
+ }
+ term_init();
+
+ stream_no_data = 0;
+ key = -1;
+
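+ /* main read/transcode loop: runs until the inputs are exhausted,
+ a limit is hit, or the user interrupts */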
+ for(; received_sigterm == 0;) {
+ int file_index, ist_index;
+ AVPacket pkt;
+ double ipts_min;
+ double opts_min;
+
+ redo:
+ ipts_min= 1e100;
+ opts_min= 1e100;
+ /* if 'q' pressed, exits */
+ if (!using_stdin) {
+ if (q_pressed)
+ break;
+ /* read_key() returns 0 on EOF */
+ key = read_key();
+ if (key == 'q')
+ break;
+ }
+
+ /* select the stream that we must read now by looking at the
+ smallest output pts */
+ file_index = -1;
+ for(i=0;i<nb_ostreams;i++) {
+ double ipts, opts;
+ ost = ost_table[i];
+ os = output_files[ost->file_index];
+ ist = ist_table[ost->source_index];
+ if(ost->st->codec->codec_type == CODEC_TYPE_VIDEO)
+ opts = ost->sync_opts * av_q2d(ost->st->codec->time_base);
+ else
+ opts = ost->st->pts.val * av_q2d(ost->st->time_base);
+ ipts = (double)ist->pts;
+ if (!file_table[ist->file_index].eof_reached){
+ if(ipts < ipts_min) {
+ ipts_min = ipts;
+ if(input_sync ) file_index = ist->file_index;
+ }
+ if(opts < opts_min) {
+ opts_min = opts;
+ if(!input_sync) file_index = ist->file_index;
+ }
+ }
+ if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){
+ file_index= -1;
+ break;
+ }
+ }
+ /* if none, we are finished */
+ if (file_index < 0) {
+ break;
+ }
+
+ /* finish if recording time exhausted */
+ if (recording_time > 0 && opts_min >= (recording_time / 1000000.0))
+ break;
+
+ /* finish if limit size exhausted */
+ if (limit_filesize != 0 && (limit_filesize * 1024) < url_ftell(&output_files[0]->pb))
+ break;
+
+ /* read a frame from it and output it in the fifo */
+ is = input_files[file_index];
+ if (av_read_frame(is, &pkt) < 0) {
+ file_table[file_index].eof_reached = 1;
+ if (opt_shortest) break; else continue;
+ }
+
+ if (!pkt.size) {
+ stream_no_data = is;
+ } else {
+ stream_no_data = 0;
+ }
+ if (do_pkt_dump) {
+ av_pkt_dump(stdout, &pkt, do_hex_dump);
+ }
+ /* the following test is needed in case new streams appear
+ dynamically in the stream: we ignore them */
+ if (pkt.stream_index >= file_table[file_index].nb_streams)
+ goto discard_packet;
+ ist_index = file_table[file_index].ist_index + pkt.stream_index;
+ ist = ist_table[ist_index];
+ if (ist->discard)
+ goto discard_packet;
+
+// fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files_ts_offset[ist->file_index], ist->st->codec->codec_type);
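+ /* handle timestamp discontinuities: a dts far from the predicted pts
+ shifts the per-file timestamp offset for all streams of that file */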
+ if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE) {
+ int64_t delta= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q) - ist->next_pts;
+ if(FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE && !copy_ts){
+ input_files_ts_offset[ist->file_index]-= delta;
+ if (verbose > 2)
+ fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files_ts_offset[ist->file_index]);
+ for(i=0; i<file_table[file_index].nb_streams; i++){
+ int index= file_table[file_index].ist_index + i;
+ ist_table[index]->next_pts += delta;
+ ist_table[index]->is_start=1;
+ }
+ }
+ }
+
+ //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->index, pkt.size);
+ if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
+
+ if (verbose >= 0)
+ fprintf(stderr, "Error while decoding stream #%d.%d\n",
+ ist->file_index, ist->index);
+
+ av_free_packet(&pkt);
+ goto redo;
+ }
+
+ discard_packet:
+ av_free_packet(&pkt);
+
+ /* dump report by using the output first video and audio streams */
+ print_report(output_files, ost_table, nb_ostreams, 0);
+ }
+
+ /* at the end of stream, we must flush the decoder buffers */
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ if (ist->decoding_needed) {
+ output_packet(ist, i, ost_table, nb_ostreams, NULL);
+ }
+ }
+
+ term_exit();
+
+ /* write the trailer if needed and close file */
+ for(i=0;i<nb_output_files;i++) {
+ os = output_files[i];
+ av_write_trailer(os);
+ }
+
+ /* dump report by using the first video and audio streams */
+ print_report(output_files, ost_table, nb_ostreams, 1);
+
+ /* close each encoder */
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ if (ost->encoding_needed) {
+ av_freep(&ost->st->codec->stats_in);
+ avcodec_close(ost->st->codec);
+ }
+ }
+
+ /* close each decoder */
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ if (ist->decoding_needed) {
+ avcodec_close(ist->st->codec);
+ }
+ }
+
+ /* finished ! */
+
+ ret = 0;
+ fail1:
+ av_freep(&bit_buffer);
+ av_free(file_table);
+
+ if (ist_table) {
+ for(i=0;i<nb_istreams;i++) {
+ ist = ist_table[i];
+ av_free(ist);
+ }
+ av_free(ist_table);
+ }
+ if (ost_table) {
+ for(i=0;i<nb_ostreams;i++) {
+ ost = ost_table[i];
+ if (ost) {
+ if (ost->logfile) {
+ fclose(ost->logfile);
+ ost->logfile = NULL;
+ }
+ av_fifo_free(&ost->fifo); /* works even if fifo is not
+ initialized but set to zero */
+ av_free(ost->pict_tmp.data[0]);
+ if (ost->video_resample)
+ sws_freeContext(ost->img_resample_ctx);
+ if (ost->audio_resample)
+ audio_resample_close(ost->resample);
+ av_free(ost);
+ }
+ }
+ av_free(ost_table);
+ }
+ return ret;
+ fail:
+ ret = -ENOMEM;
+ goto fail1;
+}
+
+#if 0
+int file_read(const char *filename)
+{
+ URLContext *h;
+ unsigned char buffer[1024];
+ int len, i;
+
+ if (url_open(&h, filename, O_RDONLY) < 0) {
+ printf("could not open '%s'\n", filename);
+ return -1;
+ }
+ for(;;) {
+ len = url_read(h, buffer, sizeof(buffer));
+ if (len <= 0)
+ break;
+ for(i=0;i<len;i++) putchar(buffer[i]);
+ }
+ url_close(h);
+ return 0;
+}
+#endif
+
+static void opt_format(const char *arg)
+{
+ /* compatibility stuff for pgmyuv */
+ if (!strcmp(arg, "pgmyuv")) {
+ pgmyuv_compatibility_hack=1;
+// opt_image_format(arg);
+ arg = "image2";
+ }
+
+ file_iformat = av_find_input_format(arg);
+ file_oformat = guess_format(arg, NULL, NULL);
+ if (!file_iformat && !file_oformat) {
+ fprintf(stderr, "Unknown input or output format: %s\n", arg);
+ exit(1);
+ }
+}
+
+static void opt_video_rc_eq(char *arg)
+{
+ video_rc_eq = arg;
+}
+
+static void opt_video_rc_override_string(char *arg)
+{
+ video_rc_override_string = arg;
+}
+
+static void opt_me_threshold(const char *arg)
+{
+ me_threshold = atoi(arg);
+}
+
+static void opt_verbose(const char *arg)
+{
+ verbose = atoi(arg);
+ av_log_set_level(atoi(arg));
+}
+
+static void opt_frame_rate(const char *arg)
+{
+ if (parse_frame_rate(&frame_rate, &frame_rate_base, arg) < 0) {
+ fprintf(stderr, "Incorrect frame rate\n");
+ exit(1);
+ }
+}
+
+static void opt_frame_crop_top(const char *arg)
+{
+ frame_topBand = atoi(arg);
+ if (frame_topBand < 0) {
+ fprintf(stderr, "Incorrect top crop size\n");
+ exit(1);
+ }
+ if ((frame_topBand % 2) != 0) {
+ fprintf(stderr, "Top crop size must be a multiple of 2\n");
+ exit(1);
+ }
+ if ((frame_topBand) >= frame_height){
+ fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
+ exit(1);
+ }
+ frame_height -= frame_topBand;
+}
+
+static void opt_frame_crop_bottom(const char *arg)
+{
+ frame_bottomBand = atoi(arg);
+ if (frame_bottomBand < 0) {
+ fprintf(stderr, "Incorrect bottom crop size\n");
+ exit(1);
+ }
+ if ((frame_bottomBand % 2) != 0) {
+ fprintf(stderr, "Bottom crop size must be a multiple of 2\n");
+ exit(1);
+ }
+ if ((frame_bottomBand) >= frame_height){
+ fprintf(stderr, "Vertical crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
+ exit(1);
+ }
+ frame_height -= frame_bottomBand;
+}
+
+static void opt_frame_crop_left(const char *arg)
+{
+ frame_leftBand = atoi(arg);
+ if (frame_leftBand < 0) {
+ fprintf(stderr, "Incorrect left crop size\n");
+ exit(1);
+ }
+ if ((frame_leftBand % 2) != 0) {
+ fprintf(stderr, "Left crop size must be a multiple of 2\n");
+ exit(1);
+ }
+ if ((frame_leftBand) >= frame_width){
+ fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
+ exit(1);
+ }
+ frame_width -= frame_leftBand;
+}
+
+static void opt_frame_crop_right(const char *arg)
+{
+ frame_rightBand = atoi(arg);
+ if (frame_rightBand < 0) {
+ fprintf(stderr, "Incorrect right crop size\n");
+ exit(1);
+ }
+ if ((frame_rightBand % 2) != 0) {
+ fprintf(stderr, "Right crop size must be a multiple of 2\n");
+ exit(1);
+ }
+ if ((frame_rightBand) >= frame_width){
+ fprintf(stderr, "Horizontal crop dimensions are outside the range of the original image.\nRemember to crop first and scale second.\n");
+ exit(1);
+ }
+ frame_width -= frame_rightBand;
+}
+
+static void opt_frame_size(const char *arg)
+{
+ if (parse_image_size(&frame_width, &frame_height, arg) < 0) {
+ fprintf(stderr, "Incorrect frame size\n");
+ exit(1);
+ }
+ if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
+ fprintf(stderr, "Frame size must be a multiple of 2\n");
+ exit(1);
+ }
+}
+
+
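+ /* fixed-point RGB -> YCbCr conversion (BT.601 coefficients, SCALEBITS bits of
+ fractional precision), used to compute the padding color */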
+#define SCALEBITS 10
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define RGB_TO_Y(r, g, b) \
+((FIX(0.29900) * (r) + FIX(0.58700) * (g) + \
+ FIX(0.11400) * (b) + ONE_HALF) >> SCALEBITS)
+
+#define RGB_TO_U(r1, g1, b1, shift)\
+(((- FIX(0.16874) * r1 - FIX(0.33126) * g1 + \
+ FIX(0.50000) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_V(r1, g1, b1, shift)\
+(((FIX(0.50000) * r1 - FIX(0.41869) * g1 - \
+ FIX(0.08131) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+static void opt_pad_color(const char *arg) {
+ /* Input is expected to be six hex digits, like an HTML color
+ specification but without the leading '#' */
+ int rgb = strtol(arg, NULL, 16);
+ int r,g,b;
+
+ r = (rgb >> 16);
+ g = ((rgb >> 8) & 255);
+ b = (rgb & 255);
+
+ padcolor[0] = RGB_TO_Y(r,g,b);
+ padcolor[1] = RGB_TO_U(r,g,b,0);
+ padcolor[2] = RGB_TO_V(r,g,b,0);
+}
+
+static void opt_frame_pad_top(const char *arg)
+{
+ frame_padtop = atoi(arg);
+ if (frame_padtop < 0) {
+ fprintf(stderr, "Incorrect top pad size\n");
+ exit(1);
+ }
+ if ((frame_padtop % 2) != 0) {
+ fprintf(stderr, "Top pad size must be a multiple of 2\n");
+ exit(1);
+ }
+}
+
+static void opt_frame_pad_bottom(const char *arg)
+{
+ frame_padbottom = atoi(arg);
+ if (frame_padbottom < 0) {
+ fprintf(stderr, "Incorrect bottom pad size\n");
+ exit(1);
+ }
+ if ((frame_padbottom % 2) != 0) {
+ fprintf(stderr, "Bottom pad size must be a multiple of 2\n");
+ exit(1);
+ }
+}
+
+
+static void opt_frame_pad_left(const char *arg)
+{
+ frame_padleft = atoi(arg);
+ if (frame_padleft < 0) {
+ fprintf(stderr, "Incorrect left pad size\n");
+ exit(1);
+ }
+ if ((frame_padleft % 2) != 0) {
+ fprintf(stderr, "Left pad size must be a multiple of 2\n");
+ exit(1);
+ }
+}
+
+
+static void opt_frame_pad_right(const char *arg)
+{
+ frame_padright = atoi(arg);
+ if (frame_padright < 0) {
+ fprintf(stderr, "Incorrect right pad size\n");
+ exit(1);
+ }
+ if ((frame_padright % 2) != 0) {
+ fprintf(stderr, "Right pad size must be a multiple of 2\n");
+ exit(1);
+ }
+}
+
+
+static void opt_frame_pix_fmt(const char *arg)
+{
+ frame_pix_fmt = avcodec_get_pix_fmt(arg);
+}
+
+static void opt_frame_aspect_ratio(const char *arg)
+{
+ int x = 0, y = 0;
+ double ar = 0;
+ const char *p;
+
+ p = strchr(arg, ':');
+ if (p) {
+ x = strtol(arg, (char **)&arg, 10);
+ if (arg == p)
+ y = strtol(arg+1, (char **)&arg, 10);
+ if (x > 0 && y > 0)
+ ar = (double)x / (double)y;
+ } else
+ ar = strtod(arg, (char **)&arg);
+
+ if (!ar) {
+ fprintf(stderr, "Incorrect aspect ratio specification.\n");
+ exit(1);
+ }
+ frame_aspect_ratio = ar;
+}
+
+static void opt_qscale(const char *arg)
+{
+ video_qscale = atof(arg);
+ if (video_qscale <= 0 ||
+ video_qscale > 255) {
+ fprintf(stderr, "qscale must be > 0.0 and <= 255\n");
+ exit(1);
+ }
+}
+
+static void opt_qdiff(const char *arg)
+{
+ video_qdiff = atoi(arg);
+ if (video_qdiff < 0 ||
+ video_qdiff > 31) {
+ fprintf(stderr, "qdiff must be >= 0 and <= 31\n");
+ exit(1);
+ }
+}
+
+static void opt_packet_size(const char *arg)
+{
+ packet_size= atoi(arg);
+}
+
+static void opt_strict(const char *arg)
+{
+ strict= atoi(arg);
+}
+
+static void opt_top_field_first(const char *arg)
+{
+ top_field_first= atoi(arg);
+}
+
+static void opt_thread_count(const char *arg)
+{
+ thread_count= atoi(arg);
+#if !defined(HAVE_THREADS)
+ if (verbose >= 0)
+ fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
+#endif
+}
+
+static void opt_audio_bitrate(const char *arg)
+{
+ audio_bit_rate = atoi(arg) * 1000;
+}
+
+static void opt_audio_rate(const char *arg)
+{
+ audio_sample_rate = atoi(arg);
+}
+
+static void opt_audio_channels(const char *arg)
+{
+ audio_channels = atoi(arg);
+}
+
+static void opt_video_device(const char *arg)
+{
+ video_device = av_strdup(arg);
+}
+
+static void opt_grab_device(const char *arg)
+{
+ grab_device = av_strdup(arg);
+}
+
+static void opt_video_channel(const char *arg)
+{
+ video_channel = strtol(arg, NULL, 0);
+}
+
+static void opt_video_standard(const char *arg)
+{
+ video_standard = av_strdup(arg);
+}
+
+static void opt_audio_device(const char *arg)
+{
+ audio_device = av_strdup(arg);
+}
+
+static void opt_codec(int *pstream_copy, int *pcodec_id,
+ int codec_type, const char *arg)
+{
+ AVCodec *p;
+
+ if (!strcmp(arg, "copy")) {
+ *pstream_copy = 1;
+ } else {
+ p = first_avcodec;
+ while (p) {
+ if (!strcmp(p->name, arg) && p->type == codec_type)
+ break;
+ p = p->next;
+ }
+ if (p == NULL) {
+ fprintf(stderr, "Unknown codec '%s'\n", arg);
+ exit(1);
+ } else {
+ *pcodec_id = p->id;
+ }
+ }
+}
+
+static void opt_audio_codec(const char *arg)
+{
+ opt_codec(&audio_stream_copy, &audio_codec_id, CODEC_TYPE_AUDIO, arg);
+}
+
+static void opt_audio_tag(const char *arg)
+{
+ char *tail;
+ audio_codec_tag= strtol(arg, &tail, 0);
+
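+ /* a non-numeric argument is treated as a four-character code (fourcc) */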
+ if(!tail || *tail)
+ audio_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
+}
+
+static void opt_video_tag(const char *arg)
+{
+ char *tail;
+ video_codec_tag= strtol(arg, &tail, 0);
+
+ if(!tail || *tail)
+ video_codec_tag= arg[0] + (arg[1]<<8) + (arg[2]<<16) + (arg[3]<<24);
+}
+
+static void add_frame_hooker(const char *arg)
+{
+ int argc = 0;
+ char *argv[64];
+ int i;
+ char *args = av_strdup(arg);
+
+ using_vhook = 1;
+
+ argv[0] = strtok(args, " ");
+ while (argc < 62 && (argv[++argc] = strtok(NULL, " "))) {
+ }
+
+ i = frame_hook_add(argc, argv);
+
+ if (i != 0) {
+ fprintf(stderr, "Failed to add video hook function: %s\n", arg);
+ exit(1);
+ }
+}
+
+const char *motion_str[] = {
+ "zero",
+ "full",
+ "log",
+ "phods",
+ "epzs",
+ "x1",
+ "hex",
+ "umh",
+ "iter",
+ NULL,
+};
+
+static void opt_motion_estimation(const char *arg)
+{
+ const char **p;
+ p = motion_str;
+ for(;;) {
+ if (!*p) {
+ fprintf(stderr, "Unknown motion estimation method '%s'\n", arg);
+ exit(1);
+ }
+ if (!strcmp(*p, arg))
+ break;
+ p++;
+ }
+ me_method = (p - motion_str) + 1;
+}
+
+static void opt_video_codec(const char *arg)
+{
+ opt_codec(&video_stream_copy, &video_codec_id, CODEC_TYPE_VIDEO, arg);
+}
+
+static void opt_subtitle_codec(const char *arg)
+{
+ opt_codec(&subtitle_stream_copy, &subtitle_codec_id, CODEC_TYPE_SUBTITLE, arg);
+}
+
+static void opt_map(const char *arg)
+{
+ AVStreamMap *m;
+ const char *p;
+
+ p = arg;
+ m = &stream_maps[nb_stream_maps++];
+
+ m->file_index = strtol(arg, (char **)&p, 0);
+ if (*p)
+ p++;
+
+ m->stream_index = strtol(p, (char **)&p, 0);
+ if (*p) {
+ p++;
+ m->sync_file_index = strtol(p, (char **)&p, 0);
+ if (*p)
+ p++;
+ m->sync_stream_index = strtol(p, (char **)&p, 0);
+ } else {
+ m->sync_file_index = m->file_index;
+ m->sync_stream_index = m->stream_index;
+ }
+}
+
+static void opt_map_meta_data(const char *arg)
+{
+ AVMetaDataMap *m;
+ const char *p;
+
+ p = arg;
+ m = &meta_data_maps[nb_meta_data_maps++];
+
+ m->out_file = strtol(arg, (char **)&p, 0);
+ if (*p)
+ p++;
+
+ m->in_file = strtol(p, (char **)&p, 0);
+}
+
+static void opt_recording_time(const char *arg)
+{
+ recording_time = parse_date(arg, 1);
+}
+
+static void opt_start_time(const char *arg)
+{
+ start_time = parse_date(arg, 1);
+}
+
+static void opt_rec_timestamp(const char *arg)
+{
+ rec_timestamp = parse_date(arg, 0) / 1000000;
+}
+
+static void opt_input_ts_offset(const char *arg)
+{
+ input_ts_offset = parse_date(arg, 1);
+}
+
+static void opt_input_file(const char *filename)
+{
+ AVFormatContext *ic;
+ AVFormatParameters params, *ap = &params;
+ int err, i, ret, rfps, rfps_base;
+ int64_t timestamp;
+
+ if (!strcmp(filename, "-"))
+ filename = "pipe:";
+
+ using_stdin |= !strncmp(filename, "pipe:", 5) ||
+ !strcmp( filename, "/dev/stdin" );
+
+ /* get default parameters from command line */
+ ic = av_alloc_format_context();
+
+ memset(ap, 0, sizeof(*ap));
+ ap->prealloced_context = 1;
+ ap->sample_rate = audio_sample_rate;
+ ap->channels = audio_channels;
+ ap->time_base.den = frame_rate;
+ ap->time_base.num = frame_rate_base;
+ ap->width = frame_width + frame_padleft + frame_padright;
+ ap->height = frame_height + frame_padtop + frame_padbottom;
+ ap->pix_fmt = frame_pix_fmt;
+ ap->device = grab_device;
+ ap->channel = video_channel;
+ ap->standard = video_standard;
+ ap->video_codec_id = video_codec_id;
+ ap->audio_codec_id = audio_codec_id;
+ if(pgmyuv_compatibility_hack)
+ ap->video_codec_id= CODEC_ID_PGMYUV;
+
+ for(i=0; i<opt_name_count; i++){
+ AVOption *opt;
+ double d= av_get_double(avformat_opts, opt_names[i], &opt);
+ if(d==d && (opt->flags&AV_OPT_FLAG_DECODING_PARAM))
+ av_set_double(ic, opt_names[i], d);
+ }
+ /* open the input file with generic libav function */
+ err = av_open_input_file(&ic, filename, file_iformat, 0, ap);
+ if (err < 0) {
+ print_error(filename, err);
+ exit(1);
+ }
+
+ ic->loop_input = loop_input;
+
+ /* If there is not enough info to get the stream parameters, we decode the
+ first frames to get them (used in the MPEG case, for example). */
+ ret = av_find_stream_info(ic);
+ if (ret < 0 && verbose >= 0) {
+ fprintf(stderr, "%s: could not find codec parameters\n", filename);
+ exit(1);
+ }
+
+ timestamp = start_time;
+ /* add the stream start time */
+ if (ic->start_time != AV_NOPTS_VALUE)
+ timestamp += ic->start_time;
+
+ /* if seeking requested, we execute it */
+ if (start_time != 0) {
+ ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
+ if (ret < 0) {
+ fprintf(stderr, "%s: could not seek to position %0.3f\n",
+ filename, (double)timestamp / AV_TIME_BASE);
+ }
+ /* reset seek info */
+ start_time = 0;
+ }
+
+ /* update the current parameters so that they match the one of the input stream */
+ for(i=0;i<ic->nb_streams;i++) {
+ int j;
+ AVCodecContext *enc = ic->streams[i]->codec;
+#if defined(HAVE_THREADS)
+ if(thread_count>1)
+ avcodec_thread_init(enc, thread_count);
+#endif
+ enc->thread_count= thread_count;
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ for(j=0; j<opt_name_count; j++){
+ AVOption *opt;
+ double d= av_get_double(avctx_opts, opt_names[j], &opt);
+ if(d==d && (opt->flags&AV_OPT_FLAG_AUDIO_PARAM) && (opt->flags&AV_OPT_FLAG_DECODING_PARAM))
+ av_set_double(enc, opt_names[j], d);
+ }
+ //fprintf(stderr, "\nInput Audio channels: %d", enc->channels);
+ audio_channels = enc->channels;
+ audio_sample_rate = enc->sample_rate;
+ if(audio_disable)
+ ic->streams[i]->discard= AVDISCARD_ALL;
+ break;
+ case CODEC_TYPE_VIDEO:
+ for(j=0; j<opt_name_count; j++){
+ AVOption *opt;
+ double d= av_get_double(avctx_opts, opt_names[j], &opt);
+ if(d==d && (opt->flags&AV_OPT_FLAG_VIDEO_PARAM) && (opt->flags&AV_OPT_FLAG_DECODING_PARAM))
+ av_set_double(enc, opt_names[j], d);
+ }
+ frame_height = enc->height;
+ frame_width = enc->width;
+ frame_aspect_ratio = av_q2d(enc->sample_aspect_ratio) * enc->width / enc->height;
+ frame_pix_fmt = enc->pix_fmt;
+ rfps = ic->streams[i]->r_frame_rate.num;
+ rfps_base = ic->streams[i]->r_frame_rate.den;
+ if(enc->lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
+ if(me_threshold)
+ enc->debug |= FF_DEBUG_MV;
+
+ if (enc->time_base.den != rfps || enc->time_base.num != rfps_base) {
+
+ if (verbose >= 0)
+ fprintf(stderr,"\nSeems that stream %d comes from film source: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
+ i, (float)enc->time_base.den / enc->time_base.num, enc->time_base.den, enc->time_base.num,
+
+ (float)rfps / rfps_base, rfps, rfps_base);
+ }
+ /* update the current frame rate to match the stream frame rate */
+ frame_rate = rfps;
+ frame_rate_base = rfps_base;
+
+ enc->rate_emu = rate_emu;
+ if(video_disable)
+ ic->streams[i]->discard= AVDISCARD_ALL;
+ else if(video_discard)
+ ic->streams[i]->discard= video_discard;
+ break;
+ case CODEC_TYPE_DATA:
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ break;
+ case CODEC_TYPE_UNKNOWN:
+ break;
+ default:
+ av_abort();
+ }
+ }
+
+ input_files[nb_input_files] = ic;
+ input_files_ts_offset[nb_input_files] = input_ts_offset - (copy_ts ? 0 : timestamp);
+ /* dump the file content */
+ if (verbose >= 0)
+ dump_format(ic, nb_input_files, filename, 0);
+
+ nb_input_files++;
+ file_iformat = NULL;
+ file_oformat = NULL;
+
+ grab_device = NULL;
+ video_channel = 0;
+
+ rate_emu = 0;
+}
+
+static void opt_grab(const char *arg)
+{
+ file_iformat = av_find_input_format(arg);
+ opt_input_file("");
+}
+
+static void check_audio_video_inputs(int *has_video_ptr, int *has_audio_ptr)
+{
+ int has_video, has_audio, i, j;
+ AVFormatContext *ic;
+
+ has_video = 0;
+ has_audio = 0;
+ for(j=0;j<nb_input_files;j++) {
+ ic = input_files[j];
+ for(i=0;i<ic->nb_streams;i++) {
+ AVCodecContext *enc = ic->streams[i]->codec;
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ has_audio = 1;
+ break;
+ case CODEC_TYPE_VIDEO:
+ has_video = 1;
+ break;
+ case CODEC_TYPE_DATA:
+ case CODEC_TYPE_UNKNOWN:
+ case CODEC_TYPE_SUBTITLE:
+ break;
+ default:
+ av_abort();
+ }
+ }
+ }
+ *has_video_ptr = has_video;
+ *has_audio_ptr = has_audio;
+}
+
+static void new_video_stream(AVFormatContext *oc)
+{
+ AVStream *st;
+ AVCodecContext *video_enc;
+ int codec_id;
+
+ st = av_new_stream(oc, oc->nb_streams);
+ if (!st) {
+ fprintf(stderr, "Could not alloc stream\n");
+ exit(1);
+ }
+ bitstream_filters[nb_output_files][oc->nb_streams - 1]= video_bitstream_filters;
+ video_bitstream_filters= NULL;
+
+#if defined(HAVE_THREADS)
+ if(thread_count>1)
+ avcodec_thread_init(st->codec, thread_count);
+#endif
+
+ video_enc = st->codec;
+
+ if(video_codec_tag)
+ video_enc->codec_tag= video_codec_tag;
+
+ if( (video_global_header&1)
+ || (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))){
+ video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ avctx_opts->flags|= CODEC_FLAG_GLOBAL_HEADER;
+ }
+ if(video_global_header&2){
+ video_enc->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
+ avctx_opts->flags2|= CODEC_FLAG2_LOCAL_HEADER;
+ }
+
+ if (video_stream_copy) {
+ st->stream_copy = 1;
+ video_enc->codec_type = CODEC_TYPE_VIDEO;
+ } else {
+ char *p;
+ int i;
+ AVCodec *codec;
+
+ codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, CODEC_TYPE_VIDEO);
+ if (video_codec_id != CODEC_ID_NONE)
+ codec_id = video_codec_id;
+
+ video_enc->codec_id = codec_id;
+ codec = avcodec_find_encoder(codec_id);
+
+ for(i=0; i<opt_name_count; i++){
+ AVOption *opt;
+ double d= av_get_double(avctx_opts, opt_names[i], &opt);
+ if(d==d && (opt->flags&AV_OPT_FLAG_VIDEO_PARAM) && (opt->flags&AV_OPT_FLAG_ENCODING_PARAM))
+ av_set_double(video_enc, opt_names[i], d);
+ }
+
+ video_enc->time_base.den = frame_rate;
+ video_enc->time_base.num = frame_rate_base;
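+ /* if the encoder only supports certain frame rates, pick the supported
+ rate closest to the requested one */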
+ if(codec && codec->supported_framerates){
+ const AVRational *p= codec->supported_framerates;
+ AVRational req= (AVRational){frame_rate, frame_rate_base};
+ const AVRational *best=NULL;
+ AVRational best_error= (AVRational){INT_MAX, 1};
+ for(; p->den!=0; p++){
+ AVRational error= av_sub_q(req, *p);
+ if(error.num <0) error.num *= -1;
+ if(av_cmp_q(error, best_error) < 0){
+ best_error= error;
+ best= p;
+ }
+ }
+ video_enc->time_base.den= best->num;
+ video_enc->time_base.num= best->den;
+ }
+
+ video_enc->width = frame_width + frame_padright + frame_padleft;
+ video_enc->height = frame_height + frame_padtop + frame_padbottom;
+ video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*video_enc->height/video_enc->width, 255);
+ video_enc->pix_fmt = frame_pix_fmt;
+
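+ /* fall back to the encoder's first supported pixel format if the requested one is not supported */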
+ if(codec && codec->pix_fmts){
+ const enum PixelFormat *p= codec->pix_fmts;
+ for(; *p!=-1; p++){
+ if(*p == video_enc->pix_fmt)
+ break;
+ }
+ if(*p == -1)
+ video_enc->pix_fmt = codec->pix_fmts[0];
+ }
+
+ if (intra_only)
+ video_enc->gop_size = 0;
+ if (video_qscale || same_quality) {
+ video_enc->flags |= CODEC_FLAG_QSCALE;
+ video_enc->global_quality=
+ st->quality = FF_QP2LAMBDA * video_qscale;
+ }
+
+ if(intra_matrix)
+ video_enc->intra_matrix = intra_matrix;
+ if(inter_matrix)
+ video_enc->inter_matrix = inter_matrix;
+
+ video_enc->max_qdiff = video_qdiff;
+ video_enc->rc_eq = video_rc_eq;
+ video_enc->thread_count = thread_count;
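+ /* parse rc_override entries of the form start,end,q separated by '/';
+ a negative q sets a quality factor instead of a fixed qscale */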
+ p= video_rc_override_string;
+ for(i=0; p; i++){
+ int start, end, q;
+ int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
+ if(e!=3){
+ fprintf(stderr, "error parsing rc_override\n");
+ exit(1);
+ }
+ video_enc->rc_override=
+ av_realloc(video_enc->rc_override,
+ sizeof(RcOverride)*(i+1));
+ video_enc->rc_override[i].start_frame= start;
+ video_enc->rc_override[i].end_frame = end;
+ if(q>0){
+ video_enc->rc_override[i].qscale= q;
+ video_enc->rc_override[i].quality_factor= 1.0;
+ }
+ else{
+ video_enc->rc_override[i].qscale= 0;
+ video_enc->rc_override[i].quality_factor= -q/100.0;
+ }
+ p= strchr(p, '/');
+ if(p) p++;
+ }
+ video_enc->rc_override_count=i;
+ video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4;
+ video_enc->me_threshold= me_threshold;
+ video_enc->intra_dc_precision= intra_dc_precision - 8;
+ video_enc->strict_std_compliance = strict;
+
+ if(packet_size){
+ video_enc->rtp_mode= 1;
+ video_enc->rtp_payload_size= packet_size;
+ }
+
+ if (do_psnr)
+ video_enc->flags|= CODEC_FLAG_PSNR;
+
+ video_enc->me_method = me_method;
+
+ /* two pass mode */
+ if (do_pass) {
+ if (do_pass == 1) {
+ video_enc->flags |= CODEC_FLAG_PASS1;
+ } else {
+ video_enc->flags |= CODEC_FLAG_PASS2;
+ }
+ }
+ }
+
+ /* reset some key parameters */
+ video_disable = 0;
+ video_codec_id = CODEC_ID_NONE;
+ video_stream_copy = 0;
+}
+
+static void new_audio_stream(AVFormatContext *oc)
+{
+ AVStream *st;
+ AVCodecContext *audio_enc;
+ int codec_id, i;
+
+ st = av_new_stream(oc, oc->nb_streams);
+ if (!st) {
+ fprintf(stderr, "Could not alloc stream\n");
+ exit(1);
+ }
+
+ bitstream_filters[nb_output_files][oc->nb_streams - 1]= audio_bitstream_filters;
+ audio_bitstream_filters= NULL;
+
+#if defined(HAVE_THREADS)
+ if(thread_count>1)
+ avcodec_thread_init(st->codec, thread_count);
+#endif
+
+ audio_enc = st->codec;
+ audio_enc->codec_type = CODEC_TYPE_AUDIO;
+ audio_enc->strict_std_compliance = strict;
+
+ if(audio_codec_tag)
+ audio_enc->codec_tag= audio_codec_tag;
+
+ if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
+ audio_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ avctx_opts->flags|= CODEC_FLAG_GLOBAL_HEADER;
+ }
+ if (audio_stream_copy) {
+ st->stream_copy = 1;
+ audio_enc->channels = audio_channels;
+ } else {
+ codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, CODEC_TYPE_AUDIO);
+
+ for(i=0; i<opt_name_count; i++){
+ AVOption *opt;
+ double d= av_get_double(avctx_opts, opt_names[i], &opt);
+ if(d==d && (opt->flags&AV_OPT_FLAG_AUDIO_PARAM) && (opt->flags&AV_OPT_FLAG_ENCODING_PARAM))
+ av_set_double(audio_enc, opt_names[i], d);
+ }
+
+ if (audio_codec_id != CODEC_ID_NONE)
+ codec_id = audio_codec_id;
+ audio_enc->codec_id = codec_id;
+
+ audio_enc->bit_rate = audio_bit_rate;
+ if (audio_qscale > QSCALE_NONE) {
+ audio_enc->flags |= CODEC_FLAG_QSCALE;
+ audio_enc->global_quality = st->quality = FF_QP2LAMBDA * audio_qscale;
+ }
+ audio_enc->thread_count = thread_count;
+ /* For audio codecs other than AC3 or DTS we limit */
+ /* the number of coded channels to stereo */
+ if (audio_channels > 2 && codec_id != CODEC_ID_AC3
+ && codec_id != CODEC_ID_DTS) {
+ audio_enc->channels = 2;
+ } else
+ audio_enc->channels = audio_channels;
+ }
+ audio_enc->sample_rate = audio_sample_rate;
+ audio_enc->time_base= (AVRational){1, audio_sample_rate};
+ if (audio_language) {
+ pstrcpy(st->language, sizeof(st->language), audio_language);
+ av_free(audio_language);
+ audio_language = NULL;
+ }
+
+ /* reset some key parameters */
+ audio_disable = 0;
+ audio_codec_id = CODEC_ID_NONE;
+ audio_stream_copy = 0;
+}
+
+static void opt_new_subtitle_stream(void)
+{
+ AVFormatContext *oc;
+ AVStream *st;
+ AVCodecContext *subtitle_enc;
+ int i;
+
+ if (nb_output_files <= 0) {
+ fprintf(stderr, "At least one output file must be specified\n");
+ exit(1);
+ }
+ oc = output_files[nb_output_files - 1];
+
+ st = av_new_stream(oc, oc->nb_streams);
+ if (!st) {
+ fprintf(stderr, "Could not alloc stream\n");
+ exit(1);
+ }
+
+ subtitle_enc = st->codec;
+ subtitle_enc->codec_type = CODEC_TYPE_SUBTITLE;
+ if (subtitle_stream_copy) {
+ st->stream_copy = 1;
+ } else {
+ for(i=0; i<opt_name_count; i++){
+ AVOption *opt;
+ double d= av_get_double(avctx_opts, opt_names[i], &opt);
+ if(d==d && (opt->flags&AV_OPT_FLAG_SUBTITLE_PARAM) && (opt->flags&AV_OPT_FLAG_ENCODING_PARAM))
+ av_set_double(subtitle_enc, opt_names[i], d);
+ }
+ subtitle_enc->codec_id = subtitle_codec_id;
+ }
+
+ if (subtitle_language) {
+ pstrcpy(st->language, sizeof(st->language), subtitle_language);
+ av_free(subtitle_language);
+ subtitle_language = NULL;
+ }
+
+ subtitle_codec_id = CODEC_ID_NONE;
+ subtitle_stream_copy = 0;
+}
+
+static void opt_new_audio_stream(void)
+{
+ AVFormatContext *oc;
+ if (nb_output_files <= 0) {
+ fprintf(stderr, "At least one output file must be specified\n");
+ exit(1);
+ }
+ oc = output_files[nb_output_files - 1];
+ new_audio_stream(oc);
+}
+
+static void opt_new_video_stream(void)
+{
+ AVFormatContext *oc;
+ if (nb_output_files <= 0) {
+ fprintf(stderr, "At least one output file must be specified\n");
+ exit(1);
+ }
+ oc = output_files[nb_output_files - 1];
+ new_video_stream(oc);
+}
+
+static void opt_output_file(const char *filename)
+{
+ AVFormatContext *oc;
+ int use_video, use_audio, input_has_video, input_has_audio, i;
+ AVFormatParameters params, *ap = &params;
+
+ if (!strcmp(filename, "-"))
+ filename = "pipe:";
+
+ oc = av_alloc_format_context();
+
+ if (!file_oformat) {
+ file_oformat = guess_format(NULL, filename, NULL);
+ if (!file_oformat) {
+ fprintf(stderr, "Unable to find a suitable output format for '%s'\n",
+ filename);
+ exit(1);
+ }
+ }
+
+ oc->oformat = file_oformat;
+ pstrcpy(oc->filename, sizeof(oc->filename), filename);
+
+ if (!strcmp(file_oformat->name, "ffm") &&
+ strstart(filename, "http:", NULL)) {
+ /* special case for files sent to ffserver: we get the stream
+ parameters from ffserver */
+ if (read_ffserver_streams(oc, filename) < 0) {
+ fprintf(stderr, "Could not read stream parameters from '%s'\n", filename);
+ exit(1);
+ }
+ } else {
+ use_video = file_oformat->video_codec != CODEC_ID_NONE || video_stream_copy || video_codec_id != CODEC_ID_NONE;
+ use_audio = file_oformat->audio_codec != CODEC_ID_NONE || audio_stream_copy || audio_codec_id != CODEC_ID_NONE;
+
+ /* disable a stream type when input files are given
+ but none of them contains that type */
+ if (nb_input_files > 0) {
+ check_audio_video_inputs(&input_has_video, &input_has_audio);
+ if (!input_has_video)
+ use_video = 0;
+ if (!input_has_audio)
+ use_audio = 0;
+ }
+
+ /* manual disable */
+ if (audio_disable) {
+ use_audio = 0;
+ }
+ if (video_disable) {
+ use_video = 0;
+ }
+
+ if (use_video) {
+ new_video_stream(oc);
+ }
+
+ if (use_audio) {
+ new_audio_stream(oc);
+ }
+
+ oc->timestamp = rec_timestamp;
+
+ if (str_title)
+ pstrcpy(oc->title, sizeof(oc->title), str_title);
+ if (str_author)
+ pstrcpy(oc->author, sizeof(oc->author), str_author);
+ if (str_copyright)
+ pstrcpy(oc->copyright, sizeof(oc->copyright), str_copyright);
+ if (str_comment)
+ pstrcpy(oc->comment, sizeof(oc->comment), str_comment);
+ if (str_album)
+ pstrcpy(oc->album, sizeof(oc->album), str_album);
+ }
+
+ output_files[nb_output_files++] = oc;
+
+ /* check filename in case an image number is expected */
+ if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
+ if (!av_filename_number_test(oc->filename)) {
+ print_error(oc->filename, AVERROR_NUMEXPECTED);
+ exit(1);
+ }
+ }
+
+ if (!(oc->oformat->flags & AVFMT_NOFILE)) {
+ /* test if it already exists to avoid losing precious files */
+ if (!file_overwrite &&
+ (strchr(filename, ':') == NULL ||
+ strstart(filename, "file:", NULL))) {
+ if (url_exist(filename)) {
+ int c;
+
+ if ( !using_stdin ) {
+ fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
+ fflush(stderr);
+ c = getchar();
+ if (toupper(c) != 'Y') {
+ fprintf(stderr, "Not overwriting - exiting\n");
+ exit(1);
+ }
+ }
+ else {
+ fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
+ exit(1);
+ }
+ }
+ }
+
+ /* open the file */
+ if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
+ fprintf(stderr, "Could not open '%s'\n", filename);
+ exit(1);
+ }
+ }
+
+ memset(ap, 0, sizeof(*ap));
+ if (av_set_parameters(oc, ap) < 0) {
+ fprintf(stderr, "%s: Invalid encoding parameters\n",
+ oc->filename);
+ exit(1);
+ }
+
+ oc->preload= (int)(mux_preload*AV_TIME_BASE);
+ oc->max_delay= (int)(mux_max_delay*AV_TIME_BASE);
+ oc->loop_output = loop_output;
+
+ for(i=0; i<opt_name_count; i++){
+ AVOption *opt;
+ double d = av_get_double(avformat_opts, opt_names[i], &opt);
+ if(d==d && (opt->flags&AV_OPT_FLAG_ENCODING_PARAM))
+ av_set_double(oc, opt_names[i], d);
+ }
+
+ /* reset some options */
+ file_oformat = NULL;
+ file_iformat = NULL;
+}
+
+/* prepare dummy protocols for grab */
+static void prepare_grab(void)
+{
+ int has_video, has_audio, i, j;
+ AVFormatContext *oc;
+ AVFormatContext *ic;
+ AVFormatParameters vp1, *vp = &vp1;
+ AVFormatParameters ap1, *ap = &ap1;
+
+ /* see if audio/video inputs are needed */
+ has_video = 0;
+ has_audio = 0;
+ memset(ap, 0, sizeof(*ap));
+ memset(vp, 0, sizeof(*vp));
+ vp->time_base.num= 1;
+ for(j=0;j<nb_output_files;j++) {
+ oc = output_files[j];
+ for(i=0;i<oc->nb_streams;i++) {
+ AVCodecContext *enc = oc->streams[i]->codec;
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (enc->sample_rate > ap->sample_rate)
+ ap->sample_rate = enc->sample_rate;
+ if (enc->channels > ap->channels)
+ ap->channels = enc->channels;
+ has_audio = 1;
+ break;
+ case CODEC_TYPE_VIDEO:
+ if (enc->width > vp->width)
+ vp->width = enc->width;
+ if (enc->height > vp->height)
+ vp->height = enc->height;
+
+ if (vp->time_base.num*(int64_t)enc->time_base.den > enc->time_base.num*(int64_t)vp->time_base.den){
+ vp->time_base = enc->time_base;
+ vp->width += frame_leftBand + frame_rightBand;
+ vp->width -= (frame_padleft + frame_padright);
+ vp->height += frame_topBand + frame_bottomBand;
+ vp->height -= (frame_padtop + frame_padbottom);
+ }
+ has_video = 1;
+ break;
+ default:
+ av_abort();
+ }
+ }
+ }
+
+ if (has_video == 0 && has_audio == 0) {
+ fprintf(stderr, "Output file must have at least one audio or video stream\n");
+ exit(1);
+ }
+
+ if (has_video) {
+ AVInputFormat *fmt1;
+ fmt1 = av_find_input_format(video_grab_format);
+ vp->device = video_device;
+ vp->channel = video_channel;
+ vp->standard = video_standard;
+ vp->pix_fmt = frame_pix_fmt;
+ if (av_open_input_file(&ic, "", fmt1, 0, vp) < 0) {
+ fprintf(stderr, "Could not find video grab device\n");
+ exit(1);
+ }
+ /* If there is not enough info to get the stream parameters, we decode the
+ first frames to get them. */
+ if ((ic->ctx_flags & AVFMTCTX_NOHEADER) && av_find_stream_info(ic) < 0) {
+ fprintf(stderr, "Could not find video grab parameters\n");
+ exit(1);
+ }
+ /* by now video grab has one stream */
+ ic->streams[0]->r_frame_rate.num = vp->time_base.den;
+ ic->streams[0]->r_frame_rate.den = vp->time_base.num;
+ input_files[nb_input_files] = ic;
+
+ if (verbose >= 0)
+ dump_format(ic, nb_input_files, "", 0);
+
+ nb_input_files++;
+ }
+ if (has_audio && audio_grab_format) {
+ AVInputFormat *fmt1;
+ fmt1 = av_find_input_format(audio_grab_format);
+ ap->device = audio_device;
+ if (av_open_input_file(&ic, "", fmt1, 0, ap) < 0) {
+ fprintf(stderr, "Could not find audio grab device\n");
+ exit(1);
+ }
+ input_files[nb_input_files] = ic;
+
+ if (verbose >= 0)
+ dump_format(ic, nb_input_files, "", 0);
+
+ nb_input_files++;
+ }
+}
+
+/* same option as mencoder */
+static void opt_pass(const char *pass_str)
+{
+ int pass;
+ pass = atoi(pass_str);
+ if (pass != 1 && pass != 2) {
+ fprintf(stderr, "pass number can be only 1 or 2\n");
+ exit(1);
+ }
+ do_pass = pass;
+}
+
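+ /* getutime(): user CPU time where getrusage() is available, wall-clock time otherwise */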
+#if defined(__MINGW32__) || defined(CONFIG_OS2)
+static int64_t getutime(void)
+{
+ return av_gettime();
+}
+#else
+static int64_t getutime(void)
+{
+ struct rusage rusage;
+
+ getrusage(RUSAGE_SELF, &rusage);
+ return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
+}
+#endif
+
+#if defined(CONFIG_FFM_DEMUXER) || defined(CONFIG_FFM_MUXER)
+extern int ffm_nopts;
+#endif
+
+static void show_formats(void)
+{
+ AVInputFormat *ifmt;
+ AVOutputFormat *ofmt;
+ URLProtocol *up;
+ AVCodec *p, *p2;
+ const char **pp, *last_name;
+
+ printf("File formats:\n");
+ last_name= "000";
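+ /* the format list is not sorted: repeatedly pick the alphabetically
+ smallest name greater than the last one printed */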
+ for(;;){
+ int decode=0;
+ int encode=0;
+ const char *name=NULL;
+ const char *long_name=NULL;
+
+ for(ofmt = first_oformat; ofmt != NULL; ofmt = ofmt->next) {
+ if((name == NULL || strcmp(ofmt->name, name)<0) &&
+ strcmp(ofmt->name, last_name)>0){
+ name= ofmt->name;
+ long_name= ofmt->long_name;
+ encode=1;
+ }
+ }
+ for(ifmt = first_iformat; ifmt != NULL; ifmt = ifmt->next) {
+ if((name == NULL || strcmp(ifmt->name, name)<0) &&
+ strcmp(ifmt->name, last_name)>0){
+ name= ifmt->name;
+ long_name= ifmt->long_name;
+ encode=0;
+ }
+ if(name && strcmp(ifmt->name, name)==0)
+ decode=1;
+ }
+ if(name==NULL)
+ break;
+ last_name= name;
+
+ printf(
+ " %s%s %-15s %s\n",
+ decode ? "D":" ",
+ encode ? "E":" ",
+ name,
+ long_name ? long_name:" ");
+ }
+ printf("\n");
+
+ printf("Codecs:\n");
+ last_name= "000";
+ for(;;){
+ int decode=0;
+ int encode=0;
+ int cap=0;
+ const char *type_str;
+
+ p2=NULL;
+ for(p = first_avcodec; p != NULL; p = p->next) {
+ if((p2==NULL || strcmp(p->name, p2->name)<0) &&
+ strcmp(p->name, last_name)>0){
+ p2= p;
+ decode= encode= cap=0;
+ }
+ if(p2 && strcmp(p->name, p2->name)==0){
+ if(p->decode) decode=1;
+ if(p->encode) encode=1;
+ cap |= p->capabilities;
+ }
+ }
+ if(p2==NULL)
+ break;
+ last_name= p2->name;
+
+ switch(p2->type) {
+ case CODEC_TYPE_VIDEO:
+ type_str = "V";
+ break;
+ case CODEC_TYPE_AUDIO:
+ type_str = "A";
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ type_str = "S";
+ break;
+ default:
+ type_str = "?";
+ break;
+ }
+ printf(
+ " %s%s%s%s%s%s %s",
+ decode ? "D": (/*p2->decoder ? "d":*/" "),
+ encode ? "E":" ",
+ type_str,
+ cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S":" ",
+ cap & CODEC_CAP_DR1 ? "D":" ",
+ cap & CODEC_CAP_TRUNCATED ? "T":" ",
+ p2->name);
+ /* if(p2->decoder && decode==0)
+ printf(" use %s for decoding", p2->decoder->name);*/
+ printf("\n");
+ }
+ printf("\n");
+
+ printf("Supported file protocols:\n");
+ for(up = first_protocol; up != NULL; up = up->next)
+ printf(" %s:", up->name);
+ printf("\n");
+
+ printf("Frame size, frame rate abbreviations:\n ntsc pal qntsc qpal sntsc spal film ntsc-film sqcif qcif cif 4cif\n");
+ printf("Motion estimation methods:\n");
+ pp = motion_str;
+ while (*pp) {
+ printf(" %s", *pp);
+ if ((pp - motion_str + 1) == ME_ZERO)
+ printf("(fastest)");
+ else if ((pp - motion_str + 1) == ME_FULL)
+ printf("(slowest)");
+ else if ((pp - motion_str + 1) == ME_EPZS)
+ printf("(default)");
+ pp++;
+ }
+ printf("\n\n");
+ printf(
+"Note, the names of encoders and decoders dont always match, so there are\n"
+"several cases where the above table shows encoder only or decoder only entries\n"
+"even though both encoding and decoding are supported for example, the h263\n"
+"decoder corresponds to the h263 and h263p encoders, for file formats its even\n"
+"worse\n");
+ exit(1);
+}
+
+static void parse_matrix_coeffs(uint16_t *dest, const char *str)
+{
+ int i;
+ const char *p = str;
+ for(i = 0;; i++) {
+ dest[i] = atoi(p);
+ if(i == 63)
+ break;
+ p = strchr(p, ',');
+ if(!p) {
+ fprintf(stderr, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
+ exit(1);
+ }
+ p++;
+ }
+}
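+/* The matrix options below expect exactly 64 comma-separated integer
+ coefficients, e.g. -intra_matrix 8,16,19,22,... (values illustrative only);
+ anything shorter is rejected with the syntax error above. */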
+
+static void opt_inter_matrix(const char *arg)
+{
+ inter_matrix = av_mallocz(sizeof(uint16_t) * 64);
+ parse_matrix_coeffs(inter_matrix, arg);
+}
+
+static void opt_intra_matrix(const char *arg)
+{
+ intra_matrix = av_mallocz(sizeof(uint16_t) * 64);
+ parse_matrix_coeffs(intra_matrix, arg);
+}
+
+static void opt_target(const char *arg)
+{
+ int norm = -1;
+ static const char *const frame_rates[] = {"25", "30000/1001", "24000/1001"};
+
+ if(!strncmp(arg, "pal-", 4)) {
+ norm = 0;
+ arg += 4;
+ } else if(!strncmp(arg, "ntsc-", 5)) {
+ norm = 1;
+ arg += 5;
+ } else if(!strncmp(arg, "film-", 5)) {
+ norm = 2;
+ arg += 5;
+ } else {
+ int fr;
+ /* Calculate FR via float to avoid int overflow */
+ fr = (int)(frame_rate * 1000.0 / frame_rate_base);
+ if(fr == 25000) {
+ norm = 0;
+ } else if((fr == 29970) || (fr == 23976)) {
+ norm = 1;
+ } else {
+ /* Try to determine PAL/NTSC by peeking in the input files */
+ if(nb_input_files) {
+ int i, j;
+ for(j = 0; j < nb_input_files; j++) {
+ for(i = 0; i < input_files[j]->nb_streams; i++) {
+ AVCodecContext *c = input_files[j]->streams[i]->codec;
+ if(c->codec_type != CODEC_TYPE_VIDEO)
+ continue;
+ fr = c->time_base.den * 1000 / c->time_base.num;
+ if(fr == 25000) {
+ norm = 0;
+ break;
+ } else if((fr == 29970) || (fr == 23976)) {
+ norm = 1;
+ break;
+ }
+ }
+ if(norm >= 0)
+ break;
+ }
+ }
+ }
+ if(verbose && norm >= 0)
+ fprintf(stderr, "Assuming %s for target.\n", norm ? "NTSC" : "PAL");
+ }
+
+ if(norm < 0) {
+ fprintf(stderr, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
+ fprintf(stderr, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
+ fprintf(stderr, "or set a framerate with \"-r xxx\".\n");
+ exit(1);
+ }
+
+ if(!strcmp(arg, "vcd")) {
+
+ opt_video_codec("mpeg1video");
+ opt_audio_codec("mp2");
+ opt_format("vcd");
+
+ opt_frame_size(norm ? "352x240" : "352x288");
+ opt_frame_rate(frame_rates[norm]);
+ opt_default("gop", norm ? "18" : "15");
+
+ opt_default("b", "1150000");
+ opt_default("maxrate", "1150000");
+ opt_default("minrate", "1150000");
+ opt_default("bufsize", "327680"); // 40*1024*8;
+
+ audio_bit_rate = 224000;
+ audio_sample_rate = 44100;
+
+ opt_default("packetsize", "2324");
+ opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
+
+ /* We have to offset the PTS, so that it is consistent with the SCR.
+ SCR starts at 36000, but the first two packs contain only padding
+ and the first pack from the other stream, respectively, may also have
+ been written before.
+ So the real data starts at SCR 36000+3*1200. */
+ mux_preload= (36000+3*1200) / 90000.0; //0.44
+ } else if(!strcmp(arg, "svcd")) {
+
+ opt_video_codec("mpeg2video");
+ opt_audio_codec("mp2");
+ opt_format("svcd");
+
+ opt_frame_size(norm ? "480x480" : "480x576");
+ opt_frame_rate(frame_rates[norm]);
+ opt_default("gop", norm ? "18" : "15");
+
+ opt_default("b", "2040000");
+ opt_default("maxrate", "2516000");
+ opt_default("minrate", "0"); //1145000;
+ opt_default("bufsize", "1835008"); //224*1024*8;
+ opt_default("flags", "+SCAN_OFFSET");
+
+
+ audio_bit_rate = 224000;
+ audio_sample_rate = 44100;
+
+ opt_default("packetsize", "2324");
+
+ } else if(!strcmp(arg, "dvd")) {
+
+ opt_video_codec("mpeg2video");
+ opt_audio_codec("ac3");
+ opt_format("dvd");
+
+ opt_frame_size(norm ? "720x480" : "720x576");
+ opt_frame_rate(frame_rates[norm]);
+ opt_default("gop", norm ? "18" : "15");
+
+ opt_default("b", "6000000");
+ opt_default("maxrate", "9000000");
+ opt_default("minrate", "0"); //1500000;
+ opt_default("bufsize", "1835008"); //224*1024*8;
+
+ opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
+ opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
+
+ audio_bit_rate = 448000;
+ audio_sample_rate = 48000;
+
+ } else if(!strncmp(arg, "dv", 2)) {
+
+ opt_format("dv");
+
+ opt_frame_size(norm ? "720x480" : "720x576");
+ opt_frame_pix_fmt(!strncmp(arg, "dv50", 4) ? "yuv422p" :
+ (norm ? "yuv411p" : "yuv420p"));
+ opt_frame_rate(frame_rates[norm]);
+
+ audio_sample_rate = 48000;
+ audio_channels = 2;
+
+ } else {
+ fprintf(stderr, "Unknown target: %s\n", arg);
+ exit(1);
+ }
+}
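+/* Typical use of the presets above (illustrative command line):
+ ffmpeg -i input.avi -target pal-dvd output.mpg
+ which picks mpeg2video/ac3, 720x576, 25 fps and the DVD mux settings
+ from the "dvd" branch. */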
+
+static void opt_video_bsf(const char *arg)
+{
+ AVBitStreamFilterContext *bsfc= av_bitstream_filter_init(arg); //FIXME split name and args for filter at '='
+ AVBitStreamFilterContext **bsfp;
+
+ if(!bsfc){
+ fprintf(stderr, "Unkown bitstream filter %s\n", arg);
+ exit(1);
+ }
+
+ bsfp= &video_bitstream_filters;
+ while(*bsfp)
+ bsfp= &(*bsfp)->next;
+
+ *bsfp= bsfc;
+}
+
+//FIXME avoid audio - video code duplication
+static void opt_audio_bsf(const char *arg)
+{
+ AVBitStreamFilterContext *bsfc= av_bitstream_filter_init(arg); //FIXME split name and args for filter at '='
+ AVBitStreamFilterContext **bsfp;
+
+ if(!bsfc){
+ fprintf(stderr, "Unkown bitstream filter %s\n", arg);
+ exit(1);
+ }
+
+ bsfp= &audio_bitstream_filters;
+ while(*bsfp)
+ bsfp= &(*bsfp)->next;
+
+ *bsfp= bsfc;
+}
+
+static void show_version(void)
+{
+ /* TODO: add function interface to avutil and avformat */
+ fprintf(stderr, "ffmpeg " FFMPEG_VERSION "\n"
+ "libavutil %d\n"
+ "libavcodec %d\n"
+ "libavformat %d\n",
+ LIBAVUTIL_BUILD, avcodec_build(), LIBAVFORMAT_BUILD);
+ exit(1);
+}
+
+static int opt_default(const char *opt, const char *arg){
+ AVOption *o= av_set_string(avctx_opts, opt, arg);
+ if(!o)
+ o = av_set_string(avformat_opts, opt, arg);
+ if(!o)
+ return -1;
+
+// av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avctx_opts, opt, NULL), (int)av_get_int(avctx_opts, opt, NULL));
+
+ //FIXME we should always use avctx_opts, ... for storing options so there won't be any need to keep track of what's set over this
+ opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1));
+ opt_names[opt_name_count++]= o->name;
+
+#if defined(CONFIG_FFM_DEMUXER) || defined(CONFIG_FFM_MUXER)
+ /* disable generation of real time pts in ffm (needs to be suppressed anyway) */
+ if(avctx_opts->flags & CODEC_FLAG_BITEXACT)
+ ffm_nopts = 1;
+#endif
+
+ if(avctx_opts->debug)
+ av_log_set_level(AV_LOG_DEBUG);
+ return 0;
+}
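+/* opt_default() is the catch-all for options not listed in the table below:
+ the name/value pair is tried first as an AVOption on the codec context and
+ then on the format context, and the matched name is remembered in opt_names
+ (presumably so the same options can be re-applied to each stream created
+ later in ffmpeg.c). */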
+
+const OptionDef options[] = {
+ /* main options */
+ { "L", 0, {(void*)show_license}, "show license" },
+ { "h", 0, {(void*)show_help}, "show help" },
+ { "version", 0, {(void*)show_version}, "show version" },
+ { "formats", 0, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
+ { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
+ { "i", HAS_ARG, {(void*)opt_input_file}, "input file name", "filename" },
+ { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
+ { "map", HAS_ARG | OPT_EXPERT, {(void*)opt_map}, "set input stream mapping", "file:stream[:syncfile:syncstream]" },
+ { "map_meta_data", HAS_ARG | OPT_EXPERT, {(void*)opt_map_meta_data}, "set meta data information of outfile from infile", "outfile:infile" },
+ { "t", HAS_ARG, {(void*)opt_recording_time}, "set the recording time", "duration" },
+ { "fs", HAS_ARG | OPT_INT, {(void*)&limit_filesize}, "set the limit file size", "limit_size" }, //
+ { "ss", HAS_ARG, {(void*)opt_start_time}, "set the start time offset", "time_off" },
+ { "itsoffset", HAS_ARG, {(void*)opt_input_ts_offset}, "set the input ts offset", "time_off" },
+ { "title", HAS_ARG | OPT_STRING, {(void*)&str_title}, "set the title", "string" },
+ { "timestamp", HAS_ARG, {(void*)&opt_rec_timestamp}, "set the timestamp", "time" },
+ { "author", HAS_ARG | OPT_STRING, {(void*)&str_author}, "set the author", "string" },
+ { "copyright", HAS_ARG | OPT_STRING, {(void*)&str_copyright}, "set the copyright", "string" },
+ { "comment", HAS_ARG | OPT_STRING, {(void*)&str_comment}, "set the comment", "string" },
+ { "album", HAS_ARG | OPT_STRING, {(void*)&str_album}, "set the album", "string" },
+ { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
+ "add timings for benchmarking" },
+ { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
+ "dump each input packet" },
+ { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
+ "when dumping packets, also dump the payload" },
+ { "re", OPT_BOOL | OPT_EXPERT, {(void*)&rate_emu}, "read input at native frame rate", "" },
+ { "loop_input", OPT_BOOL | OPT_EXPERT, {(void*)&loop_input}, "loop (current only works with images)" },
+ { "loop_output", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&loop_output}, "number of times to loop output in formats that support looping (0 loops forever)", "" },
+ { "v", HAS_ARG, {(void*)opt_verbose}, "control amount of logging", "verbose" },
+ { "target", HAS_ARG, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
+ { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
+ { "vsync", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&video_sync_method}, "video sync method", "" },
+ { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
+ { "vglobal", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&video_global_header}, "video global header storage type", "" },
+ { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)&copy_ts}, "copy timestamps" },
+ { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
+ { "dts_delta_threshold", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "" },
+
+ /* video options */
+ { "vframes", OPT_INT | HAS_ARG | OPT_VIDEO, {(void*)&max_frames[CODEC_TYPE_VIDEO]}, "set the number of video frames to record", "number" },
+ { "aframes", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&max_frames[CODEC_TYPE_AUDIO]}, "set the number of audio frames to record", "number" },
+ { "dframes", OPT_INT | HAS_ARG, {(void*)&max_frames[CODEC_TYPE_DATA]}, "set the number of data frames to record", "number" },
+ { "r", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_rate}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
+ { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
+ { "aspect", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_aspect_ratio}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
+ { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
+ { "croptop", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop_top}, "set top crop band size (in pixels)", "size" },
+ { "cropbottom", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop_bottom}, "set bottom crop band size (in pixels)", "size" },
+ { "cropleft", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop_left}, "set left crop band size (in pixels)", "size" },
+ { "cropright", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_crop_right}, "set right crop band size (in pixels)", "size" },
+ { "padtop", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_pad_top}, "set top pad band size (in pixels)", "size" },
+ { "padbottom", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_pad_bottom}, "set bottom pad band size (in pixels)", "size" },
+ { "padleft", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_pad_left}, "set left pad band size (in pixels)", "size" },
+ { "padright", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_pad_right}, "set right pad band size (in pixels)", "size" },
+ { "padcolor", HAS_ARG | OPT_VIDEO, {(void*)opt_pad_color}, "set color of pad bands (Hex 000000 thru FFFFFF)", "color" },
+ { "intra", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_only}, "use only intra frames"},
+ { "vn", OPT_BOOL | OPT_VIDEO, {(void*)&video_disable}, "disable video" },
+ { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
+ { "qscale", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_qscale}, "use fixed video quantizer scale (VBR)", "q" },
+ { "qdiff", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_qdiff}, "max difference between the quantizer scale (VBR)", "q" },
+ { "rc_eq", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_video_rc_eq}, "set rate control equation", "equation" },
+ { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_video_rc_override_string}, "rate control override for specific intervals", "override" },
+ { "vcodec", HAS_ARG | OPT_VIDEO, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
+ { "me", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_motion_estimation}, "set motion estimation method",
+ "method" },
+ { "me_threshold", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_me_threshold}, "motion estimaton threshold", "" },
+ { "ps", HAS_ARG | OPT_EXPERT, {(void*)opt_packet_size}, "set packet size in bits", "size" },
+ { "strict", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_strict}, "how strictly to follow the standards", "strictness" },
+ { "sameq", OPT_BOOL | OPT_VIDEO, {(void*)&same_quality},
+ "use same video quality as source (implies VBR)" },
+ { "pass", HAS_ARG | OPT_VIDEO, {(void*)&opt_pass}, "select the pass number (1 or 2)", "n" },
+ { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename}, "select two pass log file name", "file" },
+ { "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace},
+ "deinterlace pictures" },
+ { "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" },
+ { "vstats", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_vstats}, "dump video coding statistics to file" },
+ { "vhook", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)add_frame_hooker}, "insert video processing module", "module" },
+ { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_intra_matrix}, "specify intra matrix coeffs", "matrix" },
+ { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_inter_matrix}, "specify inter matrix coeffs", "matrix" },
+ { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_top_field_first}, "top=1/bottom=0/auto=-1 field first", "" },
+ { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
+ { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
+ { "newvideo", OPT_VIDEO, {(void*)opt_new_video_stream}, "add a new video stream to the current output stream" },
+ { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
+
+ /* audio options */
+ { "ab", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_bitrate}, "set audio bitrate (in kbit/s)", "bitrate", },
+ { "aq", OPT_FLOAT | HAS_ARG | OPT_AUDIO, {(void*)&audio_qscale}, "set audio quality (codec-specific)", "quality", },
+ { "ar", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_rate}, "set audio sampling rate (in Hz)", "rate" },
+ { "ac", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_channels}, "set number of audio channels", "channels" },
+ { "an", OPT_BOOL | OPT_AUDIO, {(void*)&audio_disable}, "disable audio" },
+ { "acodec", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
+ { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
+ { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
+ { "newaudio", OPT_AUDIO, {(void*)opt_new_audio_stream}, "add a new audio stream to the current output stream" },
+ { "alang", HAS_ARG | OPT_STRING | OPT_AUDIO, {(void *)&audio_language}, "set the ISO 639 language code (3 letters) of the current audio stream" , "code" },
+
+ /* subtitle options */
+ { "scodec", HAS_ARG | OPT_SUBTITLE, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
+ { "newsubtitle", OPT_SUBTITLE, {(void*)opt_new_subtitle_stream}, "add a new subtitle stream to the current output stream" },
+ { "slang", HAS_ARG | OPT_STRING | OPT_SUBTITLE, {(void *)&subtitle_language}, "set the ISO 639 language code (3 letters) of the current subtitle stream" , "code" },
+
+ /* grab options */
+ { "vd", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_device}, "set video grab device", "device" },
+ { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_channel}, "set video grab channel (DV1394 only)", "channel" },
+ { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_video_standard}, "set television standard (NTSC, PAL (SECAM))", "standard" },
+ { "ad", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_GRAB, {(void*)opt_audio_device}, "set audio device", "device" },
+
+ /* G.2 grab options */
+ { "grab", HAS_ARG | OPT_EXPERT | OPT_GRAB, {(void*)opt_grab}, "request grabbing using", "format" },
+ { "gd", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_GRAB, {(void*)opt_grab_device}, "set grab device", "device" },
+
+ /* muxer options */
+ { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT, {(void*)&mux_max_delay}, "set the maximum demux-decode delay", "seconds" },
+ { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT, {(void*)&mux_preload}, "set the initial demux-decode delay", "seconds" },
+
+ { "absf", HAS_ARG | OPT_AUDIO | OPT_EXPERT, {(void*)opt_audio_bsf}, "", "bitstream filter" },
+ { "vbsf", HAS_ARG | OPT_VIDEO | OPT_EXPERT, {(void*)opt_video_bsf}, "", "bitstream filter" },
+
+ { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
+ { NULL, },
+};
+
+static void show_banner(void)
+{
+ fprintf(stderr, "FFmpeg version " FFMPEG_VERSION ", Copyright (c) 2000-2006 Fabrice Bellard, et al.\n");
+ fprintf(stderr, " configuration: " FFMPEG_CONFIGURATION "\n");
+ fprintf(stderr, " libavutil version: " AV_STRINGIFY(LIBAVUTIL_VERSION) "\n");
+ fprintf(stderr, " libavcodec version: " AV_STRINGIFY(LIBAVCODEC_VERSION) "\n");
+ fprintf(stderr, " libavformat version: " AV_STRINGIFY(LIBAVFORMAT_VERSION) "\n");
+ fprintf(stderr, " built on " __DATE__ " " __TIME__);
+#ifdef __GNUC__
+ fprintf(stderr, ", gcc: " __VERSION__ "\n");
+#else
+ fprintf(stderr, ", using a non-gcc compiler\n");
+#endif
+}
+
+static void show_license(void)
+{
+ show_banner();
+#ifdef CONFIG_GPL
+ printf(
+ "FFmpeg is free software; you can redistribute it and/or modify\n"
+ "it under the terms of the GNU General Public License as published by\n"
+ "the Free Software Foundation; either version 2 of the License, or\n"
+ "(at your option) any later version.\n"
+ "\n"
+ "FFmpeg is distributed in the hope that it will be useful,\n"
+ "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
+ "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
+ "GNU General Public License for more details.\n"
+ "\n"
+ "You should have received a copy of the GNU General Public License\n"
+ "along with FFmpeg; if not, write to the Free Software\n"
+ "Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n"
+ );
+#else
+ printf(
+ "FFmpeg is free software; you can redistribute it and/or\n"
+ "modify it under the terms of the GNU Lesser General Public\n"
+ "License as published by the Free Software Foundation; either\n"
+ "version 2.1 of the License, or (at your option) any later version.\n"
+ "\n"
+ "FFmpeg is distributed in the hope that it will be useful,\n"
+ "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
+ "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n"
+ "Lesser General Public License for more details.\n"
+ "\n"
+ "You should have received a copy of the GNU Lesser General Public\n"
+ "License along with FFmpeg; if not, write to the Free Software\n"
+ "Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n"
+ );
+#endif
+ exit(1);
+}
+
+static void show_help(void)
+{
+ show_banner();
+ printf("usage: ffmpeg [[infile options] -i infile]... {[outfile options] outfile}...\n"
+ "Hyper fast Audio and Video encoder\n");
+ printf("\n");
+ show_help_options(options, "Main options:\n",
+ OPT_EXPERT | OPT_AUDIO | OPT_VIDEO, 0);
+ show_help_options(options, "\nVideo options:\n",
+ OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
+ OPT_VIDEO);
+ show_help_options(options, "\nAdvanced Video options:\n",
+ OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
+ OPT_VIDEO | OPT_EXPERT);
+ show_help_options(options, "\nAudio options:\n",
+ OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
+ OPT_AUDIO);
+ show_help_options(options, "\nAdvanced Audio options:\n",
+ OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
+ OPT_AUDIO | OPT_EXPERT);
+ show_help_options(options, "\nSubtitle options:\n",
+ OPT_SUBTITLE | OPT_GRAB,
+ OPT_SUBTITLE);
+ show_help_options(options, "\nAudio/Video grab options:\n",
+ OPT_GRAB,
+ OPT_GRAB);
+ show_help_options(options, "\nAdvanced options:\n",
+ OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
+ OPT_EXPERT);
+ av_opt_show(avctx_opts, NULL);
+ av_opt_show(avformat_opts, NULL);
+
+ exit(1);
+}
+
+void parse_arg_file(const char *filename)
+{
+ opt_output_file(filename);
+}
+
+int main(int argc, char **argv)
+{
+ int i;
+ int64_t ti;
+
+ av_register_all();
+
+ avctx_opts= avcodec_alloc_context();
+ avformat_opts = av_alloc_format_context();
+
+ if (argc <= 1)
+ show_help();
+ else
+ show_banner();
+
+ /* parse options */
+ parse_options(argc, argv, options);
+
+ /* file converter / grab */
+ if (nb_output_files <= 0) {
+ fprintf(stderr, "Must supply at least one output file\n");
+ exit(1);
+ }
+
+ if (nb_input_files == 0) {
+ input_sync = 1;
+ prepare_grab();
+ }
+
+ ti = getutime();
+ av_encode(output_files, nb_output_files, input_files, nb_input_files,
+ stream_maps, nb_stream_maps);
+ ti = getutime() - ti;
+ if (do_benchmark) {
+ printf("bench: utime=%0.3fs\n", ti / 1000000.0);
+ }
+
+ /* close files */
+ for(i=0;i<nb_output_files;i++) {
+ /* maybe av_close_output_file ??? */
+ AVFormatContext *s = output_files[i];
+ int j;
+ if (!(s->oformat->flags & AVFMT_NOFILE))
+ url_fclose(&s->pb);
+ for(j=0;j<s->nb_streams;j++)
+ av_free(s->streams[j]);
+ av_free(s);
+ }
+ for(i=0;i<nb_input_files;i++)
+ av_close_input_file(input_files[i]);
+
+ av_free_static();
+
+ if(intra_matrix)
+ av_free(intra_matrix);
+ if(inter_matrix)
+ av_free(inter_matrix);
+
+#ifdef POWERPC_PERFORMANCE_REPORT
+ extern void powerpc_display_perf_report(void);
+ powerpc_display_perf_report();
+#endif /* POWERPC_PERFORMANCE_REPORT */
+
+ if (received_sigterm) {
+ fprintf(stderr,
+ "Received signal %d: terminating.\n",
+ (int) received_sigterm);
+ exit (255);
+ }
+
+ exit(0); /* not all OS-es handle main() return value */
+ return 0;
+}
diff --git a/contrib/ffmpeg/ffplay.c b/contrib/ffmpeg/ffplay.c
new file mode 100644
index 000000000..f80a43548
--- /dev/null
+++ b/contrib/ffmpeg/ffplay.c
@@ -0,0 +1,2488 @@
+/*
+ * FFplay : Simple Media Player based on the ffmpeg libraries
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#define HAVE_AV_CONFIG_H
+#include "avformat.h"
+#include "swscale.h"
+
+#include "version.h"
+#include "cmdutils.h"
+
+#include <SDL.h>
+#include <SDL_thread.h>
+
+#ifdef __MINGW32__
+#undef main /* We don't want SDL to override our main() */
+#endif
+
+#ifdef CONFIG_OS2
+#define INCL_DOS
+ #include <os2.h>
+ #include <stdio.h>
+
+ void MorphToPM()
+ {
+ PPIB pib;
+ PTIB tib;
+
+ DosGetInfoBlocks(&tib, &pib);
+
+ // Change flag from VIO to PM:
+ if (pib->pib_ultype==2) pib->pib_ultype = 3;
+ }
+#endif
+
+//#define DEBUG_SYNC
+
+#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
+#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
+#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
+
+/* SDL audio buffer size, in samples. Should be small to have precise
+ A/V sync as SDL does not have hardware buffer fullness info. */
+#define SDL_AUDIO_BUFFER_SIZE 1024
+
+/* no AV sync correction is done if below the AV sync threshold */
+#define AV_SYNC_THRESHOLD 0.01
+/* no AV correction is done if too big error */
+#define AV_NOSYNC_THRESHOLD 10.0
+
+/* maximum audio speed change to get correct sync */
+#define SAMPLE_CORRECTION_PERCENT_MAX 10
+
+/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
+#define AUDIO_DIFF_AVG_NB 20
+
+/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
+#define SAMPLE_ARRAY_SIZE (2*65536)
+
+static int sws_flags = SWS_BICUBIC;
+
+typedef struct PacketQueue {
+ AVPacketList *first_pkt, *last_pkt;
+ int nb_packets;
+ int size;
+ int abort_request;
+ SDL_mutex *mutex;
+ SDL_cond *cond;
+} PacketQueue;
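+/* PacketQueue is a simple producer/consumer FIFO: a singly linked list of
+ AVPacketList nodes guarded by an SDL mutex, with the condition variable
+ signalled on every put and on abort so that blocked readers wake up. */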
+
+#define VIDEO_PICTURE_QUEUE_SIZE 1
+#define SUBPICTURE_QUEUE_SIZE 4
+
+typedef struct VideoPicture {
+ double pts; ///<presentation time stamp for this picture
+ SDL_Overlay *bmp;
+ int width, height; /* source height & width */
+ int allocated;
+} VideoPicture;
+
+typedef struct SubPicture {
+ double pts; /* presentation time stamp for this picture */
+ AVSubtitle sub;
+} SubPicture;
+
+enum {
+ AV_SYNC_AUDIO_MASTER, /* default choice */
+ AV_SYNC_VIDEO_MASTER,
+ AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
+};
+
+typedef struct VideoState {
+ SDL_Thread *parse_tid;
+ SDL_Thread *video_tid;
+ AVInputFormat *iformat;
+ int no_background;
+ int abort_request;
+ int paused;
+ int last_paused;
+ int seek_req;
+ int seek_flags;
+ int64_t seek_pos;
+ AVFormatContext *ic;
+ int dtg_active_format;
+
+ int audio_stream;
+
+ int av_sync_type;
+ double external_clock; /* external clock base */
+ int64_t external_clock_time;
+
+ double audio_clock;
+ double audio_diff_cum; /* used for AV difference average computation */
+ double audio_diff_avg_coef;
+ double audio_diff_threshold;
+ int audio_diff_avg_count;
+ AVStream *audio_st;
+ PacketQueue audioq;
+ int audio_hw_buf_size;
+ /* samples output by the codec. we reserve more space for avsync
+ compensation */
+ DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
+ unsigned int audio_buf_size; /* in bytes */
+ int audio_buf_index; /* in bytes */
+ AVPacket audio_pkt;
+ uint8_t *audio_pkt_data;
+ int audio_pkt_size;
+
+ int show_audio; /* if true, display audio samples */
+ int16_t sample_array[SAMPLE_ARRAY_SIZE];
+ int sample_array_index;
+ int last_i_start;
+
+ SDL_Thread *subtitle_tid;
+ int subtitle_stream;
+ int subtitle_stream_changed;
+ AVStream *subtitle_st;
+ PacketQueue subtitleq;
+ SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
+ int subpq_size, subpq_rindex, subpq_windex;
+ SDL_mutex *subpq_mutex;
+ SDL_cond *subpq_cond;
+
+ double frame_timer;
+ double frame_last_pts;
+ double frame_last_delay;
+ double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
+ int video_stream;
+ AVStream *video_st;
+ PacketQueue videoq;
+ double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
+ int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
+ VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
+ int pictq_size, pictq_rindex, pictq_windex;
+ SDL_mutex *pictq_mutex;
+ SDL_cond *pictq_cond;
+
+ // QETimer *video_timer;
+ char filename[1024];
+ int width, height, xleft, ytop;
+} VideoState;
+
+void show_help(void);
+static int audio_write_get_buf_size(VideoState *is);
+
+/* options specified by the user */
+static AVInputFormat *file_iformat;
+static const char *input_filename;
+static int fs_screen_width;
+static int fs_screen_height;
+static int screen_width = 640;
+static int screen_height = 480;
+static int audio_disable;
+static int video_disable;
+static int seek_by_bytes;
+static int display_disable;
+static int show_status;
+static int av_sync_type = AV_SYNC_AUDIO_MASTER;
+static int64_t start_time = AV_NOPTS_VALUE;
+static int debug = 0;
+static int debug_mv = 0;
+static int step = 0;
+static int thread_count = 1;
+static int workaround_bugs = 1;
+static int fast = 0;
+static int genpts = 0;
+static int lowres = 0;
+static int idct = FF_IDCT_AUTO;
+static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
+static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
+static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
+static int error_resilience = FF_ER_CAREFUL;
+static int error_concealment = 3;
+
+/* current context */
+static int is_full_screen;
+static VideoState *cur_stream;
+static int64_t audio_callback_time;
+
+AVPacket flush_pkt;
+
+#define FF_ALLOC_EVENT (SDL_USEREVENT)
+#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
+#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
+
+SDL_Surface *screen;
+
+/* packet queue handling */
+static void packet_queue_init(PacketQueue *q)
+{
+ memset(q, 0, sizeof(PacketQueue));
+ q->mutex = SDL_CreateMutex();
+ q->cond = SDL_CreateCond();
+}
+
+static void packet_queue_flush(PacketQueue *q)
+{
+ AVPacketList *pkt, *pkt1;
+
+ SDL_LockMutex(q->mutex);
+ for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
+ pkt1 = pkt->next;
+ av_free_packet(&pkt->pkt);
+ av_freep(&pkt);
+ }
+ q->last_pkt = NULL;
+ q->first_pkt = NULL;
+ q->nb_packets = 0;
+ q->size = 0;
+ SDL_UnlockMutex(q->mutex);
+}
+
+static void packet_queue_end(PacketQueue *q)
+{
+ packet_queue_flush(q);
+ SDL_DestroyMutex(q->mutex);
+ SDL_DestroyCond(q->cond);
+}
+
+static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
+{
+ AVPacketList *pkt1;
+
+ /* duplicate the packet */
+ if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
+ return -1;
+
+ pkt1 = av_malloc(sizeof(AVPacketList));
+ if (!pkt1)
+ return -1;
+ pkt1->pkt = *pkt;
+ pkt1->next = NULL;
+
+ SDL_LockMutex(q->mutex);
+
+ if (!q->last_pkt)
+
+ q->first_pkt = pkt1;
+ else
+ q->last_pkt->next = pkt1;
+ q->last_pkt = pkt1;
+ q->nb_packets++;
+ q->size += pkt1->pkt.size;
+ /* XXX: should duplicate packet data in DV case */
+ SDL_CondSignal(q->cond);
+
+ SDL_UnlockMutex(q->mutex);
+ return 0;
+}
+
+static void packet_queue_abort(PacketQueue *q)
+{
+ SDL_LockMutex(q->mutex);
+
+ q->abort_request = 1;
+
+ SDL_CondSignal(q->cond);
+
+ SDL_UnlockMutex(q->mutex);
+}
+
+/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
+static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
+{
+ AVPacketList *pkt1;
+ int ret;
+
+ SDL_LockMutex(q->mutex);
+
+ for(;;) {
+ if (q->abort_request) {
+ ret = -1;
+ break;
+ }
+
+ pkt1 = q->first_pkt;
+ if (pkt1) {
+ q->first_pkt = pkt1->next;
+ if (!q->first_pkt)
+ q->last_pkt = NULL;
+ q->nb_packets--;
+ q->size -= pkt1->pkt.size;
+ *pkt = pkt1->pkt;
+ av_free(pkt1);
+ ret = 1;
+ break;
+ } else if (!block) {
+ ret = 0;
+ break;
+ } else {
+ SDL_CondWait(q->cond, q->mutex);
+ }
+ }
+ SDL_UnlockMutex(q->mutex);
+ return ret;
+}
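+/* Decoder threads call packet_queue_get(q, pkt, 1) so they block on the
+ condition variable until the demuxer puts a packet or the queue is aborted;
+ block=0 gives a non-blocking poll that returns 0 on an empty queue. */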
+
+static inline void fill_rectangle(SDL_Surface *screen,
+ int x, int y, int w, int h, int color)
+{
+ SDL_Rect rect;
+ rect.x = x;
+ rect.y = y;
+ rect.w = w;
+ rect.h = h;
+ SDL_FillRect(screen, &rect, color);
+}
+
+#if 0
+/* draw only the border of a rectangle */
+void fill_border(VideoState *s, int x, int y, int w, int h, int color)
+{
+ int w1, w2, h1, h2;
+
+ /* fill the background */
+ w1 = x;
+ if (w1 < 0)
+ w1 = 0;
+ w2 = s->width - (x + w);
+ if (w2 < 0)
+ w2 = 0;
+ h1 = y;
+ if (h1 < 0)
+ h1 = 0;
+ h2 = s->height - (y + h);
+ if (h2 < 0)
+ h2 = 0;
+ fill_rectangle(screen,
+ s->xleft, s->ytop,
+ w1, s->height,
+ color);
+ fill_rectangle(screen,
+ s->xleft + s->width - w2, s->ytop,
+ w2, s->height,
+ color);
+ fill_rectangle(screen,
+ s->xleft + w1, s->ytop,
+ s->width - w1 - w2, h1,
+ color);
+ fill_rectangle(screen,
+ s->xleft + w1, s->ytop + s->height - h2,
+ s->width - w1 - w2, h2,
+ color);
+}
+#endif
+
+
+
+#define SCALEBITS 10
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define RGB_TO_Y_CCIR(r, g, b) \
+((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
+ FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
+
+#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
+(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
+ FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
+(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
+ FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
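+/* The RGB_TO_*_CCIR macros implement a fixed-point (SCALEBITS = 10) RGB to
+ limited-range ITU-R BT.601 conversion; for instance RGB_TO_Y_CCIR(0,0,0)
+ evaluates to 16 and RGB_TO_Y_CCIR(255,255,255) to 235, matching the
+ 16..235 luma range. */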
+#define ALPHA_BLEND(a, oldp, newp, s)\
+((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
+
+#define RGBA_IN(r, g, b, a, s)\
+{\
+ unsigned int v = ((const uint32_t *)(s))[0];\
+ a = (v >> 24) & 0xff;\
+ r = (v >> 16) & 0xff;\
+ g = (v >> 8) & 0xff;\
+ b = v & 0xff;\
+}
+
+#define YUVA_IN(y, u, v, a, s, pal)\
+{\
+ unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
+ a = (val >> 24) & 0xff;\
+ y = (val >> 16) & 0xff;\
+ u = (val >> 8) & 0xff;\
+ v = val & 0xff;\
+}
+
+#define YUVA_OUT(d, y, u, v, a)\
+{\
+ ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
+}
+
+
+#define BPP 1
+
+static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
+{
+ int wrap, wrap3, width2, skip2;
+ int y, u, v, a, u1, v1, a1, w, h;
+ uint8_t *lum, *cb, *cr;
+ const uint8_t *p;
+ const uint32_t *pal;
+
+ lum = dst->data[0] + rect->y * dst->linesize[0];
+ cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
+ cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
+
+ width2 = (rect->w + 1) >> 1;
+ skip2 = rect->x >> 1;
+ wrap = dst->linesize[0];
+ wrap3 = rect->linesize;
+ p = rect->bitmap;
+ pal = rect->rgba_palette; /* Now in YCrCb! */
+
+ if (rect->y & 1) {
+ lum += rect->x;
+ cb += skip2;
+ cr += skip2;
+
+ if (rect->x & 1) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ cb++;
+ cr++;
+ lum++;
+ p += BPP;
+ }
+ for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += 2 * BPP;
+ lum += 2;
+ }
+ if (w) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ }
+ p += wrap3 + (wrap3 - rect->w * BPP);
+ lum += wrap + (wrap - rect->w - rect->x);
+ cb += dst->linesize[1] - width2 - skip2;
+ cr += dst->linesize[2] - width2 - skip2;
+ }
+ for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
+ lum += rect->x;
+ cb += skip2;
+ cr += skip2;
+
+ if (rect->x & 1) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ p += wrap3;
+ lum += wrap;
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += -wrap3 + BPP;
+ lum += -wrap + 1;
+ }
+ for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+ p += wrap3;
+ lum += wrap;
+
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
+
+ cb++;
+ cr++;
+ p += -wrap3 + 2 * BPP;
+ lum += -wrap + 2;
+ }
+ if (w) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ p += wrap3;
+ lum += wrap;
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += -wrap3 + BPP;
+ lum += -wrap + 1;
+ }
+ p += wrap3 + (wrap3 - rect->w * BPP);
+ lum += wrap + (wrap - rect->w - rect->x);
+ cb += dst->linesize[1] - width2 - skip2;
+ cr += dst->linesize[2] - width2 - skip2;
+ }
+ /* handle odd height */
+ if (h) {
+ lum += rect->x;
+ cb += skip2;
+ cr += skip2;
+
+ if (rect->x & 1) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ cb++;
+ cr++;
+ lum++;
+ p += BPP;
+ }
+ for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
+ cb++;
+ cr++;
+ p += 2 * BPP;
+ lum += 2;
+ }
+ if (w) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ }
+ }
+}
+
+static void free_subpicture(SubPicture *sp)
+{
+ int i;
+
+ for (i = 0; i < sp->sub.num_rects; i++)
+ {
+ av_free(sp->sub.rects[i].bitmap);
+ av_free(sp->sub.rects[i].rgba_palette);
+ }
+
+ av_free(sp->sub.rects);
+
+ memset(&sp->sub, 0, sizeof(AVSubtitle));
+}
+
+static void video_image_display(VideoState *is)
+{
+ VideoPicture *vp;
+ SubPicture *sp;
+ AVPicture pict;
+ float aspect_ratio;
+ int width, height, x, y;
+ SDL_Rect rect;
+ int i;
+
+ vp = &is->pictq[is->pictq_rindex];
+ if (vp->bmp) {
+ /* XXX: use variable in the frame */
+ if (is->video_st->codec->sample_aspect_ratio.num == 0)
+ aspect_ratio = 0;
+ else
+ aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
+ * is->video_st->codec->width / is->video_st->codec->height;;
+ if (aspect_ratio <= 0.0)
+ aspect_ratio = (float)is->video_st->codec->width /
+ (float)is->video_st->codec->height;
+ /* if an active format is indicated, then it overrides the
+ mpeg format */
+#if 0
+ if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
+ is->dtg_active_format = is->video_st->codec->dtg_active_format;
+ printf("dtg_active_format=%d\n", is->dtg_active_format);
+ }
+#endif
+#if 0
+ switch(is->video_st->codec->dtg_active_format) {
+ case FF_DTG_AFD_SAME:
+ default:
+ /* nothing to do */
+ break;
+ case FF_DTG_AFD_4_3:
+ aspect_ratio = 4.0 / 3.0;
+ break;
+ case FF_DTG_AFD_16_9:
+ aspect_ratio = 16.0 / 9.0;
+ break;
+ case FF_DTG_AFD_14_9:
+ aspect_ratio = 14.0 / 9.0;
+ break;
+ case FF_DTG_AFD_4_3_SP_14_9:
+ aspect_ratio = 14.0 / 9.0;
+ break;
+ case FF_DTG_AFD_16_9_SP_14_9:
+ aspect_ratio = 14.0 / 9.0;
+ break;
+ case FF_DTG_AFD_SP_4_3:
+ aspect_ratio = 4.0 / 3.0;
+ break;
+ }
+#endif
+
+ if (is->subtitle_st)
+ {
+ if (is->subpq_size > 0)
+ {
+ sp = &is->subpq[is->subpq_rindex];
+
+ if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
+ {
+ SDL_LockYUVOverlay (vp->bmp);
+
+ pict.data[0] = vp->bmp->pixels[0];
+ pict.data[1] = vp->bmp->pixels[2];
+ pict.data[2] = vp->bmp->pixels[1];
+
+ pict.linesize[0] = vp->bmp->pitches[0];
+ pict.linesize[1] = vp->bmp->pitches[2];
+ pict.linesize[2] = vp->bmp->pitches[1];
+
+ for (i = 0; i < sp->sub.num_rects; i++)
+ blend_subrect(&pict, &sp->sub.rects[i]);
+
+ SDL_UnlockYUVOverlay (vp->bmp);
+ }
+ }
+ }
+
+
+ /* XXX: we suppose the screen has a 1.0 pixel ratio */
+ height = is->height;
+ width = ((int)rint(height * aspect_ratio)) & -3;
+ if (width > is->width) {
+ width = is->width;
+ height = ((int)rint(width / aspect_ratio)) & -3;
+ }
+ x = (is->width - width) / 2;
+ y = (is->height - height) / 2;
+ if (!is->no_background) {
+ /* fill the background */
+ // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
+ } else {
+ is->no_background = 0;
+ }
+ rect.x = is->xleft + x;
+ rect.y = is->xleft + y;
+ rect.w = width;
+ rect.h = height;
+ SDL_DisplayYUVOverlay(vp->bmp, &rect);
+ } else {
+#if 0
+ fill_rectangle(screen,
+ is->xleft, is->ytop, is->width, is->height,
+ QERGB(0x00, 0x00, 0x00));
+#endif
+ }
+}
+
+static inline int compute_mod(int a, int b)
+{
+ a = a % b;
+ if (a >= 0)
+ return a;
+ else
+ return a + b;
+}
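+/* compute_mod() returns the mathematically non-negative remainder, unlike the
+ C '%' operator whose sign follows the dividend: compute_mod(-3, 16) == 13. */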
+
+static void video_audio_display(VideoState *s)
+{
+ int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
+ int ch, channels, h, h2, bgcolor, fgcolor;
+ int64_t time_diff;
+
+ /* compute display index : center on currently output samples */
+ channels = s->audio_st->codec->channels;
+ nb_display_channels = channels;
+ if (!s->paused) {
+ n = 2 * channels;
+ delay = audio_write_get_buf_size(s);
+ delay /= n;
+
+ /* to be more precise, we take into account the time spent since
+ the last buffer computation */
+ if (audio_callback_time) {
+ time_diff = av_gettime() - audio_callback_time;
+ delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
+ }
+
+ delay -= s->width / 2;
+ if (delay < s->width)
+ delay = s->width;
+ i_start = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
+ s->last_i_start = i_start;
+ } else {
+ i_start = s->last_i_start;
+ }
+
+ bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
+ fill_rectangle(screen,
+ s->xleft, s->ytop, s->width, s->height,
+ bgcolor);
+
+ fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
+
+ /* total height for one channel */
+ h = s->height / nb_display_channels;
+ /* graph height / 2 */
+ h2 = (h * 9) / 20;
+ for(ch = 0;ch < nb_display_channels; ch++) {
+ i = i_start + ch;
+ y1 = s->ytop + ch * h + (h / 2); /* position of center line */
+ for(x = 0; x < s->width; x++) {
+ y = (s->sample_array[i] * h2) >> 15;
+ if (y < 0) {
+ y = -y;
+ ys = y1 - y;
+ } else {
+ ys = y1;
+ }
+ fill_rectangle(screen,
+ s->xleft + x, ys, 1, y,
+ fgcolor);
+ i += channels;
+ if (i >= SAMPLE_ARRAY_SIZE)
+ i -= SAMPLE_ARRAY_SIZE;
+ }
+ }
+
+ fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
+
+ for(ch = 1;ch < nb_display_channels; ch++) {
+ y = s->ytop + ch * h;
+ fill_rectangle(screen,
+ s->xleft, y, s->width, 1,
+ fgcolor);
+ }
+ SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
+}
+
+/* display the current picture, if any */
+static void video_display(VideoState *is)
+{
+ if (is->audio_st && is->show_audio)
+ video_audio_display(is);
+ else if (is->video_st)
+ video_image_display(is);
+}
+
+static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
+{
+ SDL_Event event;
+ event.type = FF_REFRESH_EVENT;
+ event.user.data1 = opaque;
+ SDL_PushEvent(&event);
+ return 0; /* 0 means stop timer */
+}
+
+/* schedule a video refresh in 'delay' ms */
+static void schedule_refresh(VideoState *is, int delay)
+{
+ SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
+}
+
+/* get the current audio clock value */
+static double get_audio_clock(VideoState *is)
+{
+ double pts;
+ int hw_buf_size, bytes_per_sec;
+ pts = is->audio_clock;
+ hw_buf_size = audio_write_get_buf_size(is);
+ bytes_per_sec = 0;
+ if (is->audio_st) {
+ bytes_per_sec = is->audio_st->codec->sample_rate *
+ 2 * is->audio_st->codec->channels;
+ }
+ if (bytes_per_sec)
+ pts -= (double)hw_buf_size / bytes_per_sec;
+ return pts;
+}
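+/* audio_clock is the PTS at the end of the data handed to the audio output,
+ so the current playback position is estimated by subtracting whatever is
+ still queued in the output buffer, assuming 16-bit samples (2 bytes per
+ channel). */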
+
+/* get the current video clock value */
+static double get_video_clock(VideoState *is)
+{
+ double delta;
+ if (is->paused) {
+ delta = 0;
+ } else {
+ delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
+ }
+ return is->video_current_pts + delta;
+}
+
+/* get the current external clock value */
+static double get_external_clock(VideoState *is)
+{
+ int64_t ti;
+ ti = av_gettime();
+ return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
+}
+
+/* get the current master clock value */
+static double get_master_clock(VideoState *is)
+{
+ double val;
+
+ if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
+ if (is->video_st)
+ val = get_video_clock(is);
+ else
+ val = get_audio_clock(is);
+ } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
+ if (is->audio_st)
+ val = get_audio_clock(is);
+ else
+ val = get_video_clock(is);
+ } else {
+ val = get_external_clock(is);
+ }
+ return val;
+}
+
+/* seek in the stream */
+static void stream_seek(VideoState *is, int64_t pos, int rel)
+{
+ if (!is->seek_req) {
+ is->seek_pos = pos;
+ is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
+ if (seek_by_bytes)
+ is->seek_flags |= AVSEEK_FLAG_BYTE;
+ is->seek_req = 1;
+ }
+}
+
+/* pause or resume the video */
+static void stream_pause(VideoState *is)
+{
+ is->paused = !is->paused;
+ if (is->paused) {
+ is->video_current_pts = get_video_clock(is);
+ }
+}
+
+/* called to display each frame */
+static void video_refresh_timer(void *opaque)
+{
+ VideoState *is = opaque;
+ VideoPicture *vp;
+ double actual_delay, delay, sync_threshold, ref_clock, diff;
+
+ SubPicture *sp, *sp2;
+
+ if (is->video_st) {
+ if (is->pictq_size == 0) {
+ /* if no picture, need to wait */
+ schedule_refresh(is, 1);
+ } else {
+ /* dequeue the picture */
+ vp = &is->pictq[is->pictq_rindex];
+
+ /* update current video pts */
+ is->video_current_pts = vp->pts;
+ is->video_current_pts_time = av_gettime();
+
+ /* compute nominal delay */
+ delay = vp->pts - is->frame_last_pts;
+ if (delay <= 0 || delay >= 1.0) {
+ /* if incorrect delay, use previous one */
+ delay = is->frame_last_delay;
+ }
+ is->frame_last_delay = delay;
+ is->frame_last_pts = vp->pts;
+
+ /* update delay to follow master synchronisation source */
+ if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
+ is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
+ /* if video is slave, we try to correct big delays by
+ duplicating or deleting a frame */
+ ref_clock = get_master_clock(is);
+ diff = vp->pts - ref_clock;
+
+ /* skip or repeat frame. We take into account the
+ delay to compute the threshold. I still don't know
+ if it is the best guess */
+ sync_threshold = AV_SYNC_THRESHOLD;
+ if (delay > sync_threshold)
+ sync_threshold = delay;
+ if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
+ if (diff <= -sync_threshold)
+ delay = 0;
+ else if (diff >= sync_threshold)
+ delay = 2 * delay;
+ }
+ }
+
+ is->frame_timer += delay;
+ /* compute the REAL delay (we need to do that to avoid
+ long term errors) */
+ actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
+ if (actual_delay < 0.010) {
+ /* XXX: should skip picture */
+ actual_delay = 0.010;
+ }
+ /* launch timer for next picture */
+ schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
+
+#if defined(DEBUG_SYNC)
+ printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
+ delay, actual_delay, vp->pts, -diff);
+#endif
+
+ if(is->subtitle_st) {
+ if (is->subtitle_stream_changed) {
+ SDL_LockMutex(is->subpq_mutex);
+
+ while (is->subpq_size) {
+ free_subpicture(&is->subpq[is->subpq_rindex]);
+
+ /* update queue size and signal for next picture */
+ if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
+ is->subpq_rindex = 0;
+
+ is->subpq_size--;
+ }
+ is->subtitle_stream_changed = 0;
+
+ SDL_CondSignal(is->subpq_cond);
+ SDL_UnlockMutex(is->subpq_mutex);
+ } else {
+ if (is->subpq_size > 0) {
+ sp = &is->subpq[is->subpq_rindex];
+
+ if (is->subpq_size > 1)
+ sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
+ else
+ sp2 = NULL;
+
+ if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
+ || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
+ {
+ free_subpicture(sp);
+
+ /* update queue size and signal for next picture */
+ if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
+ is->subpq_rindex = 0;
+
+ SDL_LockMutex(is->subpq_mutex);
+ is->subpq_size--;
+ SDL_CondSignal(is->subpq_cond);
+ SDL_UnlockMutex(is->subpq_mutex);
+ }
+ }
+ }
+ }
+
+ /* display picture */
+ video_display(is);
+
+ /* update queue size and signal for next picture */
+ if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
+ is->pictq_rindex = 0;
+
+ SDL_LockMutex(is->pictq_mutex);
+ is->pictq_size--;
+ SDL_CondSignal(is->pictq_cond);
+ SDL_UnlockMutex(is->pictq_mutex);
+ }
+ } else if (is->audio_st) {
+ /* draw the next audio frame */
+
+ schedule_refresh(is, 40);
+
+ /* if only audio stream, then display the audio bars (better
+ than nothing, just to test the implementation) */
+
+ /* display picture */
+ video_display(is);
+ } else {
+ schedule_refresh(is, 100);
+ }
+ if (show_status) {
+ static int64_t last_time;
+ int64_t cur_time;
+ int aqsize, vqsize, sqsize;
+ double av_diff;
+
+ cur_time = av_gettime();
+ if (!last_time || (cur_time - last_time) >= 500 * 1000) {
+ aqsize = 0;
+ vqsize = 0;
+ sqsize = 0;
+ if (is->audio_st)
+ aqsize = is->audioq.size;
+ if (is->video_st)
+ vqsize = is->videoq.size;
+ if (is->subtitle_st)
+ sqsize = is->subtitleq.size;
+ av_diff = 0;
+ if (is->audio_st && is->video_st)
+ av_diff = get_audio_clock(is) - get_video_clock(is);
+ printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
+ get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
+ fflush(stdout);
+ last_time = cur_time;
+ }
+ }
+}
+
+/* allocate a picture (this needs to be done in the main thread to avoid
+ potential locking problems) */
+static void alloc_picture(void *opaque)
+{
+ VideoState *is = opaque;
+ VideoPicture *vp;
+
+ vp = &is->pictq[is->pictq_windex];
+
+ if (vp->bmp)
+ SDL_FreeYUVOverlay(vp->bmp);
+
+#if 0
+ /* XXX: use generic function */
+ /* XXX: disable overlay if no hardware acceleration or if RGB format */
+ switch(is->video_st->codec->pix_fmt) {
+ case PIX_FMT_YUV420P:
+ case PIX_FMT_YUV422P:
+ case PIX_FMT_YUV444P:
+ case PIX_FMT_YUV422:
+ case PIX_FMT_YUV410P:
+ case PIX_FMT_YUV411P:
+ is_yuv = 1;
+ break;
+ default:
+ is_yuv = 0;
+ break;
+ }
+#endif
+ vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
+ is->video_st->codec->height,
+ SDL_YV12_OVERLAY,
+ screen);
+ vp->width = is->video_st->codec->width;
+ vp->height = is->video_st->codec->height;
+
+ SDL_LockMutex(is->pictq_mutex);
+ vp->allocated = 1;
+ SDL_CondSignal(is->pictq_cond);
+ SDL_UnlockMutex(is->pictq_mutex);
+}
+
+/**
+ *
+ * @param pts the dts of the pkt / pts of the frame and guessed if not known
+ */
+static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
+{
+ VideoPicture *vp;
+ int dst_pix_fmt;
+ AVPicture pict;
+ static struct SwsContext *img_convert_ctx;
+
+ /* wait until we have space to put a new picture */
+ SDL_LockMutex(is->pictq_mutex);
+ while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
+ !is->videoq.abort_request) {
+ SDL_CondWait(is->pictq_cond, is->pictq_mutex);
+ }
+ SDL_UnlockMutex(is->pictq_mutex);
+
+ if (is->videoq.abort_request)
+ return -1;
+
+ vp = &is->pictq[is->pictq_windex];
+
+ /* alloc or resize hardware picture buffer */
+ if (!vp->bmp ||
+ vp->width != is->video_st->codec->width ||
+ vp->height != is->video_st->codec->height) {
+ SDL_Event event;
+
+ vp->allocated = 0;
+
+ /* the allocation must be done in the main thread to avoid
+ locking problems */
+ event.type = FF_ALLOC_EVENT;
+ event.user.data1 = is;
+ SDL_PushEvent(&event);
+
+ /* wait until the picture is allocated */
+ SDL_LockMutex(is->pictq_mutex);
+ while (!vp->allocated && !is->videoq.abort_request) {
+ SDL_CondWait(is->pictq_cond, is->pictq_mutex);
+ }
+ SDL_UnlockMutex(is->pictq_mutex);
+
+ if (is->videoq.abort_request)
+ return -1;
+ }
+
+ /* if the frame is not skipped, then display it */
+ if (vp->bmp) {
+ /* get a pointer on the bitmap */
+ SDL_LockYUVOverlay (vp->bmp);
+
+ dst_pix_fmt = PIX_FMT_YUV420P;
+ pict.data[0] = vp->bmp->pixels[0];
+ pict.data[1] = vp->bmp->pixels[2];
+ pict.data[2] = vp->bmp->pixels[1];
+
+ pict.linesize[0] = vp->bmp->pitches[0];
+ pict.linesize[1] = vp->bmp->pitches[2];
+ pict.linesize[2] = vp->bmp->pitches[1];
+ if (img_convert_ctx == NULL) {
+ img_convert_ctx = sws_getContext(is->video_st->codec->width,
+ is->video_st->codec->height, is->video_st->codec->pix_fmt,
+ is->video_st->codec->width, is->video_st->codec->height,
+ dst_pix_fmt, sws_flags, NULL, NULL, NULL);
+ if (img_convert_ctx == NULL) {
+ fprintf(stderr, "Cannot initialize the conversion context\n");
+ exit(1);
+ }
+ }
+ sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
+ 0, is->video_st->codec->height, pict.data, pict.linesize);
+ /* update the bitmap content */
+ SDL_UnlockYUVOverlay(vp->bmp);
+
+ vp->pts = pts;
+
+ /* now we can update the picture count */
+ if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
+ is->pictq_windex = 0;
+ SDL_LockMutex(is->pictq_mutex);
+ is->pictq_size++;
+ SDL_UnlockMutex(is->pictq_mutex);
+ }
+ return 0;
+}
+
+/**
+ * compute the exact PTS for the picture if it is omitted in the stream
+ * @param pts1 the dts of the pkt / pts of the frame
+ */
+static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
+{
+ double frame_delay, pts;
+
+ pts = pts1;
+
+ if (pts != 0) {
+ /* update video clock with pts, if present */
+ is->video_clock = pts;
+ } else {
+ pts = is->video_clock;
+ }
+ /* update video clock for next frame */
+ frame_delay = av_q2d(is->video_st->codec->time_base);
+ /* for MPEG2, the frame can be repeated, so we update the
+ clock accordingly */
+ frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
+ is->video_clock += frame_delay;
+
+#if defined(DEBUG_SYNC) && 0
+ {
+ int ftype;
+ if (src_frame->pict_type == FF_B_TYPE)
+ ftype = 'B';
+ else if (src_frame->pict_type == FF_I_TYPE)
+ ftype = 'I';
+ else
+ ftype = 'P';
+ printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
+ ftype, pts, pts1);
+ }
+#endif
+ return queue_picture(is, src_frame, pts);
+}
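+/* output_picture2() keeps a running video_clock so that frames without a PTS
+ get one extrapolated from the previous frame plus the codec time base (plus
+ half a frame per repeat_pict, e.g. for MPEG-2 repeat_first_field). */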
+
+static int video_thread(void *arg)
+{
+ VideoState *is = arg;
+ AVPacket pkt1, *pkt = &pkt1;
+ int len1, got_picture;
+ AVFrame *frame= avcodec_alloc_frame();
+ double pts;
+
+ for(;;) {
+ while (is->paused && !is->videoq.abort_request) {
+ SDL_Delay(10);
+ }
+ if (packet_queue_get(&is->videoq, pkt, 1) < 0)
+ break;
+
+ if(pkt->data == flush_pkt.data){
+ avcodec_flush_buffers(is->video_st->codec);
+ continue;
+ }
+
+ /* NOTE: ipts is the PTS of the _first_ picture beginning in
+ this packet, if any */
+ pts = 0;
+ if (pkt->dts != AV_NOPTS_VALUE)
+ pts = av_q2d(is->video_st->time_base)*pkt->dts;
+
+ len1 = avcodec_decode_video(is->video_st->codec,
+ frame, &got_picture,
+ pkt->data, pkt->size);
+// if (len1 < 0)
+// break;
+ if (got_picture) {
+ if (output_picture2(is, frame, pts) < 0)
+ goto the_end;
+ }
+ av_free_packet(pkt);
+ if (step)
+ if (cur_stream)
+ stream_pause(cur_stream);
+ }
+ the_end:
+ av_free(frame);
+ return 0;
+}
+
+static int subtitle_thread(void *arg)
+{
+ VideoState *is = arg;
+ SubPicture *sp;
+ AVPacket pkt1, *pkt = &pkt1;
+ int len1, got_subtitle;
+ double pts;
+ int i, j;
+ int r, g, b, y, u, v, a;
+
+ for(;;) {
+ while (is->paused && !is->subtitleq.abort_request) {
+ SDL_Delay(10);
+ }
+ if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
+ break;
+
+ if(pkt->data == flush_pkt.data){
+ avcodec_flush_buffers(is->subtitle_st->codec);
+ continue;
+ }
+ SDL_LockMutex(is->subpq_mutex);
+ while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
+ !is->subtitleq.abort_request) {
+ SDL_CondWait(is->subpq_cond, is->subpq_mutex);
+ }
+ SDL_UnlockMutex(is->subpq_mutex);
+
+ if (is->subtitleq.abort_request)
+ goto the_end;
+
+ sp = &is->subpq[is->subpq_windex];
+
+ /* NOTE: ipts is the PTS of the _first_ picture beginning in
+ this packet, if any */
+ pts = 0;
+ if (pkt->pts != AV_NOPTS_VALUE)
+ pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
+
+ len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
+ &sp->sub, &got_subtitle,
+ pkt->data, pkt->size);
+// if (len1 < 0)
+// break;
+ if (got_subtitle && sp->sub.format == 0) {
+ sp->pts = pts;
+
+ for (i = 0; i < sp->sub.num_rects; i++)
+ {
+ for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
+ {
+ RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
+ y = RGB_TO_Y_CCIR(r, g, b);
+ u = RGB_TO_U_CCIR(r, g, b, 0);
+ v = RGB_TO_V_CCIR(r, g, b, 0);
+ YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
+ }
+ }
+
+ /* now we can update the picture count */
+ if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
+ is->subpq_windex = 0;
+ SDL_LockMutex(is->subpq_mutex);
+ is->subpq_size++;
+ SDL_UnlockMutex(is->subpq_mutex);
+ }
+ av_free_packet(pkt);
+// if (step)
+// if (cur_stream)
+// stream_pause(cur_stream);
+ }
+ the_end:
+ return 0;
+}
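+
+/* Editorial note (illustrative, not part of the original source): the
+ RGB_TO_Y_CCIR / RGB_TO_U_CCIR / RGB_TO_V_CCIR macros used above apply
+ the usual ITU-R BT.601 limited-range conversion, roughly
+ Y = 16 + ( 65.481*R + 128.553*G + 24.966*B) / 255
+ U = 128 + (-37.797*R - 74.203*G + 112.000*B) / 255
+ V = 128 + (112.000*R - 93.786*G - 18.214*B) / 255
+ so each subtitle palette entry is converted once here, before the
+ rectangles are blended into the YUV overlay. */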
+
+/* copy samples for viewing in editor window */
+static void update_sample_display(VideoState *is, short *samples, int samples_size)
+{
+ int size, len, channels;
+
+ channels = is->audio_st->codec->channels;
+
+ size = samples_size / sizeof(short);
+ while (size > 0) {
+ len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
+ if (len > size)
+ len = size;
+ memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
+ samples += len;
+ is->sample_array_index += len;
+ if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
+ is->sample_array_index = 0;
+ size -= len;
+ }
+}
+
+/* return the new audio buffer size (samples can be added or deleted
+ to get better sync if video or an external clock is the master) */
+static int synchronize_audio(VideoState *is, short *samples,
+ int samples_size1, double pts)
+{
+ int n, samples_size;
+ double ref_clock;
+
+ n = 2 * is->audio_st->codec->channels;
+ samples_size = samples_size1;
+
+ /* if not master, then we try to remove or add samples to correct the clock */
+ if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
+ is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
+ double diff, avg_diff;
+ int wanted_size, min_size, max_size, nb_samples;
+
+ ref_clock = get_master_clock(is);
+ diff = get_audio_clock(is) - ref_clock;
+
+ if (diff < AV_NOSYNC_THRESHOLD) {
+ is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
+ if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
+ /* not enough measurements yet for a reliable estimate */
+ is->audio_diff_avg_count++;
+ } else {
+ /* estimate the A-V difference */
+ avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
+
+ if (fabs(avg_diff) >= is->audio_diff_threshold) {
+ wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
+ nb_samples = samples_size / n;
+
+ min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
+ max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
+ if (wanted_size < min_size)
+ wanted_size = min_size;
+ else if (wanted_size > max_size)
+ wanted_size = max_size;
+
+ /* add or remove samples to correct the sync */
+ if (wanted_size < samples_size) {
+ /* remove samples */
+ samples_size = wanted_size;
+ } else if (wanted_size > samples_size) {
+ uint8_t *samples_end, *q;
+ int nb;
+
+ /* add samples */
+ nb = (samples_size - wanted_size);
+ samples_end = (uint8_t *)samples + samples_size - n;
+ q = samples_end + n;
+ while (nb > 0) {
+ memcpy(q, samples_end, n);
+ q += n;
+ nb -= n;
+ }
+ samples_size = wanted_size;
+ }
+ }
+#if 0
+ printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
+ diff, avg_diff, samples_size - samples_size1,
+ is->audio_clock, is->video_clock, is->audio_diff_threshold);
+#endif
+ }
+ } else {
+ /* too big a difference: probably initial PTS errors, so
+ reset the A-V filter */
+ is->audio_diff_avg_count = 0;
+ is->audio_diff_cum = 0;
+ }
+ }
+
+ return samples_size;
+}
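+
+/* Editorial note (illustrative, not part of the original source):
+ audio_diff_cum above is an exponentially weighted sum, so with
+ c = audio_diff_avg_coef the estimate
+ avg_diff = (1 - c) * (diff_k + c*diff_{k-1} + c^2*diff_{k-2} + ...)
+ is a moving average that favours recent measurements. Once it exceeds
+ audio_diff_threshold, the buffer is stretched or shrunk by about
+ diff * sample_rate sample periods, clamped to within
+ SAMPLE_CORRECTION_PERCENT_MAX percent of the original size; e.g. a
+ diff of +0.01 s at 44100 Hz stereo (n = 4 bytes per sample period)
+ asks for roughly 441 extra sample periods, i.e. 1764 extra bytes. */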
+
+/* decode one audio frame and return its uncompressed size */
+static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
+{
+ AVPacket *pkt = &is->audio_pkt;
+ int n, len1, data_size;
+ double pts;
+
+ for(;;) {
+ /* NOTE: the audio packet can contain several frames */
+ while (is->audio_pkt_size > 0) {
+ len1 = avcodec_decode_audio(is->audio_st->codec,
+ (int16_t *)audio_buf, &data_size,
+ is->audio_pkt_data, is->audio_pkt_size);
+ if (len1 < 0) {
+ /* if error, we skip the frame */
+ is->audio_pkt_size = 0;
+ break;
+ }
+
+ is->audio_pkt_data += len1;
+ is->audio_pkt_size -= len1;
+ if (data_size <= 0)
+ continue;
+ /* if no pts, then compute it */
+ pts = is->audio_clock;
+ *pts_ptr = pts;
+ n = 2 * is->audio_st->codec->channels;
+ is->audio_clock += (double)data_size /
+ (double)(n * is->audio_st->codec->sample_rate);
+#if defined(DEBUG_SYNC)
+ {
+ static double last_clock;
+ printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
+ is->audio_clock - last_clock,
+ is->audio_clock, pts);
+ last_clock = is->audio_clock;
+ }
+#endif
+ return data_size;
+ }
+
+ /* free the current packet */
+ if (pkt->data)
+ av_free_packet(pkt);
+
+ if (is->paused || is->audioq.abort_request) {
+ return -1;
+ }
+
+ /* read next packet */
+ if (packet_queue_get(&is->audioq, pkt, 1) < 0)
+ return -1;
+ if(pkt->data == flush_pkt.data){
+ avcodec_flush_buffers(is->audio_st->codec);
+ continue;
+ }
+
+ is->audio_pkt_data = pkt->data;
+ is->audio_pkt_size = pkt->size;
+
+ /* update the audio clock with the pts, if available */
+ if (pkt->pts != AV_NOPTS_VALUE) {
+ is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
+ }
+ }
+}
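+
+/* Editorial note (illustrative, not part of the original source): the
+ audio clock advance above follows from the output format: with 16-bit
+ samples there are n = 2 * channels bytes per sample period, so a
+ decoded chunk lasts data_size / (n * sample_rate) seconds. A minimal
+ sketch of that computation (hypothetical helper, not used anywhere): */
+#if 0
+static double pcm_chunk_duration(int data_size, int channels, int sample_rate)
+{
+    int n = 2 * channels;               /* bytes per sample period (S16) */
+    return (double)data_size / (double)(n * sample_rate);
+}
+/* e.g. pcm_chunk_duration(4096, 2, 44100) ~= 0.0232 s */
+#endif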
+
+/* get the current audio output buffer size, in bytes. With SDL, we
+ cannot get precise information */
+static int audio_write_get_buf_size(VideoState *is)
+{
+ return is->audio_hw_buf_size - is->audio_buf_index;
+}
+
+
+/* prepare a new audio buffer */
+void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
+{
+ VideoState *is = opaque;
+ int audio_size, len1;
+ double pts;
+
+ audio_callback_time = av_gettime();
+
+ while (len > 0) {
+ if (is->audio_buf_index >= is->audio_buf_size) {
+ audio_size = audio_decode_frame(is, is->audio_buf, &pts);
+ if (audio_size < 0) {
+ /* if error, just output silence */
+ is->audio_buf_size = 1024;
+ memset(is->audio_buf, 0, is->audio_buf_size);
+ } else {
+ if (is->show_audio)
+ update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
+ audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
+ pts);
+ is->audio_buf_size = audio_size;
+ }
+ is->audio_buf_index = 0;
+ }
+ len1 = is->audio_buf_size - is->audio_buf_index;
+ if (len1 > len)
+ len1 = len;
+ memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
+ len -= len1;
+ stream += len1;
+ is->audio_buf_index += len1;
+ }
+}
+
+
+/* open a given stream. Return 0 if OK */
+static int stream_component_open(VideoState *is, int stream_index)
+{
+ AVFormatContext *ic = is->ic;
+ AVCodecContext *enc;
+ AVCodec *codec;
+ SDL_AudioSpec wanted_spec, spec;
+
+ if (stream_index < 0 || stream_index >= ic->nb_streams)
+ return -1;
+ enc = ic->streams[stream_index]->codec;
+
+ /* prepare audio output */
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ wanted_spec.freq = enc->sample_rate;
+ wanted_spec.format = AUDIO_S16SYS;
+ /* hack for AC3. XXX: remove this */
+ if (enc->channels > 2)
+ enc->channels = 2;
+ wanted_spec.channels = enc->channels;
+ wanted_spec.silence = 0;
+ wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
+ wanted_spec.callback = sdl_audio_callback;
+ wanted_spec.userdata = is;
+ if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
+ fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
+ return -1;
+ }
+ is->audio_hw_buf_size = spec.size;
+ }
+
+ codec = avcodec_find_decoder(enc->codec_id);
+ enc->debug_mv = debug_mv;
+ enc->debug = debug;
+ enc->workaround_bugs = workaround_bugs;
+ enc->lowres = lowres;
+ if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
+ enc->idct_algo= idct;
+ if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
+ enc->skip_frame= skip_frame;
+ enc->skip_idct= skip_idct;
+ enc->skip_loop_filter= skip_loop_filter;
+ enc->error_resilience= error_resilience;
+ enc->error_concealment= error_concealment;
+ if (!codec ||
+ avcodec_open(enc, codec) < 0)
+ return -1;
+#if defined(HAVE_THREADS)
+ if(thread_count>1)
+ avcodec_thread_init(enc, thread_count);
+#endif
+ enc->thread_count= thread_count;
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ is->audio_stream = stream_index;
+ is->audio_st = ic->streams[stream_index];
+ is->audio_buf_size = 0;
+ is->audio_buf_index = 0;
+
+ /* init averaging filter */
+ is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
+ is->audio_diff_avg_count = 0;
+ /* since we do not have a precise enough measure of the audio fifo
+ fullness, we only correct audio sync if the error is larger than this threshold */
+ is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
+
+ memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
+ packet_queue_init(&is->audioq);
+ SDL_PauseAudio(0);
+ break;
+ case CODEC_TYPE_VIDEO:
+ is->video_stream = stream_index;
+ is->video_st = ic->streams[stream_index];
+
+ is->frame_last_delay = 40e-3;
+ is->frame_timer = (double)av_gettime() / 1000000.0;
+ is->video_current_pts_time = av_gettime();
+
+ packet_queue_init(&is->videoq);
+ is->video_tid = SDL_CreateThread(video_thread, is);
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ is->subtitle_stream = stream_index;
+ is->subtitle_st = ic->streams[stream_index];
+ packet_queue_init(&is->subtitleq);
+
+ is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
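+
+/* Editorial note (illustrative, not part of the original source):
+ audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) is chosen so
+ that coef^AUDIO_DIFF_AVG_NB = 0.01, i.e. a measurement older than
+ AUDIO_DIFF_AVG_NB frames contributes at most about 1% to the averaged
+ A-V difference used by synchronize_audio(). */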
+
+static void stream_component_close(VideoState *is, int stream_index)
+{
+ AVFormatContext *ic = is->ic;
+ AVCodecContext *enc;
+
+ if (stream_index < 0 || stream_index >= ic->nb_streams)
+ return;
+ enc = ic->streams[stream_index]->codec;
+
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ packet_queue_abort(&is->audioq);
+
+ SDL_CloseAudio();
+
+ packet_queue_end(&is->audioq);
+ break;
+ case CODEC_TYPE_VIDEO:
+ packet_queue_abort(&is->videoq);
+
+ /* note: we also signal the condition variable to make sure we
+ unblock the video thread in all cases */
+ SDL_LockMutex(is->pictq_mutex);
+ SDL_CondSignal(is->pictq_cond);
+ SDL_UnlockMutex(is->pictq_mutex);
+
+ SDL_WaitThread(is->video_tid, NULL);
+
+ packet_queue_end(&is->videoq);
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ packet_queue_abort(&is->subtitleq);
+
+ /* note: we also signal the condition variable to make sure we
+ unblock the subtitle thread in all cases */
+ SDL_LockMutex(is->subpq_mutex);
+ is->subtitle_stream_changed = 1;
+
+ SDL_CondSignal(is->subpq_cond);
+ SDL_UnlockMutex(is->subpq_mutex);
+
+ SDL_WaitThread(is->subtitle_tid, NULL);
+
+ packet_queue_end(&is->subtitleq);
+ break;
+ default:
+ break;
+ }
+
+ avcodec_close(enc);
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ is->audio_st = NULL;
+ is->audio_stream = -1;
+ break;
+ case CODEC_TYPE_VIDEO:
+ is->video_st = NULL;
+ is->video_stream = -1;
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ is->subtitle_st = NULL;
+ is->subtitle_stream = -1;
+ break;
+ default:
+ break;
+ }
+}
+
+static void dump_stream_info(const AVFormatContext *s)
+{
+ if (s->track != 0)
+ fprintf(stderr, "Track: %d\n", s->track);
+ if (s->title[0] != '\0')
+ fprintf(stderr, "Title: %s\n", s->title);
+ if (s->author[0] != '\0')
+ fprintf(stderr, "Author: %s\n", s->author);
+ if (s->copyright[0] != '\0')
+ fprintf(stderr, "Copyright: %s\n", s->copyright);
+ if (s->comment[0] != '\0')
+ fprintf(stderr, "Comment: %s\n", s->comment);
+ if (s->album[0] != '\0')
+ fprintf(stderr, "Album: %s\n", s->album);
+ if (s->year != 0)
+ fprintf(stderr, "Year: %d\n", s->year);
+ if (s->genre[0] != '\0')
+ fprintf(stderr, "Genre: %s\n", s->genre);
+}
+
+/* since we have only one decoding thread, we can use a global
+ variable instead of a thread local variable */
+static VideoState *global_video_state;
+
+static int decode_interrupt_cb(void)
+{
+ return (global_video_state && global_video_state->abort_request);
+}
+
+/* this thread gets the stream from the disk or the network */
+static int decode_thread(void *arg)
+{
+ VideoState *is = arg;
+ AVFormatContext *ic;
+ int err, i, ret, video_index, audio_index, use_play;
+ AVPacket pkt1, *pkt = &pkt1;
+ AVFormatParameters params, *ap = &params;
+
+ video_index = -1;
+ audio_index = -1;
+ is->video_stream = -1;
+ is->audio_stream = -1;
+ is->subtitle_stream = -1;
+
+ global_video_state = is;
+ url_set_interrupt_cb(decode_interrupt_cb);
+
+ memset(ap, 0, sizeof(*ap));
+ ap->initial_pause = 1; /* we force a pause when starting an RTSP
+ stream */
+
+ err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
+ if (err < 0) {
+ print_error(is->filename, err);
+ ret = -1;
+ goto fail;
+ }
+ is->ic = ic;
+#ifdef CONFIG_NETWORK
+ use_play = (ic->iformat == &rtsp_demuxer);
+#else
+ use_play = 0;
+#endif
+
+ if(genpts)
+ ic->flags |= AVFMT_FLAG_GENPTS;
+
+ if (!use_play) {
+ err = av_find_stream_info(ic);
+ if (err < 0) {
+ fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
+ ret = -1;
+ goto fail;
+ }
+ ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe shouldn't use url_feof() to test for the end
+ }
+
+ /* if seeking requested, we execute it */
+ if (start_time != AV_NOPTS_VALUE) {
+ int64_t timestamp;
+
+ timestamp = start_time;
+ /* add the stream start time */
+ if (ic->start_time != AV_NOPTS_VALUE)
+ timestamp += ic->start_time;
+ ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
+ if (ret < 0) {
+ fprintf(stderr, "%s: could not seek to position %0.3f\n",
+ is->filename, (double)timestamp / AV_TIME_BASE);
+ }
+ }
+
+ /* now we can begin to play (RTSP stream only) */
+ av_read_play(ic);
+
+ if (use_play) {
+ err = av_find_stream_info(ic);
+ if (err < 0) {
+ fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
+ ret = -1;
+ goto fail;
+ }
+ }
+
+ for(i = 0; i < ic->nb_streams; i++) {
+ AVCodecContext *enc = ic->streams[i]->codec;
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (audio_index < 0 && !audio_disable)
+ audio_index = i;
+ break;
+ case CODEC_TYPE_VIDEO:
+ if (video_index < 0 && !video_disable)
+ video_index = i;
+ break;
+ default:
+ break;
+ }
+ }
+ if (show_status) {
+ dump_format(ic, 0, is->filename, 0);
+ dump_stream_info(ic);
+ }
+
+ /* open the streams */
+ if (audio_index >= 0) {
+ stream_component_open(is, audio_index);
+ }
+
+ if (video_index >= 0) {
+ stream_component_open(is, video_index);
+ } else {
+ if (!display_disable)
+ is->show_audio = 1;
+ }
+
+ if (is->video_stream < 0 && is->audio_stream < 0) {
+ fprintf(stderr, "%s: could not open codecs\n", is->filename);
+ ret = -1;
+ goto fail;
+ }
+
+ for(;;) {
+ if (is->abort_request)
+ break;
+#ifdef CONFIG_NETWORK
+ if (is->paused != is->last_paused) {
+ is->last_paused = is->paused;
+ if (is->paused)
+ av_read_pause(ic);
+ else
+ av_read_play(ic);
+ }
+ if (is->paused && ic->iformat == &rtsp_demuxer) {
+ /* wait 10 ms to avoid trying to get another packet */
+ /* XXX: horrible */
+ SDL_Delay(10);
+ continue;
+ }
+#endif
+ if (is->seek_req) {
+ ret = av_seek_frame(is->ic, -1, is->seek_pos, is->seek_flags);
+ if (ret < 0) {
+ fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
+ }else{
+ if (is->audio_stream >= 0) {
+ packet_queue_flush(&is->audioq);
+ packet_queue_put(&is->audioq, &flush_pkt);
+ }
+ if (is->subtitle_stream >= 0) {
+ packet_queue_flush(&is->subtitleq);
+ packet_queue_put(&is->subtitleq, &flush_pkt);
+ }
+ if (is->video_stream >= 0) {
+ packet_queue_flush(&is->videoq);
+ packet_queue_put(&is->videoq, &flush_pkt);
+ }
+ }
+ is->seek_req = 0;
+ }
+
+ /* if the queues are full, no need to read more */
+ if (is->audioq.size > MAX_AUDIOQ_SIZE ||
+ is->videoq.size > MAX_VIDEOQ_SIZE ||
+ is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
+ url_feof(&ic->pb)) {
+ /* wait 10 ms */
+ SDL_Delay(10);
+ continue;
+ }
+ ret = av_read_frame(ic, pkt);
+ if (ret < 0) {
+ if (url_ferror(&ic->pb) == 0) {
+ SDL_Delay(100); /* wait for user event */
+ continue;
+ } else
+ break;
+ }
+ if (pkt->stream_index == is->audio_stream) {
+ packet_queue_put(&is->audioq, pkt);
+ } else if (pkt->stream_index == is->video_stream) {
+ packet_queue_put(&is->videoq, pkt);
+ } else if (pkt->stream_index == is->subtitle_stream) {
+ packet_queue_put(&is->subtitleq, pkt);
+ } else {
+ av_free_packet(pkt);
+ }
+ }
+ /* wait until the end */
+ while (!is->abort_request) {
+ SDL_Delay(100);
+ }
+
+ ret = 0;
+ fail:
+ /* disable interrupting */
+ global_video_state = NULL;
+
+ /* close each stream */
+ if (is->audio_stream >= 0)
+ stream_component_close(is, is->audio_stream);
+ if (is->video_stream >= 0)
+ stream_component_close(is, is->video_stream);
+ if (is->subtitle_stream >= 0)
+ stream_component_close(is, is->subtitle_stream);
+ if (is->ic) {
+ av_close_input_file(is->ic);
+ is->ic = NULL; /* safety */
+ }
+ url_set_interrupt_cb(NULL);
+
+ if (ret != 0) {
+ SDL_Event event;
+
+ event.type = FF_QUIT_EVENT;
+ event.user.data1 = is;
+ SDL_PushEvent(&event);
+ }
+ return 0;
+}
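+
+/* Editorial note (illustrative, not part of the original source): after
+ a successful seek the three packet queues are flushed and a copy of the
+ global flush_pkt is queued instead; the decoder threads recognise it by
+ comparing pkt->data with flush_pkt.data and call avcodec_flush_buffers()
+ so that no stale reference frames survive the seek. */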
+
+static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
+{
+ VideoState *is;
+
+ is = av_mallocz(sizeof(VideoState));
+ if (!is)
+ return NULL;
+ pstrcpy(is->filename, sizeof(is->filename), filename);
+ is->iformat = iformat;
+ if (screen) {
+ is->width = screen->w;
+ is->height = screen->h;
+ }
+ is->ytop = 0;
+ is->xleft = 0;
+
+ /* start video display */
+ is->pictq_mutex = SDL_CreateMutex();
+ is->pictq_cond = SDL_CreateCond();
+
+ is->subpq_mutex = SDL_CreateMutex();
+ is->subpq_cond = SDL_CreateCond();
+
+ /* add the refresh timer to draw the picture */
+ schedule_refresh(is, 40);
+
+ is->av_sync_type = av_sync_type;
+ is->parse_tid = SDL_CreateThread(decode_thread, is);
+ if (!is->parse_tid) {
+ av_free(is);
+ return NULL;
+ }
+ return is;
+}
+
+static void stream_close(VideoState *is)
+{
+ VideoPicture *vp;
+ int i;
+ /* XXX: use a special url_shutdown call to abort parse cleanly */
+ is->abort_request = 1;
+ SDL_WaitThread(is->parse_tid, NULL);
+
+ /* free all pictures */
+ for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
+ vp = &is->pictq[i];
+ if (vp->bmp) {
+ SDL_FreeYUVOverlay(vp->bmp);
+ vp->bmp = NULL;
+ }
+ }
+ SDL_DestroyMutex(is->pictq_mutex);
+ SDL_DestroyCond(is->pictq_cond);
+ SDL_DestroyMutex(is->subpq_mutex);
+ SDL_DestroyCond(is->subpq_cond);
+}
+
+static void stream_cycle_channel(VideoState *is, int codec_type)
+{
+ AVFormatContext *ic = is->ic;
+ int start_index, stream_index;
+ AVStream *st;
+
+ if (codec_type == CODEC_TYPE_VIDEO)
+ start_index = is->video_stream;
+ else if (codec_type == CODEC_TYPE_AUDIO)
+ start_index = is->audio_stream;
+ else
+ start_index = is->subtitle_stream;
+ if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
+ return;
+ stream_index = start_index;
+ for(;;) {
+ if (++stream_index >= is->ic->nb_streams)
+ {
+ if (codec_type == CODEC_TYPE_SUBTITLE)
+ {
+ stream_index = -1;
+ goto the_end;
+ } else
+ stream_index = 0;
+ }
+ if (stream_index == start_index)
+ return;
+ st = ic->streams[stream_index];
+ if (st->codec->codec_type == codec_type) {
+ /* check that parameters are OK */
+ switch(codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (st->codec->sample_rate != 0 &&
+ st->codec->channels != 0)
+ goto the_end;
+ break;
+ case CODEC_TYPE_VIDEO:
+ case CODEC_TYPE_SUBTITLE:
+ goto the_end;
+ default:
+ break;
+ }
+ }
+ }
+ the_end:
+ stream_component_close(is, start_index);
+ stream_component_open(is, stream_index);
+}
+
+
+static void toggle_full_screen(void)
+{
+ int w, h, flags;
+ is_full_screen = !is_full_screen;
+ if (!fs_screen_width) {
+ /* use default SDL method */
+ SDL_WM_ToggleFullScreen(screen);
+ } else {
+ /* use the recorded resolution */
+ flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
+ if (is_full_screen) {
+ w = fs_screen_width;
+ h = fs_screen_height;
+ flags |= SDL_FULLSCREEN;
+ } else {
+ w = screen_width;
+ h = screen_height;
+ flags |= SDL_RESIZABLE;
+ }
+ screen = SDL_SetVideoMode(w, h, 0, flags);
+ cur_stream->width = w;
+ cur_stream->height = h;
+ }
+}
+
+static void toggle_pause(void)
+{
+ if (cur_stream)
+ stream_pause(cur_stream);
+ step = 0;
+}
+
+static void step_to_next_frame(void)
+{
+ if (cur_stream) {
+ if (cur_stream->paused)
+ cur_stream->paused=0;
+ cur_stream->video_current_pts = get_video_clock(cur_stream);
+ }
+ step = 1;
+}
+
+static void do_exit(void)
+{
+ if (cur_stream) {
+ stream_close(cur_stream);
+ cur_stream = NULL;
+ }
+ if (show_status)
+ printf("\n");
+ SDL_Quit();
+ exit(0);
+}
+
+static void toggle_audio_display(void)
+{
+ if (cur_stream) {
+ cur_stream->show_audio = !cur_stream->show_audio;
+ }
+}
+
+/* handle an event sent by the GUI */
+static void event_loop(void)
+{
+ SDL_Event event;
+ double incr, pos, frac;
+
+ for(;;) {
+ SDL_WaitEvent(&event);
+ switch(event.type) {
+ case SDL_KEYDOWN:
+ switch(event.key.keysym.sym) {
+ case SDLK_ESCAPE:
+ case SDLK_q:
+ do_exit();
+ break;
+ case SDLK_f:
+ toggle_full_screen();
+ break;
+ case SDLK_p:
+ case SDLK_SPACE:
+ toggle_pause();
+ break;
+ case SDLK_s: //S: Step to next frame
+ step_to_next_frame();
+ break;
+ case SDLK_a:
+ if (cur_stream)
+ stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
+ break;
+ case SDLK_v:
+ if (cur_stream)
+ stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
+ break;
+ case SDLK_t:
+ if (cur_stream)
+ stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
+ break;
+ case SDLK_w:
+ toggle_audio_display();
+ break;
+ case SDLK_LEFT:
+ incr = -10.0;
+ goto do_seek;
+ case SDLK_RIGHT:
+ incr = 10.0;
+ goto do_seek;
+ case SDLK_UP:
+ incr = 60.0;
+ goto do_seek;
+ case SDLK_DOWN:
+ incr = -60.0;
+ do_seek:
+ if (cur_stream) {
+ if (seek_by_bytes) {
+ pos = url_ftell(&cur_stream->ic->pb);
+ if (cur_stream->ic->bit_rate)
+ incr *= cur_stream->ic->bit_rate / 60.0;
+ else
+ incr *= 180000.0;
+ pos += incr;
+ stream_seek(cur_stream, pos, incr);
+ } else {
+ pos = get_master_clock(cur_stream);
+ pos += incr;
+ stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case SDL_MOUSEBUTTONDOWN:
+ if (cur_stream) {
+ int ns, hh, mm, ss;
+ int tns, thh, tmm, tss;
+ tns = cur_stream->ic->duration/1000000LL;
+ thh = tns/3600;
+ tmm = (tns%3600)/60;
+ tss = (tns%60);
+ frac = (double)event.button.x/(double)cur_stream->width;
+ ns = frac*tns;
+ hh = ns/3600;
+ mm = (ns%3600)/60;
+ ss = (ns%60);
+ fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
+ hh, mm, ss, thh, tmm, tss);
+ stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
+ }
+ break;
+ case SDL_VIDEORESIZE:
+ if (cur_stream) {
+ screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
+ SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
+ cur_stream->width = event.resize.w;
+ cur_stream->height = event.resize.h;
+ }
+ break;
+ case SDL_QUIT:
+ case FF_QUIT_EVENT:
+ do_exit();
+ break;
+ case FF_ALLOC_EVENT:
+ alloc_picture(event.user.data1);
+ break;
+ case FF_REFRESH_EVENT:
+ video_refresh_timer(event.user.data1);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void opt_width(const char *arg)
+{
+ screen_width = atoi(arg);
+}
+
+void opt_height(const char *arg)
+{
+ screen_height = atoi(arg);
+}
+
+static void opt_format(const char *arg)
+{
+ file_iformat = av_find_input_format(arg);
+ if (!file_iformat) {
+ fprintf(stderr, "Unknown input format: %s\n", arg);
+ exit(1);
+ }
+}
+
+#ifdef CONFIG_NETWORK
+void opt_rtp_tcp(void)
+{
+ /* only tcp protocol */
+ rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
+}
+#endif
+
+void opt_sync(const char *arg)
+{
+ if (!strcmp(arg, "audio"))
+ av_sync_type = AV_SYNC_AUDIO_MASTER;
+ else if (!strcmp(arg, "video"))
+ av_sync_type = AV_SYNC_VIDEO_MASTER;
+ else if (!strcmp(arg, "ext"))
+ av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
+ else
+ show_help();
+}
+
+void opt_seek(const char *arg)
+{
+ start_time = parse_date(arg, 1);
+}
+
+static void opt_debug(const char *arg)
+{
+ av_log_set_level(99);
+ debug = atoi(arg);
+}
+
+static void opt_vismv(const char *arg)
+{
+ debug_mv = atoi(arg);
+}
+
+static void opt_thread_count(const char *arg)
+{
+ thread_count= atoi(arg);
+#if !defined(HAVE_THREADS)
+ fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
+#endif
+}
+
+const OptionDef options[] = {
+ { "h", 0, {(void*)show_help}, "show help" },
+ { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
+ { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
+ { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
+ { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
+ { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
+ { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
+ { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
+ { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
+ { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
+ { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
+ { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
+ { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
+ { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
+ { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
+ { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
+ { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
+ { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
+ { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
+ { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
+ { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
+ { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
+ { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
+#ifdef CONFIG_NETWORK
+ { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
+#endif
+ { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
+ { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
+ { NULL, },
+};
+
+void show_help(void)
+{
+ printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2006 Fabrice Bellard, et al.\n"
+ "usage: ffplay [options] input_file\n"
+ "Simple media player\n");
+ printf("\n");
+ show_help_options(options, "Main options:\n",
+ OPT_EXPERT, 0);
+ show_help_options(options, "\nAdvanced options:\n",
+ OPT_EXPERT, OPT_EXPERT);
+ printf("\nWhile playing:\n"
+ "q, ESC quit\n"
+ "f toggle full screen\n"
+ "p, SPC pause\n"
+ "a cycle audio channel\n"
+ "v cycle video channel\n"
+ "t cycle subtitle channel\n"
+ "w show audio waves\n"
+ "left/right seek backward/forward 10 seconds\n"
+ "down/up seek backward/forward 1 minute\n"
+ "mouse click seek to percentage in file corresponding to fraction of width\n"
+ );
+ exit(1);
+}
+
+void parse_arg_file(const char *filename)
+{
+ if (!strcmp(filename, "-"))
+ filename = "pipe:";
+ input_filename = filename;
+}
+
+/* program entry point */
+int main(int argc, char **argv)
+{
+ int flags, w, h;
+
+ /* register all codecs, demuxers and protocols */
+ av_register_all();
+
+ #ifdef CONFIG_OS2
+ MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions
+
+ // Make stdout and stderr unbuffered
+ setbuf( stdout, NULL );
+ setbuf( stderr, NULL );
+ #endif
+
+ parse_options(argc, argv, options);
+
+ if (!input_filename)
+ show_help();
+
+ if (display_disable) {
+ video_disable = 1;
+ }
+ flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
+#if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
+ flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
+#endif
+ if (SDL_Init (flags)) {
+ fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
+ exit(1);
+ }
+
+ if (!display_disable) {
+#ifdef HAVE_SDL_VIDEO_SIZE
+ const SDL_VideoInfo *vi = SDL_GetVideoInfo();
+ fs_screen_width = vi->current_w;
+ fs_screen_height = vi->current_h;
+#endif
+ flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
+ if (is_full_screen && fs_screen_width) {
+ w = fs_screen_width;
+ h = fs_screen_height;
+ flags |= SDL_FULLSCREEN;
+ } else {
+ w = screen_width;
+ h = screen_height;
+ flags |= SDL_RESIZABLE;
+ }
+#ifndef CONFIG_DARWIN
+ screen = SDL_SetVideoMode(w, h, 0, flags);
+#else
+ /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
+ screen = SDL_SetVideoMode(w, h, 24, flags);
+#endif
+ if (!screen) {
+ fprintf(stderr, "SDL: could not set video mode - exiting\n");
+ exit(1);
+ }
+ SDL_WM_SetCaption("FFplay", "FFplay");
+ }
+
+ SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
+ SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
+ SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
+ SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
+
+ av_init_packet(&flush_pkt);
+ flush_pkt.data= "FLUSH";
+
+ cur_stream = stream_open(input_filename, file_iformat);
+
+ event_loop();
+
+ /* never returns */
+
+ return 0;
+}
diff --git a/contrib/ffmpeg/ffserver.c b/contrib/ffmpeg/ffserver.c
new file mode 100644
index 000000000..138e607ed
--- /dev/null
+++ b/contrib/ffmpeg/ffserver.c
@@ -0,0 +1,4602 @@
+/*
+ * Multiple format streaming server
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#define HAVE_AV_CONFIG_H
+#include "avformat.h"
+
+#include <stdarg.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/poll.h>
+#include <errno.h>
+#include <sys/time.h>
+#undef time //needed because HAVE_AV_CONFIG_H is defined on top
+#include <time.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <signal.h>
+#ifdef HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include "version.h"
+#include "ffserver.h"
+
+/* maximum number of simultaneous HTTP connections */
+#define HTTP_MAX_CONNECTIONS 2000
+
+enum HTTPState {
+ HTTPSTATE_WAIT_REQUEST,
+ HTTPSTATE_SEND_HEADER,
+ HTTPSTATE_SEND_DATA_HEADER,
+ HTTPSTATE_SEND_DATA, /* sending TCP or UDP data */
+ HTTPSTATE_SEND_DATA_TRAILER,
+ HTTPSTATE_RECEIVE_DATA,
+ HTTPSTATE_WAIT_FEED, /* wait for data from the feed */
+ HTTPSTATE_READY,
+
+ RTSPSTATE_WAIT_REQUEST,
+ RTSPSTATE_SEND_REPLY,
+ RTSPSTATE_SEND_PACKET,
+};
+
+const char *http_state[] = {
+ "HTTP_WAIT_REQUEST",
+ "HTTP_SEND_HEADER",
+
+ "SEND_DATA_HEADER",
+ "SEND_DATA",
+ "SEND_DATA_TRAILER",
+ "RECEIVE_DATA",
+ "WAIT_FEED",
+ "READY",
+
+ "RTSP_WAIT_REQUEST",
+ "RTSP_SEND_REPLY",
+ "RTSP_SEND_PACKET",
+};
+
+#define IOBUFFER_INIT_SIZE 8192
+
+/* coefficient of the exponential mean used for bitrate estimation in the statistics */
+#define AVG_COEF 0.9
+
+/* timeouts are in ms */
+#define HTTP_REQUEST_TIMEOUT (15 * 1000)
+#define RTSP_REQUEST_TIMEOUT (3600 * 24 * 1000)
+
+#define SYNC_TIMEOUT (10 * 1000)
+
+typedef struct {
+ int64_t count1, count2;
+ long time1, time2;
+} DataRateData;
+
+/* context associated with one connection */
+typedef struct HTTPContext {
+ enum HTTPState state;
+ int fd; /* socket file descriptor */
+ struct sockaddr_in from_addr; /* origin */
+ struct pollfd *poll_entry; /* used when polling */
+ long timeout;
+ uint8_t *buffer_ptr, *buffer_end;
+ int http_error;
+ int post;
+ struct HTTPContext *next;
+ int got_key_frame; /* stream 0 => 1, stream 1 => 2, stream 2=> 4 */
+ int64_t data_count;
+ /* feed input */
+ int feed_fd;
+ /* input format handling */
+ AVFormatContext *fmt_in;
+ long start_time; /* In milliseconds - this wraps fairly often */
+ int64_t first_pts; /* initial pts value */
+ int64_t cur_pts; /* current pts value from the stream in us */
+ int64_t cur_frame_duration; /* duration of the current frame in us */
+ int cur_frame_bytes; /* output frame size, needed to compute
+ the time at which we send each
+ packet */
+ int pts_stream_index; /* stream we choose as clock reference */
+ int64_t cur_clock; /* current clock reference value in us */
+ /* output format handling */
+ struct FFStream *stream;
+ /* -1 is invalid stream */
+ int feed_streams[MAX_STREAMS]; /* index of streams in the feed */
+ int switch_feed_streams[MAX_STREAMS]; /* index of streams in the feed */
+ int switch_pending;
+ AVFormatContext fmt_ctx; /* instance of FFStream for one user */
+ int last_packet_sent; /* true if last data packet was sent */
+ int suppress_log;
+ DataRateData datarate;
+ int wmp_client_id;
+ char protocol[16];
+ char method[16];
+ char url[128];
+ int buffer_size;
+ uint8_t *buffer;
+ int is_packetized; /* if true, the stream is packetized */
+ int packet_stream_index; /* current stream for output in state machine */
+
+ /* RTSP state specific */
+ uint8_t *pb_buffer; /* XXX: use that in all the code */
+ ByteIOContext *pb;
+ int seq; /* RTSP sequence number */
+
+ /* RTP state specific */
+ enum RTSPProtocol rtp_protocol;
+ char session_id[32]; /* session id */
+ AVFormatContext *rtp_ctx[MAX_STREAMS];
+
+ /* RTP/UDP specific */
+ URLContext *rtp_handles[MAX_STREAMS];
+
+ /* RTP/TCP specific */
+ struct HTTPContext *rtsp_c;
+ uint8_t *packet_buffer, *packet_buffer_ptr, *packet_buffer_end;
+} HTTPContext;
+
+static AVFrame dummy_frame;
+
+/* each generated stream is described here */
+enum StreamType {
+ STREAM_TYPE_LIVE,
+ STREAM_TYPE_STATUS,
+ STREAM_TYPE_REDIRECT,
+};
+
+enum IPAddressAction {
+ IP_ALLOW = 1,
+ IP_DENY,
+};
+
+typedef struct IPAddressACL {
+ struct IPAddressACL *next;
+ enum IPAddressAction action;
+ /* These are in host order */
+ struct in_addr first;
+ struct in_addr last;
+} IPAddressACL;
+
+/* description of each stream of the ffserver.conf file */
+typedef struct FFStream {
+ enum StreamType stream_type;
+ char filename[1024]; /* stream filename */
+ struct FFStream *feed; /* feed we are using (can be null if
+ coming from file) */
+ AVFormatParameters *ap_in; /* input parameters */
+ AVInputFormat *ifmt; /* if non NULL, force input format */
+ AVOutputFormat *fmt;
+ IPAddressACL *acl;
+ int nb_streams;
+ int prebuffer; /* Number of milliseconds early to start */
+ long max_time; /* Number of milliseconds to run */
+ int send_on_key;
+ AVStream *streams[MAX_STREAMS];
+ int feed_streams[MAX_STREAMS]; /* index of streams in the feed */
+ char feed_filename[1024]; /* file name of the feed storage, or
+ input file name for a stream */
+ char author[512];
+ char title[512];
+ char copyright[512];
+ char comment[512];
+ pid_t pid; /* Of ffmpeg process */
+ time_t pid_start; /* Of ffmpeg process */
+ char **child_argv;
+ struct FFStream *next;
+ int bandwidth; /* bandwidth, in kbits/s */
+ /* RTSP options */
+ char *rtsp_option;
+ /* multicast specific */
+ int is_multicast;
+ struct in_addr multicast_ip;
+ int multicast_port; /* first port used for multicast */
+ int multicast_ttl;
+ int loop; /* if true, send the stream in loops (only meaningful if file) */
+
+ /* feed specific */
+ int feed_opened; /* true if someone is writing to the feed */
+ int is_feed; /* true if it is a feed */
+ int readonly; /* True if writing is prohibited to the file */
+ int conns_served;
+ int64_t bytes_served;
+ int64_t feed_max_size; /* maximum storage size, zero means unlimited */
+ int64_t feed_write_index; /* current write position in feed (it wraps round) */
+ int64_t feed_size; /* current size of feed */
+ struct FFStream *next_feed;
+} FFStream;
+
+typedef struct FeedData {
+ long long data_count;
+ float avg_frame_size; /* frame size averaged over the last frames with an exponential mean */
+} FeedData;
+
+struct sockaddr_in my_http_addr;
+struct sockaddr_in my_rtsp_addr;
+
+static char logfilename[1024];
+static HTTPContext *first_http_ctx;
+static FFStream *first_feed; /* contains only feeds */
+static FFStream *first_stream; /* contains all streams, including feeds */
+
+static void new_connection(int server_fd, int is_rtsp);
+static void close_connection(HTTPContext *c);
+
+/* HTTP handling */
+static int handle_connection(HTTPContext *c);
+static int http_parse_request(HTTPContext *c);
+static int http_send_data(HTTPContext *c);
+static void compute_stats(HTTPContext *c);
+static int open_input_stream(HTTPContext *c, const char *info);
+static int http_start_receive_data(HTTPContext *c);
+static int http_receive_data(HTTPContext *c);
+
+/* RTSP handling */
+static int rtsp_parse_request(HTTPContext *c);
+static void rtsp_cmd_describe(HTTPContext *c, const char *url);
+static void rtsp_cmd_options(HTTPContext *c, const char *url);
+static void rtsp_cmd_setup(HTTPContext *c, const char *url, RTSPHeader *h);
+static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPHeader *h);
+static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPHeader *h);
+static void rtsp_cmd_teardown(HTTPContext *c, const char *url, RTSPHeader *h);
+
+/* SDP handling */
+static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
+ struct in_addr my_ip);
+
+/* RTP handling */
+static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr,
+ FFStream *stream, const char *session_id,
+ enum RTSPProtocol rtp_protocol);
+static int rtp_new_av_stream(HTTPContext *c,
+ int stream_index, struct sockaddr_in *dest_addr,
+ HTTPContext *rtsp_c);
+
+static const char *my_program_name;
+static const char *my_program_dir;
+
+static int ffserver_debug;
+static int ffserver_daemon;
+static int no_launch;
+static int need_to_start_children;
+
+static int nb_max_connections;
+static int nb_connections;
+
+static int max_bandwidth;
+static int current_bandwidth;
+
+static long cur_time; // Making this global saves on passing it around everywhere
+
+static long gettime_ms(void)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv,NULL);
+ return (long long)tv.tv_sec * 1000 + (tv.tv_usec / 1000);
+}
+
+static FILE *logfile = NULL;
+
+static void __attribute__ ((format (printf, 1, 2))) http_log(const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+
+ if (logfile) {
+ vfprintf(logfile, fmt, ap);
+ fflush(logfile);
+ }
+ va_end(ap);
+}
+
+static char *ctime1(char *buf2)
+{
+ time_t ti;
+ char *p;
+
+ ti = time(NULL);
+ p = ctime(&ti);
+ strcpy(buf2, p);
+ p = buf2 + strlen(p) - 1;
+ if (*p == '\n')
+ *p = '\0';
+ return buf2;
+}
+
+static void log_connection(HTTPContext *c)
+{
+ char buf2[32];
+
+ if (c->suppress_log)
+ return;
+
+ http_log("%s - - [%s] \"%s %s %s\" %d %"PRId64"\n",
+ inet_ntoa(c->from_addr.sin_addr),
+ ctime1(buf2), c->method, c->url,
+ c->protocol, (c->http_error ? c->http_error : 200), c->data_count);
+}
+
+static void update_datarate(DataRateData *drd, int64_t count)
+{
+ if (!drd->time1 && !drd->count1) {
+ drd->time1 = drd->time2 = cur_time;
+ drd->count1 = drd->count2 = count;
+ } else {
+ if (cur_time - drd->time2 > 5000) {
+ drd->time1 = drd->time2;
+ drd->count1 = drd->count2;
+ drd->time2 = cur_time;
+ drd->count2 = count;
+ }
+ }
+}
+
+/* In bytes per second */
+static int compute_datarate(DataRateData *drd, int64_t count)
+{
+ if (cur_time == drd->time1)
+ return 0;
+
+ return ((count - drd->count1) * 1000) / (cur_time - drd->time1);
+}
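+
+/* Editorial note (illustrative, not part of the original source):
+ update_datarate() keeps two (time, byte count) checkpoints roughly five
+ seconds apart, and compute_datarate() reports
+ (count - count1) * 1000 / (cur_time - time1) bytes per second
+ over that window; e.g. 1048576 bytes transferred over a 6000 ms window
+ yields about 174762 bytes/s. */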
+
+
+static void start_children(FFStream *feed)
+{
+ if (no_launch)
+ return;
+
+ for (; feed; feed = feed->next) {
+ if (feed->child_argv && !feed->pid) {
+ feed->pid_start = time(0);
+
+ feed->pid = fork();
+
+ if (feed->pid < 0) {
+ fprintf(stderr, "Unable to create children\n");
+ exit(1);
+ }
+ if (!feed->pid) {
+ /* In child */
+ char pathname[1024];
+ char *slash;
+ int i;
+
+ for (i = 3; i < 256; i++) {
+ close(i);
+ }
+
+ if (!ffserver_debug) {
+ i = open("/dev/null", O_RDWR);
+ if (i)
+ dup2(i, 0);
+ dup2(i, 1);
+ dup2(i, 2);
+ if (i)
+ close(i);
+ }
+
+ pstrcpy(pathname, sizeof(pathname), my_program_name);
+
+ slash = strrchr(pathname, '/');
+ if (!slash) {
+ slash = pathname;
+ } else {
+ slash++;
+ }
+ strcpy(slash, "ffmpeg");
+
+ /* This is needed to make relative pathnames work */
+ chdir(my_program_dir);
+
+ signal(SIGPIPE, SIG_DFL);
+
+ execvp(pathname, feed->child_argv);
+
+ _exit(1);
+ }
+ }
+ }
+}
+
+/* open a listening socket */
+static int socket_open_listen(struct sockaddr_in *my_addr)
+{
+ int server_fd, tmp;
+
+ server_fd = socket(AF_INET,SOCK_STREAM,0);
+ if (server_fd < 0) {
+ perror ("socket");
+ return -1;
+ }
+
+ tmp = 1;
+ setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp));
+
+ if (bind (server_fd, (struct sockaddr *) my_addr, sizeof (*my_addr)) < 0) {
+ char bindmsg[32];
+ snprintf(bindmsg, sizeof(bindmsg), "bind(port %d)", ntohs(my_addr->sin_port));
+ perror (bindmsg);
+ close(server_fd);
+ return -1;
+ }
+
+ if (listen (server_fd, 5) < 0) {
+ perror ("listen");
+ close(server_fd);
+ return -1;
+ }
+ fcntl(server_fd, F_SETFL, O_NONBLOCK);
+
+ return server_fd;
+}
+
+/* start all multicast streams */
+static void start_multicast(void)
+{
+ FFStream *stream;
+ char session_id[32];
+ HTTPContext *rtp_c;
+ struct sockaddr_in dest_addr;
+ int default_port, stream_index;
+
+ default_port = 6000;
+ for(stream = first_stream; stream != NULL; stream = stream->next) {
+ if (stream->is_multicast) {
+ /* open the RTP connection */
+ snprintf(session_id, sizeof(session_id),
+ "%08x%08x", (int)random(), (int)random());
+
+ /* choose a port if none given */
+ if (stream->multicast_port == 0) {
+ stream->multicast_port = default_port;
+ default_port += 100;
+ }
+
+ dest_addr.sin_family = AF_INET;
+ dest_addr.sin_addr = stream->multicast_ip;
+ dest_addr.sin_port = htons(stream->multicast_port);
+
+ rtp_c = rtp_new_connection(&dest_addr, stream, session_id,
+ RTSP_PROTOCOL_RTP_UDP_MULTICAST);
+ if (!rtp_c) {
+ continue;
+ }
+ if (open_input_stream(rtp_c, "") < 0) {
+ fprintf(stderr, "Could not open input stream for stream '%s'\n",
+ stream->filename);
+ continue;
+ }
+
+ /* open each RTP stream */
+ for(stream_index = 0; stream_index < stream->nb_streams;
+ stream_index++) {
+ dest_addr.sin_port = htons(stream->multicast_port +
+ 2 * stream_index);
+ if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, NULL) < 0) {
+ fprintf(stderr, "Could not open output stream '%s/streamid=%d'\n",
+ stream->filename, stream_index);
+ exit(1);
+ }
+ }
+
+ /* change state to send data */
+ rtp_c->state = HTTPSTATE_SEND_DATA;
+ }
+ }
+}
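+
+/* Editorial note (illustrative, not part of the original source): each
+ multicast stream gets a base UDP port (6000, 6100, ... when none is
+ configured) and every elementary stream inside it is sent on
+ base + 2 * stream_index, which keeps the following odd port free for
+ the matching RTCP channel, as is conventional for RTP. */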
+
+/* main loop of the http server */
+static int http_server(void)
+{
+ int server_fd, ret, rtsp_server_fd, delay, delay1;
+ struct pollfd poll_table[HTTP_MAX_CONNECTIONS + 2], *poll_entry;
+ HTTPContext *c, *c_next;
+
+ server_fd = socket_open_listen(&my_http_addr);
+ if (server_fd < 0)
+ return -1;
+
+ rtsp_server_fd = socket_open_listen(&my_rtsp_addr);
+ if (rtsp_server_fd < 0)
+ return -1;
+
+ http_log("ffserver started.\n");
+
+ start_children(first_feed);
+
+ first_http_ctx = NULL;
+ nb_connections = 0;
+
+ start_multicast();
+
+ for(;;) {
+ poll_entry = poll_table;
+ poll_entry->fd = server_fd;
+ poll_entry->events = POLLIN;
+ poll_entry++;
+
+ poll_entry->fd = rtsp_server_fd;
+ poll_entry->events = POLLIN;
+ poll_entry++;
+
+ /* wait for events on each HTTP handle */
+ c = first_http_ctx;
+ delay = 1000;
+ while (c != NULL) {
+ int fd;
+ fd = c->fd;
+ switch(c->state) {
+ case HTTPSTATE_SEND_HEADER:
+ case RTSPSTATE_SEND_REPLY:
+ case RTSPSTATE_SEND_PACKET:
+ c->poll_entry = poll_entry;
+ poll_entry->fd = fd;
+ poll_entry->events = POLLOUT;
+ poll_entry++;
+ break;
+ case HTTPSTATE_SEND_DATA_HEADER:
+ case HTTPSTATE_SEND_DATA:
+ case HTTPSTATE_SEND_DATA_TRAILER:
+ if (!c->is_packetized) {
+ /* for TCP, we output as much as we can (may need to put a limit) */
+ c->poll_entry = poll_entry;
+ poll_entry->fd = fd;
+ poll_entry->events = POLLOUT;
+ poll_entry++;
+ } else {
+ /* when ffserver is doing the timing, we work by
+ looking every 10 ms at which packets need to
+ be sent */
+ delay1 = 10; /* one tick wait XXX: 10 ms assumed */
+ if (delay1 < delay)
+ delay = delay1;
+ }
+ break;
+ case HTTPSTATE_WAIT_REQUEST:
+ case HTTPSTATE_RECEIVE_DATA:
+ case HTTPSTATE_WAIT_FEED:
+ case RTSPSTATE_WAIT_REQUEST:
+ /* need to catch errors */
+ c->poll_entry = poll_entry;
+ poll_entry->fd = fd;
+ poll_entry->events = POLLIN;/* Maybe this will work */
+ poll_entry++;
+ break;
+ default:
+ c->poll_entry = NULL;
+ break;
+ }
+ c = c->next;
+ }
+
+ /* wait for an event on one connection. We poll at least every
+ second to handle timeouts */
+ do {
+ ret = poll(poll_table, poll_entry - poll_table, delay);
+ if (ret < 0 && errno != EAGAIN && errno != EINTR)
+ return -1;
+ } while (ret <= 0);
+
+ cur_time = gettime_ms();
+
+ if (need_to_start_children) {
+ need_to_start_children = 0;
+ start_children(first_feed);
+ }
+
+ /* now handle the events */
+ for(c = first_http_ctx; c != NULL; c = c_next) {
+ c_next = c->next;
+ if (handle_connection(c) < 0) {
+ /* close and free the connection */
+ log_connection(c);
+ close_connection(c);
+ }
+ }
+
+ poll_entry = poll_table;
+ /* new HTTP connection request ? */
+ if (poll_entry->revents & POLLIN) {
+ new_connection(server_fd, 0);
+ }
+ poll_entry++;
+ /* new RTSP connection request ? */
+ if (poll_entry->revents & POLLIN) {
+ new_connection(rtsp_server_fd, 1);
+ }
+ }
+}
+
+/* start waiting for a new HTTP/RTSP request */
+static void start_wait_request(HTTPContext *c, int is_rtsp)
+{
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = c->buffer + c->buffer_size - 1; /* leave room for '\0' */
+
+ if (is_rtsp) {
+ c->timeout = cur_time + RTSP_REQUEST_TIMEOUT;
+ c->state = RTSPSTATE_WAIT_REQUEST;
+ } else {
+ c->timeout = cur_time + HTTP_REQUEST_TIMEOUT;
+ c->state = HTTPSTATE_WAIT_REQUEST;
+ }
+}
+
+static void new_connection(int server_fd, int is_rtsp)
+{
+ struct sockaddr_in from_addr;
+ int fd, len;
+ HTTPContext *c = NULL;
+
+ len = sizeof(from_addr);
+ fd = accept(server_fd, (struct sockaddr *)&from_addr,
+ &len);
+ if (fd < 0)
+ return;
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+
+ /* XXX: should output a warning page when coming
+ close to the connection limit */
+ if (nb_connections >= nb_max_connections)
+ goto fail;
+
+ /* add a new connection */
+ c = av_mallocz(sizeof(HTTPContext));
+ if (!c)
+ goto fail;
+
+ c->fd = fd;
+ c->poll_entry = NULL;
+ c->from_addr = from_addr;
+ c->buffer_size = IOBUFFER_INIT_SIZE;
+ c->buffer = av_malloc(c->buffer_size);
+ if (!c->buffer)
+ goto fail;
+
+ c->next = first_http_ctx;
+ first_http_ctx = c;
+ nb_connections++;
+
+ start_wait_request(c, is_rtsp);
+
+ return;
+
+ fail:
+ if (c) {
+ av_free(c->buffer);
+ av_free(c);
+ }
+ close(fd);
+}
+
+static void close_connection(HTTPContext *c)
+{
+ HTTPContext **cp, *c1;
+ int i, nb_streams;
+ AVFormatContext *ctx;
+ URLContext *h;
+ AVStream *st;
+
+ /* remove connection from list */
+ cp = &first_http_ctx;
+ while ((*cp) != NULL) {
+ c1 = *cp;
+ if (c1 == c) {
+ *cp = c->next;
+ } else {
+ cp = &c1->next;
+ }
+ }
+
+ /* remove references, if any (XXX: do it faster) */
+ for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
+ if (c1->rtsp_c == c)
+ c1->rtsp_c = NULL;
+ }
+
+ /* remove connection associated resources */
+ if (c->fd >= 0)
+ close(c->fd);
+ if (c->fmt_in) {
+ /* close each frame parser */
+ for(i=0;i<c->fmt_in->nb_streams;i++) {
+ st = c->fmt_in->streams[i];
+ if (st->codec->codec) {
+ avcodec_close(st->codec);
+ }
+ }
+ av_close_input_file(c->fmt_in);
+ }
+
+ /* free RTP output streams if any */
+ nb_streams = 0;
+ if (c->stream)
+ nb_streams = c->stream->nb_streams;
+
+ for(i=0;i<nb_streams;i++) {
+ ctx = c->rtp_ctx[i];
+ if (ctx) {
+ av_write_trailer(ctx);
+ av_free(ctx);
+ }
+ h = c->rtp_handles[i];
+ if (h) {
+ url_close(h);
+ }
+ }
+
+ ctx = &c->fmt_ctx;
+
+ if (!c->last_packet_sent) {
+ if (ctx->oformat) {
+ /* prepare header */
+ if (url_open_dyn_buf(&ctx->pb) >= 0) {
+ av_write_trailer(ctx);
+ url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
+ }
+ }
+ }
+
+ for(i=0; i<ctx->nb_streams; i++)
+ av_free(ctx->streams[i]) ;
+
+ if (c->stream && !c->post && c->stream->stream_type == STREAM_TYPE_LIVE)
+ current_bandwidth -= c->stream->bandwidth;
+ av_freep(&c->pb_buffer);
+ av_freep(&c->packet_buffer);
+ av_free(c->buffer);
+ av_free(c);
+ nb_connections--;
+}
+
+static int handle_connection(HTTPContext *c)
+{
+ int len, ret;
+
+ switch(c->state) {
+ case HTTPSTATE_WAIT_REQUEST:
+ case RTSPSTATE_WAIT_REQUEST:
+ /* timeout ? */
+ if ((c->timeout - cur_time) < 0)
+ return -1;
+ if (c->poll_entry->revents & (POLLERR | POLLHUP))
+ return -1;
+
+ /* no need to read if no events */
+ if (!(c->poll_entry->revents & POLLIN))
+ return 0;
+ /* read the data */
+ read_loop:
+ len = read(c->fd, c->buffer_ptr, 1);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR)
+ return -1;
+ } else if (len == 0) {
+ return -1;
+ } else {
+ /* search for end of request. */
+ uint8_t *ptr;
+ c->buffer_ptr += len;
+ ptr = c->buffer_ptr;
+ if ((ptr >= c->buffer + 2 && !memcmp(ptr-2, "\n\n", 2)) ||
+ (ptr >= c->buffer + 4 && !memcmp(ptr-4, "\r\n\r\n", 4))) {
+ /* request found : parse it and reply */
+ if (c->state == HTTPSTATE_WAIT_REQUEST) {
+ ret = http_parse_request(c);
+ } else {
+ ret = rtsp_parse_request(c);
+ }
+ if (ret < 0)
+ return -1;
+ } else if (ptr >= c->buffer_end) {
+ /* request too long: cannot do anything */
+ return -1;
+ } else goto read_loop;
+ }
+ break;
+
+ case HTTPSTATE_SEND_HEADER:
+ if (c->poll_entry->revents & (POLLERR | POLLHUP))
+ return -1;
+
+ /* no need to write if no events */
+ if (!(c->poll_entry->revents & POLLOUT))
+ return 0;
+ len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ av_freep(&c->pb_buffer);
+ return -1;
+ }
+ } else {
+ c->buffer_ptr += len;
+ if (c->stream)
+ c->stream->bytes_served += len;
+ c->data_count += len;
+ if (c->buffer_ptr >= c->buffer_end) {
+ av_freep(&c->pb_buffer);
+ /* if error, exit */
+ if (c->http_error) {
+ return -1;
+ }
+ /* all the buffer was sent : synchronize to the incoming stream */
+ c->state = HTTPSTATE_SEND_DATA_HEADER;
+ c->buffer_ptr = c->buffer_end = c->buffer;
+ }
+ }
+ break;
+
+ case HTTPSTATE_SEND_DATA:
+ case HTTPSTATE_SEND_DATA_HEADER:
+ case HTTPSTATE_SEND_DATA_TRAILER:
+ /* for packetized output, we consider we can always write (the
+ input stream sets the speed). It may be better to verify
+ that we do not rely too much on the kernel queues */
+ if (!c->is_packetized) {
+ if (c->poll_entry->revents & (POLLERR | POLLHUP))
+ return -1;
+
+ /* no need to read if no events */
+ if (!(c->poll_entry->revents & POLLOUT))
+ return 0;
+ }
+ if (http_send_data(c) < 0)
+ return -1;
+ break;
+ case HTTPSTATE_RECEIVE_DATA:
+ /* no need to read if no events */
+ if (c->poll_entry->revents & (POLLERR | POLLHUP))
+ return -1;
+ if (!(c->poll_entry->revents & POLLIN))
+ return 0;
+ if (http_receive_data(c) < 0)
+ return -1;
+ break;
+ case HTTPSTATE_WAIT_FEED:
+ /* no need to read if no events */
+ if (c->poll_entry->revents & (POLLIN | POLLERR | POLLHUP))
+ return -1;
+
+ /* nothing to do, we'll be woken up by incoming feed packets */
+ break;
+
+ case RTSPSTATE_SEND_REPLY:
+ if (c->poll_entry->revents & (POLLERR | POLLHUP)) {
+ av_freep(&c->pb_buffer);
+ return -1;
+ }
+ /* no need to write if no events */
+ if (!(c->poll_entry->revents & POLLOUT))
+ return 0;
+ len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ av_freep(&c->pb_buffer);
+ return -1;
+ }
+ } else {
+ c->buffer_ptr += len;
+ c->data_count += len;
+ if (c->buffer_ptr >= c->buffer_end) {
+ /* all the buffer was sent : wait for a new request */
+ av_freep(&c->pb_buffer);
+ start_wait_request(c, 1);
+ }
+ }
+ break;
+ case RTSPSTATE_SEND_PACKET:
+ if (c->poll_entry->revents & (POLLERR | POLLHUP)) {
+ av_freep(&c->packet_buffer);
+ return -1;
+ }
+ /* no need to write if no events */
+ if (!(c->poll_entry->revents & POLLOUT))
+ return 0;
+ len = write(c->fd, c->packet_buffer_ptr,
+ c->packet_buffer_end - c->packet_buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ av_freep(&c->packet_buffer);
+ return -1;
+ }
+ } else {
+ c->packet_buffer_ptr += len;
+ if (c->packet_buffer_ptr >= c->packet_buffer_end) {
+ /* all the buffer was sent : wait for a new request */
+ av_freep(&c->packet_buffer);
+ c->state = RTSPSTATE_WAIT_REQUEST;
+ }
+ }
+ break;
+ case HTTPSTATE_READY:
+ /* nothing to do */
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static int extract_rates(char *rates, int ratelen, const char *request)
+{
+ const char *p;
+
+ for (p = request; *p && *p != '\r' && *p != '\n'; ) {
+ if (strncasecmp(p, "Pragma:", 7) == 0) {
+ const char *q = p + 7;
+
+ while (*q && *q != '\n' && isspace(*q))
+ q++;
+
+ if (strncasecmp(q, "stream-switch-entry=", 20) == 0) {
+ int stream_no;
+ int rate_no;
+
+ q += 20;
+
+ memset(rates, 0xff, ratelen);
+
+ while (1) {
+ while (*q && *q != '\n' && *q != ':')
+ q++;
+
+ if (sscanf(q, ":%d:%d", &stream_no, &rate_no) != 2) {
+ break;
+ }
+ stream_no--;
+ if (stream_no < ratelen && stream_no >= 0) {
+ rates[stream_no] = rate_no;
+ }
+
+ while (*q && *q != '\n' && !isspace(*q))
+ q++;
+ }
+
+ return 1;
+ }
+ }
+ p = strchr(p, '\n');
+ if (!p)
+ break;
+
+ p++;
+ }
+
+ return 0;
+}
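+
+/* Editorial note (illustrative, not part of the original source): the
+ loop above scans the request for ":stream:rate" pairs in a
+ "Pragma: stream-switch-entry=..." header, e.g. (hypothetical request)
+ Pragma: stream-switch-entry=ffff:1:0 ffff:2:1
+ Streams are numbered from 1 in the header, so this would set
+ rates[0] = 0 and rates[1] = 1; entries that are not mentioned keep the
+ 0xff filler written by the memset(). */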
+
+static int find_stream_in_feed(FFStream *feed, AVCodecContext *codec, int bit_rate)
+{
+ int i;
+ int best_bitrate = 100000000;
+ int best = -1;
+
+ for (i = 0; i < feed->nb_streams; i++) {
+ AVCodecContext *feed_codec = feed->streams[i]->codec;
+
+ if (feed_codec->codec_id != codec->codec_id ||
+ feed_codec->sample_rate != codec->sample_rate ||
+ feed_codec->width != codec->width ||
+ feed_codec->height != codec->height) {
+ continue;
+ }
+
+ /* Potential stream */
+
+ /* We want the fastest stream at or below bit_rate, or failing
+ * that, the slowest stream above bit_rate
+ */
+
+ if (feed_codec->bit_rate <= bit_rate) {
+ if (best_bitrate > bit_rate || feed_codec->bit_rate > best_bitrate) {
+ best_bitrate = feed_codec->bit_rate;
+ best = i;
+ }
+ } else {
+ if (feed_codec->bit_rate < best_bitrate) {
+ best_bitrate = feed_codec->bit_rate;
+ best = i;
+ }
+ }
+ }
+
+ return best;
+}
+
+static int modify_current_stream(HTTPContext *c, char *rates)
+{
+ int i;
+ FFStream *req = c->stream;
+ int action_required = 0;
+
+ /* Not much we can do for a feed */
+ if (!req->feed)
+ return 0;
+
+ for (i = 0; i < req->nb_streams; i++) {
+ AVCodecContext *codec = req->streams[i]->codec;
+
+ switch(rates[i]) {
+ case 0:
+ c->switch_feed_streams[i] = req->feed_streams[i];
+ break;
+ case 1:
+ c->switch_feed_streams[i] = find_stream_in_feed(req->feed, codec, codec->bit_rate / 2);
+ break;
+ case 2:
+ /* Wants off or slow */
+ c->switch_feed_streams[i] = find_stream_in_feed(req->feed, codec, codec->bit_rate / 4);
+#ifdef WANTS_OFF
+ /* This doesn't work well when it turns off the only stream! */
+ c->switch_feed_streams[i] = -2;
+ c->feed_streams[i] = -2;
+#endif
+ break;
+ }
+
+ if (c->switch_feed_streams[i] >= 0 && c->switch_feed_streams[i] != c->feed_streams[i])
+ action_required = 1;
+ }
+
+ return action_required;
+}
+
+
+static void do_switch_stream(HTTPContext *c, int i)
+{
+ if (c->switch_feed_streams[i] >= 0) {
+#ifdef PHILIP
+ c->feed_streams[i] = c->switch_feed_streams[i];
+#endif
+
+ /* Now update the stream */
+ }
+ c->switch_feed_streams[i] = -1;
+}
+
+/* XXX: factorize in utils.c ? */
+/* XXX: take care with different space meaning */
+static void skip_spaces(const char **pp)
+{
+ const char *p;
+ p = *pp;
+ while (*p == ' ' || *p == '\t')
+ p++;
+ *pp = p;
+}
+
+static void get_word(char *buf, int buf_size, const char **pp)
+{
+ const char *p;
+ char *q;
+
+ p = *pp;
+ skip_spaces(&p);
+ q = buf;
+ while (!isspace(*p) && *p != '\0') {
+ if ((q - buf) < buf_size - 1)
+ *q++ = *p;
+ p++;
+ }
+ if (buf_size > 0)
+ *q = '\0';
+ *pp = p;
+}
+
+static int validate_acl(FFStream *stream, HTTPContext *c)
+{
+ enum IPAddressAction last_action = IP_DENY;
+ IPAddressACL *acl;
+ struct in_addr *src = &c->from_addr.sin_addr;
+ unsigned long src_addr = ntohl(src->s_addr);
+
+ for (acl = stream->acl; acl; acl = acl->next) {
+ if (src_addr >= acl->first.s_addr && src_addr <= acl->last.s_addr) {
+ return (acl->action == IP_ALLOW) ? 1 : 0;
+ }
+ last_action = acl->action;
+ }
+
+ /* Nothing matched, so return the opposite of the last action */
+ return (last_action == IP_DENY) ? 1 : 0;
+}
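+
+/* Illustrative ACL evaluation (addresses made up): with a single rule
+   "allow 10.0.0.0 10.0.0.255", a client at 10.0.0.5 matches and is
+   allowed; a client at 192.168.1.1 matches nothing, so the opposite of
+   the last action (allow) applies and it is denied. With no rules at
+   all, last_action stays IP_DENY and every client is allowed. */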
+
+/* compute the real filename of a file by matching it without its
+ extensions to all the stream filenames */
+static void compute_real_filename(char *filename, int max_size)
+{
+ char file1[1024];
+ char file2[1024];
+ char *p;
+ FFStream *stream;
+
+ /* compute filename by matching without the file extensions */
+ pstrcpy(file1, sizeof(file1), filename);
+ p = strrchr(file1, '.');
+ if (p)
+ *p = '\0';
+ for(stream = first_stream; stream != NULL; stream = stream->next) {
+ pstrcpy(file2, sizeof(file2), stream->filename);
+ p = strrchr(file2, '.');
+ if (p)
+ *p = '\0';
+ if (!strcmp(file1, file2)) {
+ pstrcpy(filename, max_size, stream->filename);
+ break;
+ }
+ }
+}
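+
+/* For example (stream names are hypothetical): if a stream "test1.mpg"
+   is configured and the client asks for "test1.rtsp", the extension-less
+   names "test1" match and filename is rewritten to "test1.mpg". */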
+
+enum RedirType {
+ REDIR_NONE,
+ REDIR_ASX,
+ REDIR_RAM,
+ REDIR_ASF,
+ REDIR_RTSP,
+ REDIR_SDP,
+};
+
+/* parse http request and prepare header */
+static int http_parse_request(HTTPContext *c)
+{
+ char *p;
+ enum RedirType redir_type;
+ char cmd[32];
+ char info[1024], *filename;
+ char url[1024], *q;
+ char protocol[32];
+ char msg[1024];
+ const char *mime_type;
+ FFStream *stream;
+ int i;
+ char ratebuf[32];
+ char *useragent = 0;
+
+ p = c->buffer;
+ get_word(cmd, sizeof(cmd), (const char **)&p);
+ pstrcpy(c->method, sizeof(c->method), cmd);
+
+ if (!strcmp(cmd, "GET"))
+ c->post = 0;
+ else if (!strcmp(cmd, "POST"))
+ c->post = 1;
+ else
+ return -1;
+
+ get_word(url, sizeof(url), (const char **)&p);
+ pstrcpy(c->url, sizeof(c->url), url);
+
+ get_word(protocol, sizeof(protocol), (const char **)&p);
+ if (strcmp(protocol, "HTTP/1.0") && strcmp(protocol, "HTTP/1.1"))
+ return -1;
+
+ pstrcpy(c->protocol, sizeof(c->protocol), protocol);
+
+ if (ffserver_debug)
+ http_log("New connection: %s %s\n", cmd, url);
+
+ /* find the filename and the optional info string in the request */
+ p = url;
+ if (*p == '/')
+ p++;
+ filename = p;
+ p = strchr(p, '?');
+ if (p) {
+ pstrcpy(info, sizeof(info), p);
+ *p = '\0';
+ } else {
+ info[0] = '\0';
+ }
+
+ for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
+ if (strncasecmp(p, "User-Agent:", 11) == 0) {
+ useragent = p + 11;
+ if (*useragent && *useragent != '\n' && isspace(*useragent))
+ useragent++;
+ break;
+ }
+ p = strchr(p, '\n');
+ if (!p)
+ break;
+
+ p++;
+ }
+
+ redir_type = REDIR_NONE;
+ if (match_ext(filename, "asx")) {
+ redir_type = REDIR_ASX;
+ filename[strlen(filename)-1] = 'f';
+ } else if (match_ext(filename, "asf") &&
+ (!useragent || strncasecmp(useragent, "NSPlayer", 8) != 0)) {
+ /* if this isn't WMP or a lookalike, return the redirector file */
+ redir_type = REDIR_ASF;
+ } else if (match_ext(filename, "rpm,ram")) {
+ redir_type = REDIR_RAM;
+ strcpy(filename + strlen(filename)-2, "m");
+ } else if (match_ext(filename, "rtsp")) {
+ redir_type = REDIR_RTSP;
+ compute_real_filename(filename, sizeof(url) - 1);
+ } else if (match_ext(filename, "sdp")) {
+ redir_type = REDIR_SDP;
+ compute_real_filename(filename, sizeof(url) - 1);
+ }
+
+ stream = first_stream;
+ while (stream != NULL) {
+ if (!strcmp(stream->filename, filename) && validate_acl(stream, c))
+ break;
+ stream = stream->next;
+ }
+ if (stream == NULL) {
+ snprintf(msg, sizeof(msg), "File '%s' not found", url);
+ goto send_error;
+ }
+
+ c->stream = stream;
+ memcpy(c->feed_streams, stream->feed_streams, sizeof(c->feed_streams));
+ memset(c->switch_feed_streams, -1, sizeof(c->switch_feed_streams));
+
+ if (stream->stream_type == STREAM_TYPE_REDIRECT) {
+ c->http_error = 301;
+ q = c->buffer;
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "HTTP/1.0 301 Moved\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Location: %s\r\n", stream->feed_filename);
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Content-type: text/html\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<html><head><title>Moved</title></head><body>\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "You should be <a href=\"%s\">redirected</a>.\r\n", stream->feed_filename);
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "</body></html>\r\n");
+
+ /* prepare output buffer */
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = q;
+ c->state = HTTPSTATE_SEND_HEADER;
+ return 0;
+ }
+
+ /* If this is WMP, get the rate information */
+ if (extract_rates(ratebuf, sizeof(ratebuf), c->buffer)) {
+ if (modify_current_stream(c, ratebuf)) {
+ for (i = 0; i < sizeof(c->feed_streams) / sizeof(c->feed_streams[0]); i++) {
+ if (c->switch_feed_streams[i] >= 0)
+ do_switch_stream(c, i);
+ }
+ }
+ }
+
+ if (c->post == 0 && stream->stream_type == STREAM_TYPE_LIVE) {
+ current_bandwidth += stream->bandwidth;
+ }
+
+ if (c->post == 0 && max_bandwidth < current_bandwidth) {
+ c->http_error = 200;
+ q = c->buffer;
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "HTTP/1.0 200 Server too busy\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Content-type: text/html\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<html><head><title>Too busy</title></head><body>\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<p>The server is too busy to serve your request at this time.</p>\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<p>The bandwidth being served (including your stream) is %dkbit/sec, and this exceeds the limit of %dkbit/sec.</p>\r\n",
+ current_bandwidth, max_bandwidth);
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "</body></html>\r\n");
+
+ /* prepare output buffer */
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = q;
+ c->state = HTTPSTATE_SEND_HEADER;
+ return 0;
+ }
+
+ if (redir_type != REDIR_NONE) {
+ char *hostinfo = 0;
+
+ for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
+ if (strncasecmp(p, "Host:", 5) == 0) {
+ hostinfo = p + 5;
+ break;
+ }
+ p = strchr(p, '\n');
+ if (!p)
+ break;
+
+ p++;
+ }
+
+ if (hostinfo) {
+ char *eoh;
+ char hostbuf[260];
+
+ while (isspace(*hostinfo))
+ hostinfo++;
+
+ eoh = strchr(hostinfo, '\n');
+ if (eoh) {
+ if (eoh[-1] == '\r')
+ eoh--;
+
+ if (eoh - hostinfo < sizeof(hostbuf) - 1) {
+ memcpy(hostbuf, hostinfo, eoh - hostinfo);
+ hostbuf[eoh - hostinfo] = 0;
+
+ c->http_error = 200;
+ q = c->buffer;
+ switch(redir_type) {
+ case REDIR_ASX:
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "HTTP/1.0 200 ASX Follows\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Content-type: video/x-ms-asf\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<ASX Version=\"3\">\r\n");
+ //q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<!-- Autogenerated by ffserver -->\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<ENTRY><REF HREF=\"http://%s/%s%s\"/></ENTRY>\r\n",
+ hostbuf, filename, info);
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "</ASX>\r\n");
+ break;
+ case REDIR_RAM:
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "HTTP/1.0 200 RAM Follows\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Content-type: audio/x-pn-realaudio\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "# Autogenerated by ffserver\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "http://%s/%s%s\r\n",
+ hostbuf, filename, info);
+ break;
+ case REDIR_ASF:
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "HTTP/1.0 200 ASF Redirect follows\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Content-type: video/x-ms-asf\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "[Reference]\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Ref1=http://%s/%s%s\r\n",
+ hostbuf, filename, info);
+ break;
+ case REDIR_RTSP:
+ {
+ char hostname[256], *p;
+ /* extract only hostname */
+ pstrcpy(hostname, sizeof(hostname), hostbuf);
+ p = strrchr(hostname, ':');
+ if (p)
+ *p = '\0';
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "HTTP/1.0 200 RTSP Redirect follows\r\n");
+ /* XXX: incorrect mime type ? */
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Content-type: application/x-rtsp\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "rtsp://%s:%d/%s\r\n",
+ hostname, ntohs(my_rtsp_addr.sin_port),
+ filename);
+ }
+ break;
+ case REDIR_SDP:
+ {
+ uint8_t *sdp_data;
+ int sdp_data_size, len;
+ struct sockaddr_in my_addr;
+
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "HTTP/1.0 200 OK\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Content-type: application/sdp\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "\r\n");
+
+ len = sizeof(my_addr);
+ getsockname(c->fd, (struct sockaddr *)&my_addr, &len);
+
+ /* XXX: should use a dynamic buffer */
+ sdp_data_size = prepare_sdp_description(stream,
+ &sdp_data,
+ my_addr.sin_addr);
+ if (sdp_data_size > 0) {
+ memcpy(q, sdp_data, sdp_data_size);
+ q += sdp_data_size;
+ *q = '\0';
+ av_free(sdp_data);
+ }
+ }
+ break;
+ default:
+ av_abort();
+ break;
+ }
+
+ /* prepare output buffer */
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = q;
+ c->state = HTTPSTATE_SEND_HEADER;
+ return 0;
+ }
+ }
+ }
+
+ snprintf(msg, sizeof(msg), "ASX/RAM file not handled");
+ goto send_error;
+ }
+
+ stream->conns_served++;
+
+ /* XXX: add authentication and IP matching here */
+
+ if (c->post) {
+ /* if post, it means a feed is being sent */
+ if (!stream->is_feed) {
+ /* However, it might be a status report from WMP! Let's log the data,
+ * as it might come in handy one day.
+ */
+ char *logline = 0;
+ int client_id = 0;
+
+ for (p = c->buffer; *p && *p != '\r' && *p != '\n'; ) {
+ if (strncasecmp(p, "Pragma: log-line=", 17) == 0) {
+ logline = p;
+ break;
+ }
+ if (strncasecmp(p, "Pragma: client-id=", 18) == 0) {
+ client_id = strtol(p + 18, 0, 10);
+ }
+ p = strchr(p, '\n');
+ if (!p)
+ break;
+
+ p++;
+ }
+
+ if (logline) {
+ char *eol = strchr(logline, '\n');
+
+ logline += 17;
+
+ if (eol) {
+ if (eol[-1] == '\r')
+ eol--;
+ http_log("%.*s\n", (int) (eol - logline), logline);
+ c->suppress_log = 1;
+ }
+ }
+
+#ifdef DEBUG_WMP
+ http_log("\nGot request:\n%s\n", c->buffer);
+#endif
+
+ if (client_id && extract_rates(ratebuf, sizeof(ratebuf), c->buffer)) {
+ HTTPContext *wmpc;
+
+ /* Now we have to find the client_id */
+ for (wmpc = first_http_ctx; wmpc; wmpc = wmpc->next) {
+ if (wmpc->wmp_client_id == client_id)
+ break;
+ }
+
+ if (wmpc) {
+ if (modify_current_stream(wmpc, ratebuf)) {
+ wmpc->switch_pending = 1;
+ }
+ }
+ }
+
+ snprintf(msg, sizeof(msg), "POST command not handled");
+ c->stream = 0;
+ goto send_error;
+ }
+ if (http_start_receive_data(c) < 0) {
+ snprintf(msg, sizeof(msg), "could not open feed");
+ goto send_error;
+ }
+ c->http_error = 0;
+ c->state = HTTPSTATE_RECEIVE_DATA;
+ return 0;
+ }
+
+#ifdef DEBUG_WMP
+ if (strcmp(stream->filename + strlen(stream->filename) - 4, ".asf") == 0) {
+ http_log("\nGot request:\n%s\n", c->buffer);
+ }
+#endif
+
+ if (c->stream->stream_type == STREAM_TYPE_STATUS)
+ goto send_stats;
+
+ /* open input stream */
+ if (open_input_stream(c, info) < 0) {
+ snprintf(msg, sizeof(msg), "Input stream corresponding to '%s' not found", url);
+ goto send_error;
+ }
+
+ /* prepare http header */
+ q = c->buffer;
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "HTTP/1.0 200 OK\r\n");
+ mime_type = c->stream->fmt->mime_type;
+ if (!mime_type)
+ mime_type = "application/x-octet_stream";
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Pragma: no-cache\r\n");
+
+ /* for asf, we need extra headers */
+ if (!strcmp(c->stream->fmt->name,"asf_stream")) {
+ /* Need to allocate a client id */
+
+ c->wmp_client_id = random() & 0x7fffffff;
+
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id);
+ }
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Content-Type: %s\r\n", mime_type);
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "\r\n");
+
+ /* prepare output buffer */
+ c->http_error = 0;
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = q;
+ c->state = HTTPSTATE_SEND_HEADER;
+ return 0;
+ send_error:
+ c->http_error = 404;
+ q = c->buffer;
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "HTTP/1.0 404 Not Found\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "Content-type: %s\r\n", "text/html");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "\r\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<HTML>\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<HEAD><TITLE>404 Not Found</TITLE></HEAD>\n");
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "<BODY>%s</BODY>\n", msg);
+ q += snprintf(q, c->buffer_size - (q - (char *) c->buffer), "</HTML>\n");
+
+ /* prepare output buffer */
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = q;
+ c->state = HTTPSTATE_SEND_HEADER;
+ return 0;
+ send_stats:
+ compute_stats(c);
+ c->http_error = 200; /* horrible : we use this value to avoid
+ going to the send data state */
+ c->state = HTTPSTATE_SEND_HEADER;
+ return 0;
+}
+
+static void fmt_bytecount(ByteIOContext *pb, int64_t count)
+{
+ static const char *suffix = " kMGTP";
+ const char *s;
+
+ for (s = suffix; count >= 100000 && s[1]; count /= 1000, s++) {
+ }
+
+ url_fprintf(pb, "%"PRId64"%c", count, *s);
+}
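+
+/* Illustrative output: counts below 100000 are printed with a trailing
+   space (e.g. "12345 "), 1234567 becomes "1234k" and 123456789 becomes
+   "123M"; each division by 1000 moves one step along " kMGTP". */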
+
+static void compute_stats(HTTPContext *c)
+{
+ HTTPContext *c1;
+ FFStream *stream;
+ char *p;
+ time_t ti;
+ int i, len;
+ ByteIOContext pb1, *pb = &pb1;
+
+ if (url_open_dyn_buf(pb) < 0) {
+ /* XXX: return an error ? */
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = c->buffer;
+ return;
+ }
+
+ url_fprintf(pb, "HTTP/1.0 200 OK\r\n");
+ url_fprintf(pb, "Content-type: %s\r\n", "text/html");
+ url_fprintf(pb, "Pragma: no-cache\r\n");
+ url_fprintf(pb, "\r\n");
+
+ url_fprintf(pb, "<HEAD><TITLE>FFServer Status</TITLE>\n");
+ if (c->stream->feed_filename) {
+ url_fprintf(pb, "<link rel=\"shortcut icon\" href=\"%s\">\n", c->stream->feed_filename);
+ }
+ url_fprintf(pb, "</HEAD>\n<BODY>");
+ url_fprintf(pb, "<H1>FFServer Status</H1>\n");
+ /* format status */
+ url_fprintf(pb, "<H2>Available Streams</H2>\n");
+ url_fprintf(pb, "<TABLE cellspacing=0 cellpadding=4>\n");
+ url_fprintf(pb, "<TR><Th valign=top>Path<th align=left>Served<br>Conns<Th><br>bytes<Th valign=top>Format<Th>Bit rate<br>kbits/s<Th align=left>Video<br>kbits/s<th><br>Codec<Th align=left>Audio<br>kbits/s<th><br>Codec<Th align=left valign=top>Feed\n");
+ stream = first_stream;
+ while (stream != NULL) {
+ char sfilename[1024];
+ char *eosf;
+
+ if (stream->feed != stream) {
+ pstrcpy(sfilename, sizeof(sfilename) - 10, stream->filename);
+ eosf = sfilename + strlen(sfilename);
+ if (eosf - sfilename >= 4) {
+ if (strcmp(eosf - 4, ".asf") == 0) {
+ strcpy(eosf - 4, ".asx");
+ } else if (strcmp(eosf - 3, ".rm") == 0) {
+ strcpy(eosf - 3, ".ram");
+ } else if (stream->fmt == &rtp_muxer) {
+ /* generate a sample RTSP redirector if
+ unicast, or an SDP redirector if
+ multicast */
+ eosf = strrchr(sfilename, '.');
+ if (!eosf)
+ eosf = sfilename + strlen(sfilename);
+ if (stream->is_multicast)
+ strcpy(eosf, ".sdp");
+ else
+ strcpy(eosf, ".rtsp");
+ }
+ }
+
+ url_fprintf(pb, "<TR><TD><A HREF=\"/%s\">%s</A> ",
+ sfilename, stream->filename);
+ url_fprintf(pb, "<td align=right> %d <td align=right> ",
+ stream->conns_served);
+ fmt_bytecount(pb, stream->bytes_served);
+ switch(stream->stream_type) {
+ case STREAM_TYPE_LIVE:
+ {
+ int audio_bit_rate = 0;
+ int video_bit_rate = 0;
+ const char *audio_codec_name = "";
+ const char *video_codec_name = "";
+ const char *audio_codec_name_extra = "";
+ const char *video_codec_name_extra = "";
+
+ for(i=0;i<stream->nb_streams;i++) {
+ AVStream *st = stream->streams[i];
+ AVCodec *codec = avcodec_find_encoder(st->codec->codec_id);
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ audio_bit_rate += st->codec->bit_rate;
+ if (codec) {
+ if (*audio_codec_name)
+ audio_codec_name_extra = "...";
+ audio_codec_name = codec->name;
+ }
+ break;
+ case CODEC_TYPE_VIDEO:
+ video_bit_rate += st->codec->bit_rate;
+ if (codec) {
+ if (*video_codec_name)
+ video_codec_name_extra = "...";
+ video_codec_name = codec->name;
+ }
+ break;
+ case CODEC_TYPE_DATA:
+ video_bit_rate += st->codec->bit_rate;
+ break;
+ default:
+ av_abort();
+ }
+ }
+ url_fprintf(pb, "<TD align=center> %s <TD align=right> %d <TD align=right> %d <TD> %s %s <TD align=right> %d <TD> %s %s",
+ stream->fmt->name,
+ stream->bandwidth,
+ video_bit_rate / 1000, video_codec_name, video_codec_name_extra,
+ audio_bit_rate / 1000, audio_codec_name, audio_codec_name_extra);
+ if (stream->feed) {
+ url_fprintf(pb, "<TD>%s", stream->feed->filename);
+ } else {
+ url_fprintf(pb, "<TD>%s", stream->feed_filename);
+ }
+ url_fprintf(pb, "\n");
+ }
+ break;
+ default:
+ url_fprintf(pb, "<TD align=center> - <TD align=right> - <TD align=right> - <td><td align=right> - <TD>\n");
+ break;
+ }
+ }
+ stream = stream->next;
+ }
+ url_fprintf(pb, "</TABLE>\n");
+
+ stream = first_stream;
+ while (stream != NULL) {
+ if (stream->feed == stream) {
+ url_fprintf(pb, "<h2>Feed %s</h2>", stream->filename);
+ if (stream->pid) {
+ url_fprintf(pb, "Running as pid %d.\n", stream->pid);
+
+#if defined(linux) && !defined(CONFIG_NOCUTILS)
+ {
+ FILE *pid_stat;
+ char ps_cmd[64];
+
+ /* This is somewhat Linux-specific, I guess */
+ snprintf(ps_cmd, sizeof(ps_cmd),
+ "ps -o \"%%cpu,cputime\" --no-headers %d",
+ stream->pid);
+
+ pid_stat = popen(ps_cmd, "r");
+ if (pid_stat) {
+ char cpuperc[10];
+ char cpuused[64];
+
+ if (fscanf(pid_stat, "%9s %63s", cpuperc,
+ cpuused) == 2) {
+ url_fprintf(pb, "Currently using %s%% of the cpu. Total time used %s.\n",
+ cpuperc, cpuused);
+ }
+ pclose(pid_stat);
+ }
+ }
+#endif
+
+ url_fprintf(pb, "<p>");
+ }
+ url_fprintf(pb, "<table cellspacing=0 cellpadding=4><tr><th>Stream<th>type<th>kbits/s<th align=left>codec<th align=left>Parameters\n");
+
+ for (i = 0; i < stream->nb_streams; i++) {
+ AVStream *st = stream->streams[i];
+ AVCodec *codec = avcodec_find_encoder(st->codec->codec_id);
+ const char *type = "unknown";
+ char parameters[64];
+
+ parameters[0] = 0;
+
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ type = "audio";
+ break;
+ case CODEC_TYPE_VIDEO:
+ type = "video";
+ snprintf(parameters, sizeof(parameters), "%dx%d, q=%d-%d, fps=%d", st->codec->width, st->codec->height,
+ st->codec->qmin, st->codec->qmax, st->codec->time_base.den / st->codec->time_base.num);
+ break;
+ default:
+ av_abort();
+ }
+ url_fprintf(pb, "<tr><td align=right>%d<td>%s<td align=right>%d<td>%s<td>%s\n",
+ i, type, st->codec->bit_rate/1000, codec ? codec->name : "", parameters);
+ }
+ url_fprintf(pb, "</table>\n");
+
+ }
+ stream = stream->next;
+ }
+
+#if 0
+ {
+ float avg;
+ AVCodecContext *enc;
+ char buf[1024];
+
+ /* feed status */
+ stream = first_feed;
+ while (stream != NULL) {
+ url_fprintf(pb, "<H1>Feed '%s'</H1>\n", stream->filename);
+ url_fprintf(pb, "<TABLE>\n");
+ url_fprintf(pb, "<TR><TD>Parameters<TD>Frame count<TD>Size<TD>Avg bitrate (kbits/s)\n");
+ for(i=0;i<stream->nb_streams;i++) {
+ AVStream *st = stream->streams[i];
+ FeedData *fdata = st->priv_data;
+ enc = st->codec;
+
+ avcodec_string(buf, sizeof(buf), enc);
+ avg = fdata->avg_frame_size * (float)enc->rate * 8.0;
+ if (enc->codec->type == CODEC_TYPE_AUDIO && enc->frame_size > 0)
+ avg /= enc->frame_size;
+ url_fprintf(pb, "<TR><TD>%s <TD> %d <TD> %"PRId64" <TD> %0.1f\n",
+ buf, enc->frame_number, fdata->data_count, avg / 1000.0);
+ }
+ url_fprintf(pb, "</TABLE>\n");
+ stream = stream->next_feed;
+ }
+ }
+#endif
+
+ /* connection status */
+ url_fprintf(pb, "<H2>Connection Status</H2>\n");
+
+ url_fprintf(pb, "Number of connections: %d / %d<BR>\n",
+ nb_connections, nb_max_connections);
+
+ url_fprintf(pb, "Bandwidth in use: %dk / %dk<BR>\n",
+ current_bandwidth, max_bandwidth);
+
+ url_fprintf(pb, "<TABLE>\n");
+ url_fprintf(pb, "<TR><th>#<th>File<th>IP<th>Proto<th>State<th>Target bits/sec<th>Actual bits/sec<th>Bytes transferred\n");
+ c1 = first_http_ctx;
+ i = 0;
+ while (c1 != NULL) {
+ int bitrate;
+ int j;
+
+ bitrate = 0;
+ if (c1->stream) {
+ for (j = 0; j < c1->stream->nb_streams; j++) {
+ if (!c1->stream->feed) {
+ bitrate += c1->stream->streams[j]->codec->bit_rate;
+ } else {
+ if (c1->feed_streams[j] >= 0) {
+ bitrate += c1->stream->feed->streams[c1->feed_streams[j]]->codec->bit_rate;
+ }
+ }
+ }
+ }
+
+ i++;
+ p = inet_ntoa(c1->from_addr.sin_addr);
+ url_fprintf(pb, "<TR><TD><B>%d</B><TD>%s%s<TD>%s<TD>%s<TD>%s<td align=right>",
+ i,
+ c1->stream ? c1->stream->filename : "",
+ c1->state == HTTPSTATE_RECEIVE_DATA ? "(input)" : "",
+ p,
+ c1->protocol,
+ http_state[c1->state]);
+ fmt_bytecount(pb, bitrate);
+ url_fprintf(pb, "<td align=right>");
+ fmt_bytecount(pb, compute_datarate(&c1->datarate, c1->data_count) * 8);
+ url_fprintf(pb, "<td align=right>");
+ fmt_bytecount(pb, c1->data_count);
+ url_fprintf(pb, "\n");
+ c1 = c1->next;
+ }
+ url_fprintf(pb, "</TABLE>\n");
+
+ /* date */
+ ti = time(NULL);
+ p = ctime(&ti);
+ url_fprintf(pb, "<HR size=1 noshade>Generated at %s", p);
+ url_fprintf(pb, "</BODY>\n</HTML>\n");
+
+ len = url_close_dyn_buf(pb, &c->pb_buffer);
+ c->buffer_ptr = c->pb_buffer;
+ c->buffer_end = c->pb_buffer + len;
+}
+
+/* check if the parser needs to be opened for stream i */
+static void open_parser(AVFormatContext *s, int i)
+{
+ AVStream *st = s->streams[i];
+ AVCodec *codec;
+
+ if (!st->codec->codec) {
+ codec = avcodec_find_decoder(st->codec->codec_id);
+ if (codec && (codec->capabilities & CODEC_CAP_PARSE_ONLY)) {
+ st->codec->parse_only = 1;
+ if (avcodec_open(st->codec, codec) < 0) {
+ st->codec->parse_only = 0;
+ }
+ }
+ }
+}
+
+static int open_input_stream(HTTPContext *c, const char *info)
+{
+ char buf[128];
+ char input_filename[1024];
+ AVFormatContext *s;
+ int buf_size, i;
+ int64_t stream_pos;
+
+ /* find file name */
+ if (c->stream->feed) {
+ strcpy(input_filename, c->stream->feed->feed_filename);
+ buf_size = FFM_PACKET_SIZE;
+ /* compute position (absolute time) */
+ if (find_info_tag(buf, sizeof(buf), "date", info)) {
+ stream_pos = parse_date(buf, 0);
+ } else if (find_info_tag(buf, sizeof(buf), "buffer", info)) {
+ int prebuffer = strtol(buf, 0, 10);
+ stream_pos = av_gettime() - prebuffer * (int64_t)1000000;
+ } else {
+ stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000;
+ }
+ } else {
+ strcpy(input_filename, c->stream->feed_filename);
+ buf_size = 0;
+ /* compute position (relative time) */
+ if (find_info_tag(buf, sizeof(buf), "date", info)) {
+ stream_pos = parse_date(buf, 1);
+ } else {
+ stream_pos = 0;
+ }
+ }
+ if (input_filename[0] == '\0')
+ return -1;
+
+#if 0
+ { time_t when = stream_pos / 1000000;
+ http_log("Stream pos = %"PRId64", time=%s", stream_pos, ctime(&when));
+ }
+#endif
+
+ /* open stream */
+ if (av_open_input_file(&s, input_filename, c->stream->ifmt,
+ buf_size, c->stream->ap_in) < 0) {
+ http_log("%s not found\n", input_filename);
+ return -1;
+ }
+ c->fmt_in = s;
+
+ /* open each parser */
+ for(i=0;i<s->nb_streams;i++)
+ open_parser(s, i);
+
+ /* choose the stream used as clock source (we favor the video stream,
+ if present) for packet sending */
+ c->pts_stream_index = 0;
+ for(i=0;i<c->stream->nb_streams;i++) {
+ if (c->pts_stream_index == 0 &&
+ c->stream->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
+ c->pts_stream_index = i;
+ }
+ }
+
+#if 1
+ if (c->fmt_in->iformat->read_seek) {
+ c->fmt_in->iformat->read_seek(c->fmt_in, 0, stream_pos, 0);
+ }
+#endif
+ /* set the start time (needed for maxtime and RTP packet timing) */
+ c->start_time = cur_time;
+ c->first_pts = AV_NOPTS_VALUE;
+ return 0;
+}
+
+/* return the server clock (in us) */
+static int64_t get_server_clock(HTTPContext *c)
+{
+ /* compute current pts value from system time */
+ return (int64_t)(cur_time - c->start_time) * 1000LL;
+}
+
+/* return the estimated time at which the current packet must be sent
+ (in us) */
+static int64_t get_packet_send_clock(HTTPContext *c)
+{
+ int bytes_left, bytes_sent, frame_bytes;
+
+ frame_bytes = c->cur_frame_bytes;
+ if (frame_bytes <= 0) {
+ return c->cur_pts;
+ } else {
+ bytes_left = c->buffer_end - c->buffer_ptr;
+ bytes_sent = frame_bytes - bytes_left;
+ return c->cur_pts + (c->cur_frame_duration * bytes_sent) / frame_bytes;
+ }
+}
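+
+/* Worked example of the interpolation above (numbers made up): for a
+   frame of 1000 bytes with a 40 ms duration, once 250 bytes have been
+   handed to the network the send clock is cur_pts + 40 ms * 250/1000 =
+   cur_pts + 10 ms, so the remaining bytes are paced evenly across the
+   frame duration. */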
+
+
+static int http_prepare_data(HTTPContext *c)
+{
+ int i, len, ret;
+ AVFormatContext *ctx;
+
+ av_freep(&c->pb_buffer);
+ switch(c->state) {
+ case HTTPSTATE_SEND_DATA_HEADER:
+ memset(&c->fmt_ctx, 0, sizeof(c->fmt_ctx));
+ pstrcpy(c->fmt_ctx.author, sizeof(c->fmt_ctx.author),
+ c->stream->author);
+ pstrcpy(c->fmt_ctx.comment, sizeof(c->fmt_ctx.comment),
+ c->stream->comment);
+ pstrcpy(c->fmt_ctx.copyright, sizeof(c->fmt_ctx.copyright),
+ c->stream->copyright);
+ pstrcpy(c->fmt_ctx.title, sizeof(c->fmt_ctx.title),
+ c->stream->title);
+
+ /* open output stream by using specified codecs */
+ c->fmt_ctx.oformat = c->stream->fmt;
+ c->fmt_ctx.nb_streams = c->stream->nb_streams;
+ for(i=0;i<c->fmt_ctx.nb_streams;i++) {
+ AVStream *st;
+ AVStream *src;
+ st = av_mallocz(sizeof(AVStream));
+ st->codec= avcodec_alloc_context();
+ c->fmt_ctx.streams[i] = st;
+ /* if file or feed, then just take streams from FFStream struct */
+ if (!c->stream->feed ||
+ c->stream->feed == c->stream)
+ src = c->stream->streams[i];
+ else
+ src = c->stream->feed->streams[c->stream->feed_streams[i]];
+
+ *st = *src;
+ st->priv_data = 0;
+ st->codec->frame_number = 0; /* XXX: should be done in
+ AVStream, not in codec */
+ /* I'm pretty sure that this is not correct...
+ * However, without it, we crash
+ */
+ st->codec->coded_frame = &dummy_frame;
+ }
+ c->got_key_frame = 0;
+
+ /* prepare header and save header data in a stream */
+ if (url_open_dyn_buf(&c->fmt_ctx.pb) < 0) {
+ /* XXX: potential leak */
+ return -1;
+ }
+ c->fmt_ctx.pb.is_streamed = 1;
+
+ av_set_parameters(&c->fmt_ctx, NULL);
+ av_write_header(&c->fmt_ctx);
+
+ len = url_close_dyn_buf(&c->fmt_ctx.pb, &c->pb_buffer);
+ c->buffer_ptr = c->pb_buffer;
+ c->buffer_end = c->pb_buffer + len;
+
+ c->state = HTTPSTATE_SEND_DATA;
+ c->last_packet_sent = 0;
+ break;
+ case HTTPSTATE_SEND_DATA:
+ /* find a new packet */
+ {
+ AVPacket pkt;
+
+ /* read a packet from the input stream */
+ if (c->stream->feed) {
+ ffm_set_write_index(c->fmt_in,
+ c->stream->feed->feed_write_index,
+ c->stream->feed->feed_size);
+ }
+
+ if (c->stream->max_time &&
+ c->stream->max_time + c->start_time - cur_time < 0) {
+ /* We have timed out */
+ c->state = HTTPSTATE_SEND_DATA_TRAILER;
+ } else {
+ redo:
+ if (av_read_frame(c->fmt_in, &pkt) < 0) {
+ if (c->stream->feed && c->stream->feed->feed_opened) {
+ /* if coming from feed, it means we reached the end of the
+ ffm file, so must wait for more data */
+ c->state = HTTPSTATE_WAIT_FEED;
+ return 1; /* state changed */
+ } else {
+ if (c->stream->loop) {
+ av_close_input_file(c->fmt_in);
+ c->fmt_in = NULL;
+ if (open_input_stream(c, "") < 0)
+ goto no_loop;
+ goto redo;
+ } else {
+ no_loop:
+ /* must send trailer now because eof or error */
+ c->state = HTTPSTATE_SEND_DATA_TRAILER;
+ }
+ }
+ } else {
+ /* update first pts if needed */
+ if (c->first_pts == AV_NOPTS_VALUE) {
+ c->first_pts = av_rescale_q(pkt.dts, c->fmt_in->streams[pkt.stream_index]->time_base, AV_TIME_BASE_Q);
+ c->start_time = cur_time;
+ }
+ /* send it to the appropriate stream */
+ if (c->stream->feed) {
+ /* if coming from a feed, select the right stream */
+ if (c->switch_pending) {
+ c->switch_pending = 0;
+ for(i=0;i<c->stream->nb_streams;i++) {
+ if (c->switch_feed_streams[i] == pkt.stream_index) {
+ if (pkt.flags & PKT_FLAG_KEY) {
+ do_switch_stream(c, i);
+ }
+ }
+ if (c->switch_feed_streams[i] >= 0) {
+ c->switch_pending = 1;
+ }
+ }
+ }
+ for(i=0;i<c->stream->nb_streams;i++) {
+ if (c->feed_streams[i] == pkt.stream_index) {
+ pkt.stream_index = i;
+ if (pkt.flags & PKT_FLAG_KEY) {
+ c->got_key_frame |= 1 << i;
+ }
+ /* See if we have all the key frames, then
+ * we start to send. This logic is not quite
+ * right, but it works for the case of a
+ * single video stream with one or more
+ * audio streams (for which every frame is
+ * typically a key frame).
+ */
+ if (!c->stream->send_on_key ||
+ ((c->got_key_frame + 1) >> c->stream->nb_streams)) {
+ goto send_it;
+ }
+ }
+ }
+ } else {
+ AVCodecContext *codec;
+
+ send_it:
+ /* specific handling for RTP: we use several
+ output streams (one for each RTP
+ connection). XXX: needs more abstract handling */
+ if (c->is_packetized) {
+ AVStream *st;
+ /* compute send time and duration */
+ st = c->fmt_in->streams[pkt.stream_index];
+ c->cur_pts = av_rescale_q(pkt.dts, st->time_base, AV_TIME_BASE_Q);
+ if (st->start_time != AV_NOPTS_VALUE)
+ c->cur_pts -= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
+ c->cur_frame_duration = av_rescale_q(pkt.duration, st->time_base, AV_TIME_BASE_Q);
+#if 0
+ printf("index=%d pts=%0.3f duration=%0.6f\n",
+ pkt.stream_index,
+ (double)c->cur_pts /
+ AV_TIME_BASE,
+ (double)c->cur_frame_duration /
+ AV_TIME_BASE);
+#endif
+ /* find RTP context */
+ c->packet_stream_index = pkt.stream_index;
+ ctx = c->rtp_ctx[c->packet_stream_index];
+ if(!ctx) {
+ av_free_packet(&pkt);
+ break;
+ }
+ codec = ctx->streams[0]->codec;
+ /* only one stream per RTP connection */
+ pkt.stream_index = 0;
+ } else {
+ ctx = &c->fmt_ctx;
+ /* Fudge here */
+ codec = ctx->streams[pkt.stream_index]->codec;
+ }
+
+ codec->coded_frame->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);
+ if (c->is_packetized) {
+ int max_packet_size;
+ if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP)
+ max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
+ else
+ max_packet_size = url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]);
+ ret = url_open_dyn_packet_buf(&ctx->pb, max_packet_size);
+ } else {
+ ret = url_open_dyn_buf(&ctx->pb);
+ }
+ if (ret < 0) {
+ /* XXX: potential leak */
+ return -1;
+ }
+ if (av_write_frame(ctx, &pkt)) {
+ c->state = HTTPSTATE_SEND_DATA_TRAILER;
+ }
+
+ len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
+ c->cur_frame_bytes = len;
+ c->buffer_ptr = c->pb_buffer;
+ c->buffer_end = c->pb_buffer + len;
+
+ codec->frame_number++;
+ if (len == 0)
+ goto redo;
+ }
+ av_free_packet(&pkt);
+ }
+ }
+ }
+ break;
+ default:
+ case HTTPSTATE_SEND_DATA_TRAILER:
+ /* last packet test ? */
+ if (c->last_packet_sent || c->is_packetized)
+ return -1;
+ ctx = &c->fmt_ctx;
+ /* prepare trailer */
+ if (url_open_dyn_buf(&ctx->pb) < 0) {
+ /* XXX: potential leak */
+ return -1;
+ }
+ av_write_trailer(ctx);
+ len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
+ c->buffer_ptr = c->pb_buffer;
+ c->buffer_end = c->pb_buffer + len;
+
+ c->last_packet_sent = 1;
+ break;
+ }
+ return 0;
+}
+
+/* in bit/s */
+#define SHORT_TERM_BANDWIDTH 8000000
+
+/* should convert the format at the same time */
+/* send data starting at c->buffer_ptr to the output connection
+ (either UDP or TCP connection) */
+static int http_send_data(HTTPContext *c)
+{
+ int len, ret;
+
+ for(;;) {
+ if (c->buffer_ptr >= c->buffer_end) {
+ ret = http_prepare_data(c);
+ if (ret < 0)
+ return -1;
+ else if (ret != 0) {
+ /* state change requested */
+ break;
+ }
+ } else {
+ if (c->is_packetized) {
+ /* RTP data output */
+ len = c->buffer_end - c->buffer_ptr;
+ if (len < 4) {
+ /* fail safe - should never happen */
+ fail1:
+ c->buffer_ptr = c->buffer_end;
+ return 0;
+ }
+ len = (c->buffer_ptr[0] << 24) |
+ (c->buffer_ptr[1] << 16) |
+ (c->buffer_ptr[2] << 8) |
+ (c->buffer_ptr[3]);
+ if (len > (c->buffer_end - c->buffer_ptr))
+ goto fail1;
+ if ((get_packet_send_clock(c) - get_server_clock(c)) > 0) {
+ /* nothing to send yet: we can wait */
+ return 0;
+ }
+
+ c->data_count += len;
+ update_datarate(&c->datarate, c->data_count);
+ if (c->stream)
+ c->stream->bytes_served += len;
+
+ if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP) {
+ /* RTP packets are sent inside the RTSP TCP connection */
+ ByteIOContext pb1, *pb = &pb1;
+ int interleaved_index, size;
+ uint8_t header[4];
+ HTTPContext *rtsp_c;
+
+ rtsp_c = c->rtsp_c;
+ /* if no RTSP connection left, error */
+ if (!rtsp_c)
+ return -1;
+ /* if already sending something, then wait. */
+ if (rtsp_c->state != RTSPSTATE_WAIT_REQUEST) {
+ break;
+ }
+ if (url_open_dyn_buf(pb) < 0)
+ goto fail1;
+ interleaved_index = c->packet_stream_index * 2;
+ /* RTCP packets are sent at odd indexes */
+ if (c->buffer_ptr[1] == 200)
+ interleaved_index++;
+ /* write RTSP TCP header */
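+ /* Interleaved binary framing as in RFC 2326 section 10.12: a '$'
+ byte, one channel byte (2*stream_index for RTP, +1 for RTCP) and
+ a 16-bit big-endian payload length, followed by the RTP packet
+ itself. For example, a 1500-byte packet on stream 0 would get the
+ four header bytes 0x24 0x00 0x05 0xdc. */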
+ header[0] = '$';
+ header[1] = interleaved_index;
+ header[2] = len >> 8;
+ header[3] = len;
+ put_buffer(pb, header, 4);
+ /* write RTP packet data */
+ c->buffer_ptr += 4;
+ put_buffer(pb, c->buffer_ptr, len);
+ size = url_close_dyn_buf(pb, &c->packet_buffer);
+ /* prepare asynchronous TCP sending */
+ rtsp_c->packet_buffer_ptr = c->packet_buffer;
+ rtsp_c->packet_buffer_end = c->packet_buffer + size;
+ c->buffer_ptr += len;
+
+ /* send everything we can NOW */
+ len = write(rtsp_c->fd, rtsp_c->packet_buffer_ptr,
+ rtsp_c->packet_buffer_end - rtsp_c->packet_buffer_ptr);
+ if (len > 0) {
+ rtsp_c->packet_buffer_ptr += len;
+ }
+ if (rtsp_c->packet_buffer_ptr < rtsp_c->packet_buffer_end) {
+ /* if we could not send all the data, we will
+ send it later, so a new state is needed to
+ "lock" the RTSP TCP connection */
+ rtsp_c->state = RTSPSTATE_SEND_PACKET;
+ break;
+ } else {
+ /* all data has been sent */
+ av_freep(&c->packet_buffer);
+ }
+ } else {
+ /* send RTP packet directly in UDP */
+ c->buffer_ptr += 4;
+ url_write(c->rtp_handles[c->packet_stream_index],
+ c->buffer_ptr, len);
+ c->buffer_ptr += len;
+ /* here we continue as we can send several packets per 10 ms slot */
+ }
+ } else {
+ /* TCP data output */
+ len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ return -1;
+ } else {
+ return 0;
+ }
+ } else {
+ c->buffer_ptr += len;
+ }
+ c->data_count += len;
+ update_datarate(&c->datarate, c->data_count);
+ if (c->stream)
+ c->stream->bytes_served += len;
+ break;
+ }
+ }
+ } /* for(;;) */
+ return 0;
+}
+
+static int http_start_receive_data(HTTPContext *c)
+{
+ int fd;
+
+ if (c->stream->feed_opened)
+ return -1;
+
+ /* Don't permit writing to this one */
+ if (c->stream->readonly)
+ return -1;
+
+ /* open feed */
+ fd = open(c->stream->feed_filename, O_RDWR);
+ if (fd < 0)
+ return -1;
+ c->feed_fd = fd;
+
+ c->stream->feed_write_index = ffm_read_write_index(fd);
+ c->stream->feed_size = lseek(fd, 0, SEEK_END);
+ lseek(fd, 0, SEEK_SET);
+
+ /* init buffer input */
+ c->buffer_ptr = c->buffer;
+ c->buffer_end = c->buffer + FFM_PACKET_SIZE;
+ c->stream->feed_opened = 1;
+ return 0;
+}
+
+static int http_receive_data(HTTPContext *c)
+{
+ HTTPContext *c1;
+
+ if (c->buffer_end > c->buffer_ptr) {
+ int len;
+
+ len = read(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ goto fail;
+ }
+ } else if (len == 0) {
+ /* end of connection : close it */
+ goto fail;
+ } else {
+ c->buffer_ptr += len;
+ c->data_count += len;
+ update_datarate(&c->datarate, c->data_count);
+ }
+ }
+
+ if (c->buffer_ptr - c->buffer >= 2 && c->data_count > FFM_PACKET_SIZE) {
+ if (c->buffer[0] != 'f' ||
+ c->buffer[1] != 'm') {
+ http_log("Feed stream has become desynchronized -- disconnecting\n");
+ goto fail;
+ }
+ }
+
+ if (c->buffer_ptr >= c->buffer_end) {
+ FFStream *feed = c->stream;
+ /* a packet has been received : write it in the store, except
+ if header */
+ if (c->data_count > FFM_PACKET_SIZE) {
+
+ // printf("writing pos=0x%"PRIx64" size=0x%"PRIx64"\n", feed->feed_write_index, feed->feed_size);
+ /* XXX: use llseek or url_seek */
+ lseek(c->feed_fd, feed->feed_write_index, SEEK_SET);
+ write(c->feed_fd, c->buffer, FFM_PACKET_SIZE);
+
+ feed->feed_write_index += FFM_PACKET_SIZE;
+ /* update file size */
+ if (feed->feed_write_index > c->stream->feed_size)
+ feed->feed_size = feed->feed_write_index;
+
+ /* handle wrap around if max file size reached */
+ if (c->stream->feed_max_size && feed->feed_write_index >= c->stream->feed_max_size)
+ feed->feed_write_index = FFM_PACKET_SIZE;
+
+ /* write index */
+ ffm_write_write_index(c->feed_fd, feed->feed_write_index);
+
+ /* wake up any waiting connections */
+ for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
+ if (c1->state == HTTPSTATE_WAIT_FEED &&
+ c1->stream->feed == c->stream->feed) {
+ c1->state = HTTPSTATE_SEND_DATA;
+ }
+ }
+ } else {
+ /* We have a header in our hands that contains useful data */
+ AVFormatContext s;
+ AVInputFormat *fmt_in;
+ ByteIOContext *pb = &s.pb;
+ int i;
+
+ memset(&s, 0, sizeof(s));
+
+ url_open_buf(pb, c->buffer, c->buffer_end - c->buffer, URL_RDONLY);
+ pb->buf_end = c->buffer_end; /* ?? */
+ pb->is_streamed = 1;
+
+ /* use feed output format name to find corresponding input format */
+ fmt_in = av_find_input_format(feed->fmt->name);
+ if (!fmt_in)
+ goto fail;
+
+ if (fmt_in->priv_data_size > 0) {
+ s.priv_data = av_mallocz(fmt_in->priv_data_size);
+ if (!s.priv_data)
+ goto fail;
+ } else
+ s.priv_data = NULL;
+
+ if (fmt_in->read_header(&s, 0) < 0) {
+ av_freep(&s.priv_data);
+ goto fail;
+ }
+
+ /* Now we have the actual streams */
+ if (s.nb_streams != feed->nb_streams) {
+ av_freep(&s.priv_data);
+ goto fail;
+ }
+ for (i = 0; i < s.nb_streams; i++) {
+ memcpy(feed->streams[i]->codec,
+ s.streams[i]->codec, sizeof(AVCodecContext));
+ }
+ av_freep(&s.priv_data);
+ }
+ c->buffer_ptr = c->buffer;
+ }
+
+ return 0;
+ fail:
+ c->stream->feed_opened = 0;
+ close(c->feed_fd);
+ return -1;
+}
+
+/********************************************************************/
+/* RTSP handling */
+
+static void rtsp_reply_header(HTTPContext *c, enum RTSPStatusCode error_number)
+{
+ const char *str;
+ time_t ti;
+ char *p;
+ char buf2[32];
+
+ switch(error_number) {
+#define DEF(n, c, s) case c: str = s; break;
+#include "rtspcodes.h"
+#undef DEF
+ default:
+ str = "Unknown Error";
+ break;
+ }
+
+ url_fprintf(c->pb, "RTSP/1.0 %d %s\r\n", error_number, str);
+ url_fprintf(c->pb, "CSeq: %d\r\n", c->seq);
+
+ /* output GMT time */
+ ti = time(NULL);
+ p = ctime(&ti);
+ strcpy(buf2, p);
+ p = buf2 + strlen(p) - 1;
+ if (*p == '\n')
+ *p = '\0';
+ url_fprintf(c->pb, "Date: %s GMT\r\n", buf2);
+}
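+
+/* Typical header block produced above (values illustrative):
+       RTSP/1.0 200 OK
+       CSeq: 2
+       Date: Tue Feb 6 17:00:00 2007 GMT
+   Note that ctime() yields local time, so the "GMT" suffix is only
+   accurate when the server runs in UTC. */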
+
+static void rtsp_reply_error(HTTPContext *c, enum RTSPStatusCode error_number)
+{
+ rtsp_reply_header(c, error_number);
+ url_fprintf(c->pb, "\r\n");
+}
+
+static int rtsp_parse_request(HTTPContext *c)
+{
+ const char *p, *p1, *p2;
+ char cmd[32];
+ char url[1024];
+ char protocol[32];
+ char line[1024];
+ ByteIOContext pb1;
+ int len;
+ RTSPHeader header1, *header = &header1;
+
+ c->buffer_ptr[0] = '\0';
+ p = c->buffer;
+
+ get_word(cmd, sizeof(cmd), &p);
+ get_word(url, sizeof(url), &p);
+ get_word(protocol, sizeof(protocol), &p);
+
+ pstrcpy(c->method, sizeof(c->method), cmd);
+ pstrcpy(c->url, sizeof(c->url), url);
+ pstrcpy(c->protocol, sizeof(c->protocol), protocol);
+
+ c->pb = &pb1;
+ if (url_open_dyn_buf(c->pb) < 0) {
+ /* XXX: cannot do more */
+ c->pb = NULL; /* safety */
+ return -1;
+ }
+
+ /* check version name */
+ if (strcmp(protocol, "RTSP/1.0") != 0) {
+ rtsp_reply_error(c, RTSP_STATUS_VERSION);
+ goto the_end;
+ }
+
+ /* parse each header line */
+ memset(header, 0, sizeof(RTSPHeader));
+ /* skip to next line */
+ while (*p != '\n' && *p != '\0')
+ p++;
+ if (*p == '\n')
+ p++;
+ while (*p != '\0') {
+ p1 = strchr(p, '\n');
+ if (!p1)
+ break;
+ p2 = p1;
+ if (p2 > p && p2[-1] == '\r')
+ p2--;
+ /* skip empty line */
+ if (p2 == p)
+ break;
+ len = p2 - p;
+ if (len > sizeof(line) - 1)
+ len = sizeof(line) - 1;
+ memcpy(line, p, len);
+ line[len] = '\0';
+ rtsp_parse_line(header, line);
+ p = p1 + 1;
+ }
+
+ /* handle sequence number */
+ c->seq = header->seq;
+
+ if (!strcmp(cmd, "DESCRIBE")) {
+ rtsp_cmd_describe(c, url);
+ } else if (!strcmp(cmd, "OPTIONS")) {
+ rtsp_cmd_options(c, url);
+ } else if (!strcmp(cmd, "SETUP")) {
+ rtsp_cmd_setup(c, url, header);
+ } else if (!strcmp(cmd, "PLAY")) {
+ rtsp_cmd_play(c, url, header);
+ } else if (!strcmp(cmd, "PAUSE")) {
+ rtsp_cmd_pause(c, url, header);
+ } else if (!strcmp(cmd, "TEARDOWN")) {
+ rtsp_cmd_teardown(c, url, header);
+ } else {
+ rtsp_reply_error(c, RTSP_STATUS_METHOD);
+ }
+ the_end:
+ len = url_close_dyn_buf(c->pb, &c->pb_buffer);
+ c->pb = NULL; /* safety */
+ if (len < 0) {
+ /* XXX: cannot do more */
+ return -1;
+ }
+ c->buffer_ptr = c->pb_buffer;
+ c->buffer_end = c->pb_buffer + len;
+ c->state = RTSPSTATE_SEND_REPLY;
+ return 0;
+}
+
+/* XXX: move that to rtsp.c, but would need to replace FFStream by
+ AVFormatContext */
+static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
+ struct in_addr my_ip)
+{
+ ByteIOContext pb1, *pb = &pb1;
+ int i, payload_type, port, private_payload_type, j;
+ const char *ipstr, *title, *mediatype;
+ AVStream *st;
+
+ if (url_open_dyn_buf(pb) < 0)
+ return -1;
+
+ /* general media info */
+
+ url_fprintf(pb, "v=0\n");
+ ipstr = inet_ntoa(my_ip);
+ url_fprintf(pb, "o=- 0 0 IN IP4 %s\n", ipstr);
+ title = stream->title;
+ if (title[0] == '\0')
+ title = "No Title";
+ url_fprintf(pb, "s=%s\n", title);
+ if (stream->comment[0] != '\0')
+ url_fprintf(pb, "i=%s\n", stream->comment);
+ if (stream->is_multicast) {
+ url_fprintf(pb, "c=IN IP4 %s\n", inet_ntoa(stream->multicast_ip));
+ }
+ /* for each stream, we output the necessary info */
+ private_payload_type = RTP_PT_PRIVATE;
+ for(i = 0; i < stream->nb_streams; i++) {
+ st = stream->streams[i];
+ if (st->codec->codec_id == CODEC_ID_MPEG2TS) {
+ mediatype = "video";
+ } else {
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ mediatype = "audio";
+ break;
+ case CODEC_TYPE_VIDEO:
+ mediatype = "video";
+ break;
+ default:
+ mediatype = "application";
+ break;
+ }
+ }
+ /* NOTE: the port indication is not correct in case of
+ unicast. It is not an issue because RTSP gives it */
+ payload_type = rtp_get_payload_type(st->codec);
+ if (payload_type < 0)
+ payload_type = private_payload_type++;
+ if (stream->is_multicast) {
+ port = stream->multicast_port + 2 * i;
+ } else {
+ port = 0;
+ }
+ url_fprintf(pb, "m=%s %d RTP/AVP %d\n",
+ mediatype, port, payload_type);
+ if (payload_type >= RTP_PT_PRIVATE) {
+ /* for private payload type, we need to give more info */
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MPEG4:
+ {
+ uint8_t *data;
+ url_fprintf(pb, "a=rtpmap:%d MP4V-ES/%d\n",
+ payload_type, 90000);
+ /* we must also add the mpeg4 header */
+ data = st->codec->extradata;
+ if (data) {
+ url_fprintf(pb, "a=fmtp:%d config=", payload_type);
+ for(j=0;j<st->codec->extradata_size;j++) {
+ url_fprintf(pb, "%02x", data[j]);
+ }
+ url_fprintf(pb, "\n");
+ }
+ }
+ break;
+ default:
+ /* XXX: add other codecs ? */
+ goto fail;
+ }
+ }
+ url_fprintf(pb, "a=control:streamid=%d\n", i);
+ }
+ return url_close_dyn_buf(pb, pbuffer);
+ fail:
+ url_close_dyn_buf(pb, pbuffer);
+ av_free(*pbuffer);
+ return -1;
+}
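+
+/* Sketch of the SDP this generates for a single MPEG-4 video stream
+   (address and config blob are made up):
+       v=0
+       o=- 0 0 IN IP4 192.168.0.10
+       s=No Title
+       m=video 0 RTP/AVP 96
+       a=rtpmap:96 MP4V-ES/90000
+       a=fmtp:96 config=000001b0f3000001b509...
+       a=control:streamid=0
+   For multicast streams a "c=IN IP4 <group>" line and real port numbers
+   are emitted as well. */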
+
+static void rtsp_cmd_options(HTTPContext *c, const char *url)
+{
+// rtsp_reply_header(c, RTSP_STATUS_OK);
+ url_fprintf(c->pb, "RTSP/1.0 %d %s\r\n", RTSP_STATUS_OK, "OK");
+ url_fprintf(c->pb, "CSeq: %d\r\n", c->seq);
+ url_fprintf(c->pb, "Public: %s\r\n", "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE");
+ url_fprintf(c->pb, "\r\n");
+}
+
+static void rtsp_cmd_describe(HTTPContext *c, const char *url)
+{
+ FFStream *stream;
+ char path1[1024];
+ const char *path;
+ uint8_t *content;
+ int content_length, len;
+ struct sockaddr_in my_addr;
+
+ /* find which url is asked */
+ url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
+ path = path1;
+ if (*path == '/')
+ path++;
+
+ for(stream = first_stream; stream != NULL; stream = stream->next) {
+ if (!stream->is_feed && stream->fmt == &rtp_muxer &&
+ !strcmp(path, stream->filename)) {
+ goto found;
+ }
+ }
+ /* no stream found */
+ rtsp_reply_error(c, RTSP_STATUS_SERVICE); /* XXX: right error ? */
+ return;
+
+ found:
+ /* prepare the media description in sdp format */
+
+ /* get the host IP */
+ len = sizeof(my_addr);
+ getsockname(c->fd, (struct sockaddr *)&my_addr, &len);
+ content_length = prepare_sdp_description(stream, &content, my_addr.sin_addr);
+ if (content_length < 0) {
+ rtsp_reply_error(c, RTSP_STATUS_INTERNAL);
+ return;
+ }
+ rtsp_reply_header(c, RTSP_STATUS_OK);
+ url_fprintf(c->pb, "Content-Type: application/sdp\r\n");
+ url_fprintf(c->pb, "Content-Length: %d\r\n", content_length);
+ url_fprintf(c->pb, "\r\n");
+ put_buffer(c->pb, content, content_length);
+}
+
+static HTTPContext *find_rtp_session(const char *session_id)
+{
+ HTTPContext *c;
+
+ if (session_id[0] == '\0')
+ return NULL;
+
+ for(c = first_http_ctx; c != NULL; c = c->next) {
+ if (!strcmp(c->session_id, session_id))
+ return c;
+ }
+ return NULL;
+}
+
+static RTSPTransportField *find_transport(RTSPHeader *h, enum RTSPProtocol protocol)
+{
+ RTSPTransportField *th;
+ int i;
+
+ for(i=0;i<h->nb_transports;i++) {
+ th = &h->transports[i];
+ if (th->protocol == protocol)
+ return th;
+ }
+ return NULL;
+}
+
+static void rtsp_cmd_setup(HTTPContext *c, const char *url,
+ RTSPHeader *h)
+{
+ FFStream *stream;
+ int stream_index, port;
+ char buf[1024];
+ char path1[1024];
+ const char *path;
+ HTTPContext *rtp_c;
+ RTSPTransportField *th;
+ struct sockaddr_in dest_addr;
+ RTSPActionServerSetup setup;
+
+ /* find which url is asked */
+ url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
+ path = path1;
+ if (*path == '/')
+ path++;
+
+ /* now check each stream */
+ for(stream = first_stream; stream != NULL; stream = stream->next) {
+ if (!stream->is_feed && stream->fmt == &rtp_muxer) {
+ /* accept aggregate filenames only if single stream */
+ if (!strcmp(path, stream->filename)) {
+ if (stream->nb_streams != 1) {
+ rtsp_reply_error(c, RTSP_STATUS_AGGREGATE);
+ return;
+ }
+ stream_index = 0;
+ goto found;
+ }
+
+ for(stream_index = 0; stream_index < stream->nb_streams;
+ stream_index++) {
+ snprintf(buf, sizeof(buf), "%s/streamid=%d",
+ stream->filename, stream_index);
+ if (!strcmp(path, buf))
+ goto found;
+ }
+ }
+ }
+ /* no stream found */
+ rtsp_reply_error(c, RTSP_STATUS_SERVICE); /* XXX: right error ? */
+ return;
+ found:
+
+ /* generate session id if needed */
+ if (h->session_id[0] == '\0') {
+ snprintf(h->session_id, sizeof(h->session_id),
+ "%08x%08x", (int)random(), (int)random());
+ }
+
+ /* find rtp session, and create it if none found */
+ rtp_c = find_rtp_session(h->session_id);
+ if (!rtp_c) {
+ /* always prefer UDP */
+ th = find_transport(h, RTSP_PROTOCOL_RTP_UDP);
+ if (!th) {
+ th = find_transport(h, RTSP_PROTOCOL_RTP_TCP);
+ if (!th) {
+ rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
+ return;
+ }
+ }
+
+ rtp_c = rtp_new_connection(&c->from_addr, stream, h->session_id,
+ th->protocol);
+ if (!rtp_c) {
+ rtsp_reply_error(c, RTSP_STATUS_BANDWIDTH);
+ return;
+ }
+
+ /* open input stream */
+ if (open_input_stream(rtp_c, "") < 0) {
+ rtsp_reply_error(c, RTSP_STATUS_INTERNAL);
+ return;
+ }
+ }
+
+ /* test if the stream is OK (needed because several SETUPs may
+ be done for a given file) */
+ if (rtp_c->stream != stream) {
+ rtsp_reply_error(c, RTSP_STATUS_SERVICE);
+ return;
+ }
+
+ /* test if stream is already set up */
+ if (rtp_c->rtp_ctx[stream_index]) {
+ rtsp_reply_error(c, RTSP_STATUS_STATE);
+ return;
+ }
+
+ /* check transport */
+ th = find_transport(h, rtp_c->rtp_protocol);
+ if (!th || (th->protocol == RTSP_PROTOCOL_RTP_UDP &&
+ th->client_port_min <= 0)) {
+ rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
+ return;
+ }
+
+ /* setup default options */
+ setup.transport_option[0] = '\0';
+ dest_addr = rtp_c->from_addr;
+ dest_addr.sin_port = htons(th->client_port_min);
+
+ /* add transport option if needed */
+ if (ff_rtsp_callback) {
+ setup.ipaddr = ntohl(dest_addr.sin_addr.s_addr);
+ if (ff_rtsp_callback(RTSP_ACTION_SERVER_SETUP, rtp_c->session_id,
+ (char *)&setup, sizeof(setup),
+ stream->rtsp_option) < 0) {
+ rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
+ return;
+ }
+ dest_addr.sin_addr.s_addr = htonl(setup.ipaddr);
+ }
+
+ /* setup stream */
+ if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, c) < 0) {
+ rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
+ return;
+ }
+
+ /* now everything is OK, so we can send the connection parameters */
+ rtsp_reply_header(c, RTSP_STATUS_OK);
+ /* session ID */
+ url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id);
+
+ switch(rtp_c->rtp_protocol) {
+ case RTSP_PROTOCOL_RTP_UDP:
+ port = rtp_get_local_port(rtp_c->rtp_handles[stream_index]);
+ url_fprintf(c->pb, "Transport: RTP/AVP/UDP;unicast;"
+ "client_port=%d-%d;server_port=%d-%d",
+ th->client_port_min, th->client_port_min + 1,
+ port, port + 1);
+ break;
+ case RTSP_PROTOCOL_RTP_TCP:
+ url_fprintf(c->pb, "Transport: RTP/AVP/TCP;interleaved=%d-%d",
+ stream_index * 2, stream_index * 2 + 1);
+ break;
+ default:
+ break;
+ }
+ if (setup.transport_option[0] != '\0') {
+ url_fprintf(c->pb, ";%s", setup.transport_option);
+ }
+ url_fprintf(c->pb, "\r\n");
+
+
+ url_fprintf(c->pb, "\r\n");
+}
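+
+/* Example of the reply built above for the UDP case (ports and session
+   id made up):
+       RTSP/1.0 200 OK
+       CSeq: 3
+       Date: ... GMT
+       Session: 0a1b2c3d4e5f6071
+       Transport: RTP/AVP/UDP;unicast;client_port=5000-5001;server_port=32000-32001
+   For the TCP case the Transport line instead carries
+   "interleaved=<2*i>-<2*i+1>". */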
+
+
+/* find an rtp connection by using the session ID. Check consistency
+ with filename */
+static HTTPContext *find_rtp_session_with_url(const char *url,
+ const char *session_id)
+{
+ HTTPContext *rtp_c;
+ char path1[1024];
+ const char *path;
+ char buf[1024];
+ int s;
+
+ rtp_c = find_rtp_session(session_id);
+ if (!rtp_c)
+ return NULL;
+
+ /* find which url is asked */
+ url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
+ path = path1;
+ if (*path == '/')
+ path++;
+ if(!strcmp(path, rtp_c->stream->filename)) return rtp_c;
+ for(s=0; s<rtp_c->stream->nb_streams; ++s) {
+ snprintf(buf, sizeof(buf), "%s/streamid=%d",
+ rtp_c->stream->filename, s);
+ if(!strncmp(path, buf, sizeof(buf))) {
+ // XXX: Should we reply with RTSP_STATUS_ONLY_AGGREGATE if nb_streams>1?
+ return rtp_c;
+ }
+ }
+ return NULL;
+}
+
+static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPHeader *h)
+{
+ HTTPContext *rtp_c;
+
+ rtp_c = find_rtp_session_with_url(url, h->session_id);
+ if (!rtp_c) {
+ rtsp_reply_error(c, RTSP_STATUS_SESSION);
+ return;
+ }
+
+ if (rtp_c->state != HTTPSTATE_SEND_DATA &&
+ rtp_c->state != HTTPSTATE_WAIT_FEED &&
+ rtp_c->state != HTTPSTATE_READY) {
+ rtsp_reply_error(c, RTSP_STATUS_STATE);
+ return;
+ }
+
+#if 0
+ /* XXX: seek in stream */
+ if (h->range_start != AV_NOPTS_VALUE) {
+ printf("range_start=%0.3f\n", (double)h->range_start / AV_TIME_BASE);
+ av_seek_frame(rtp_c->fmt_in, -1, h->range_start);
+ }
+#endif
+
+ rtp_c->state = HTTPSTATE_SEND_DATA;
+
+ /* now everything is OK, so we can send the connection parameters */
+ rtsp_reply_header(c, RTSP_STATUS_OK);
+ /* session ID */
+ url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id);
+ url_fprintf(c->pb, "\r\n");
+}
+
+static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPHeader *h)
+{
+ HTTPContext *rtp_c;
+
+ rtp_c = find_rtp_session_with_url(url, h->session_id);
+ if (!rtp_c) {
+ rtsp_reply_error(c, RTSP_STATUS_SESSION);
+ return;
+ }
+
+ if (rtp_c->state != HTTPSTATE_SEND_DATA &&
+ rtp_c->state != HTTPSTATE_WAIT_FEED) {
+ rtsp_reply_error(c, RTSP_STATUS_STATE);
+ return;
+ }
+
+ rtp_c->state = HTTPSTATE_READY;
+ rtp_c->first_pts = AV_NOPTS_VALUE;
+ /* now everything is OK, so we can send the connection parameters */
+ rtsp_reply_header(c, RTSP_STATUS_OK);
+ /* session ID */
+ url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id);
+ url_fprintf(c->pb, "\r\n");
+}
+
+static void rtsp_cmd_teardown(HTTPContext *c, const char *url, RTSPHeader *h)
+{
+ HTTPContext *rtp_c;
+
+ rtp_c = find_rtp_session_with_url(url, h->session_id);
+ if (!rtp_c) {
+ rtsp_reply_error(c, RTSP_STATUS_SESSION);
+ return;
+ }
+
+ /* abort the session */
+ close_connection(rtp_c);
+
+ if (ff_rtsp_callback) {
+ ff_rtsp_callback(RTSP_ACTION_SERVER_TEARDOWN, rtp_c->session_id,
+ NULL, 0,
+ rtp_c->stream->rtsp_option);
+ }
+
+ /* now everything is OK, so we can send the connection parameters */
+ rtsp_reply_header(c, RTSP_STATUS_OK);
+ /* session ID */
+ url_fprintf(c->pb, "Session: %s\r\n", rtp_c->session_id);
+ url_fprintf(c->pb, "\r\n");
+}
+
+
+/********************************************************************/
+/* RTP handling */
+
+static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr,
+ FFStream *stream, const char *session_id,
+ enum RTSPProtocol rtp_protocol)
+{
+ HTTPContext *c = NULL;
+ const char *proto_str;
+
+ /* XXX: should output a warning page when coming
+ close to the connection limit */
+ if (nb_connections >= nb_max_connections)
+ goto fail;
+
+ /* add a new connection */
+ c = av_mallocz(sizeof(HTTPContext));
+ if (!c)
+ goto fail;
+
+ c->fd = -1;
+ c->poll_entry = NULL;
+ c->from_addr = *from_addr;
+ c->buffer_size = IOBUFFER_INIT_SIZE;
+ c->buffer = av_malloc(c->buffer_size);
+ if (!c->buffer)
+ goto fail;
+ nb_connections++;
+ c->stream = stream;
+ pstrcpy(c->session_id, sizeof(c->session_id), session_id);
+ c->state = HTTPSTATE_READY;
+ c->is_packetized = 1;
+ c->rtp_protocol = rtp_protocol;
+
+ /* protocol is shown in statistics */
+ switch(c->rtp_protocol) {
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ proto_str = "MCAST";
+ break;
+ case RTSP_PROTOCOL_RTP_UDP:
+ proto_str = "UDP";
+ break;
+ case RTSP_PROTOCOL_RTP_TCP:
+ proto_str = "TCP";
+ break;
+ default:
+ proto_str = "???";
+ break;
+ }
+ pstrcpy(c->protocol, sizeof(c->protocol), "RTP/");
+ pstrcat(c->protocol, sizeof(c->protocol), proto_str);
+
+ current_bandwidth += stream->bandwidth;
+
+ c->next = first_http_ctx;
+ first_http_ctx = c;
+ return c;
+
+ fail:
+ if (c) {
+ av_free(c->buffer);
+ av_free(c);
+ }
+ return NULL;
+}
+
+/* add a new RTP stream in an RTP connection (used in RTSP SETUP
+ command). If RTP/TCP protocol is used, TCP connection 'rtsp_c' is
+ used. */
+static int rtp_new_av_stream(HTTPContext *c,
+ int stream_index, struct sockaddr_in *dest_addr,
+ HTTPContext *rtsp_c)
+{
+ AVFormatContext *ctx;
+ AVStream *st;
+ char *ipaddr;
+ URLContext *h = NULL;
+ uint8_t *dummy_buf;
+ char buf2[32];
+ int max_packet_size;
+
+ /* now we can open the relevant output stream */
+ ctx = av_alloc_format_context();
+ if (!ctx)
+ return -1;
+ ctx->oformat = &rtp_muxer;
+
+ st = av_mallocz(sizeof(AVStream));
+ if (!st)
+ goto fail;
+ st->codec= avcodec_alloc_context();
+ ctx->nb_streams = 1;
+ ctx->streams[0] = st;
+
+ if (!c->stream->feed ||
+ c->stream->feed == c->stream) {
+ memcpy(st, c->stream->streams[stream_index], sizeof(AVStream));
+ } else {
+ memcpy(st,
+ c->stream->feed->streams[c->stream->feed_streams[stream_index]],
+ sizeof(AVStream));
+ }
+
+ /* build destination RTP address */
+ ipaddr = inet_ntoa(dest_addr->sin_addr);
+
+ switch(c->rtp_protocol) {
+ case RTSP_PROTOCOL_RTP_UDP:
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ /* RTP/UDP case */
+
+ /* XXX: also pass as parameter to function ? */
+ if (c->stream->is_multicast) {
+ int ttl;
+ ttl = c->stream->multicast_ttl;
+ if (!ttl)
+ ttl = 16;
+ snprintf(ctx->filename, sizeof(ctx->filename),
+ "rtp://%s:%d?multicast=1&ttl=%d",
+ ipaddr, ntohs(dest_addr->sin_port), ttl);
+ } else {
+ snprintf(ctx->filename, sizeof(ctx->filename),
+ "rtp://%s:%d", ipaddr, ntohs(dest_addr->sin_port));
+ }
+
+ if (url_open(&h, ctx->filename, URL_WRONLY) < 0)
+ goto fail;
+ c->rtp_handles[stream_index] = h;
+ max_packet_size = url_get_max_packet_size(h);
+ break;
+ case RTSP_PROTOCOL_RTP_TCP:
+ /* RTP/TCP case */
+ c->rtsp_c = rtsp_c;
+ max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
+ break;
+ default:
+ goto fail;
+ }
+
+ http_log("%s:%d - - [%s] \"PLAY %s/streamid=%d %s\"\n",
+ ipaddr, ntohs(dest_addr->sin_port),
+ ctime1(buf2),
+ c->stream->filename, stream_index, c->protocol);
+
+ /* normally, no packets should be output here, but the packet size may be checked */
+ if (url_open_dyn_packet_buf(&ctx->pb, max_packet_size) < 0) {
+ /* XXX: close stream */
+ goto fail;
+ }
+ av_set_parameters(ctx, NULL);
+ if (av_write_header(ctx) < 0) {
+ fail:
+ if (h)
+ url_close(h);
+ av_free(ctx);
+ return -1;
+ }
+ url_close_dyn_buf(&ctx->pb, &dummy_buf);
+ av_free(dummy_buf);
+
+ c->rtp_ctx[stream_index] = ctx;
+ return 0;
+}
+
+/********************************************************************/
+/* ffserver initialization */
+
+static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec)
+{
+ AVStream *fst;
+
+ fst = av_mallocz(sizeof(AVStream));
+ if (!fst)
+ return NULL;
+ fst->codec= avcodec_alloc_context();
+ fst->priv_data = av_mallocz(sizeof(FeedData));
+ memcpy(fst->codec, codec, sizeof(AVCodecContext));
+ fst->codec->coded_frame = &dummy_frame;
+ fst->index = stream->nb_streams;
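+ /* MPEG timestamp convention: 33-bit timestamps on a 90 kHz clock */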
+ av_set_pts_info(fst, 33, 1, 90000);
+ stream->streams[stream->nb_streams++] = fst;
+ return fst;
+}
+
+/* return the stream number in the feed */
+static int add_av_stream(FFStream *feed, AVStream *st)
+{
+ AVStream *fst;
+ AVCodecContext *av, *av1;
+ int i;
+
+ av = st->codec;
+ for(i=0;i<feed->nb_streams;i++) {
+ st = feed->streams[i];
+ av1 = st->codec;
+ if (av1->codec_id == av->codec_id &&
+ av1->codec_type == av->codec_type &&
+ av1->bit_rate == av->bit_rate) {
+
+ switch(av->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (av1->channels == av->channels &&
+ av1->sample_rate == av->sample_rate)
+ goto found;
+ break;
+ case CODEC_TYPE_VIDEO:
+ if (av1->width == av->width &&
+ av1->height == av->height &&
+ av1->time_base.den == av->time_base.den &&
+ av1->time_base.num == av->time_base.num &&
+ av1->gop_size == av->gop_size)
+ goto found;
+ break;
+ default:
+ av_abort();
+ }
+ }
+ }
+
+ fst = add_av_stream1(feed, av);
+ if (!fst)
+ return -1;
+ return feed->nb_streams - 1;
+ found:
+ return i;
+}
+
+static void remove_stream(FFStream *stream)
+{
+ FFStream **ps;
+ ps = &first_stream;
+ while (*ps != NULL) {
+ if (*ps == stream) {
+ *ps = (*ps)->next;
+ } else {
+ ps = &(*ps)->next;
+ }
+ }
+}
+
+/* specific mpeg4 handling : we extract the raw parameters */
+static void extract_mpeg4_header(AVFormatContext *infile)
+{
+ int mpeg4_count, i, size;
+ AVPacket pkt;
+ AVStream *st;
+ const uint8_t *p;
+
+ mpeg4_count = 0;
+ for(i=0;i<infile->nb_streams;i++) {
+ st = infile->streams[i];
+ if (st->codec->codec_id == CODEC_ID_MPEG4 &&
+ st->codec->extradata_size == 0) {
+ mpeg4_count++;
+ }
+ }
+ if (!mpeg4_count)
+ return;
+
+ printf("MPEG4 without extra data: trying to find header in %s\n", infile->filename);
+ while (mpeg4_count > 0) {
+ if (av_read_packet(infile, &pkt) < 0)
+ break;
+ st = infile->streams[pkt.stream_index];
+ if (st->codec->codec_id == CODEC_ID_MPEG4 &&
+ st->codec->extradata_size == 0) {
+ av_freep(&st->codec->extradata);
+ /* fill extradata with the header */
+ /* XXX: we make strong assumptions here! */
+ p = pkt.data;
+ while (p < pkt.data + pkt.size - 4) {
+ /* stop when vop header is found */
+ if (p[0] == 0x00 && p[1] == 0x00 &&
+ p[2] == 0x01 && p[3] == 0xb6) {
+ size = p - pkt.data;
+ // av_hex_dump(pkt.data, size);
+ st->codec->extradata = av_malloc(size);
+ st->codec->extradata_size = size;
+ memcpy(st->codec->extradata, pkt.data, size);
+ break;
+ }
+ p++;
+ }
+ mpeg4_count--;
+ }
+ av_free_packet(&pkt);
+ }
+}
+
+/* compute the needed AVStream for each file */
+static void build_file_streams(void)
+{
+ FFStream *stream, *stream_next;
+ AVFormatContext *infile;
+ int i;
+
+ /* gather all streams */
+ for(stream = first_stream; stream != NULL; stream = stream_next) {
+ stream_next = stream->next;
+ if (stream->stream_type == STREAM_TYPE_LIVE &&
+ !stream->feed) {
+ /* the stream comes from a file */
+ /* try to open the file */
+ /* open stream */
+ stream->ap_in = av_mallocz(sizeof(AVFormatParameters));
+ if (stream->fmt == &rtp_muxer) {
+ /* specific case : if transport stream output to RTP,
+ we use a raw transport stream reader */
+ stream->ap_in->mpeg2ts_raw = 1;
+ stream->ap_in->mpeg2ts_compute_pcr = 1;
+ }
+
+ if (av_open_input_file(&infile, stream->feed_filename,
+ stream->ifmt, 0, stream->ap_in) < 0) {
+ http_log("%s not found", stream->feed_filename);
+ /* remove stream (no need to spend more time on it) */
+ fail:
+ remove_stream(stream);
+ } else {
+ /* find all the AVStreams inside and reference them in
+ 'stream' */
+ if (av_find_stream_info(infile) < 0) {
+ http_log("Could not find codec parameters from '%s'",
+ stream->feed_filename);
+ av_close_input_file(infile);
+ goto fail;
+ }
+ extract_mpeg4_header(infile);
+
+ for(i=0;i<infile->nb_streams;i++) {
+ add_av_stream1(stream, infile->streams[i]->codec);
+ }
+ av_close_input_file(infile);
+ }
+ }
+ }
+}
+
+/* compute the needed AVStream for each feed */
+static void build_feed_streams(void)
+{
+ FFStream *stream, *feed;
+ int i;
+
+ /* gather all streams */
+ for(stream = first_stream; stream != NULL; stream = stream->next) {
+ feed = stream->feed;
+ if (feed) {
+ if (!stream->is_feed) {
+ /* we handle a stream coming from a feed */
+ for(i=0;i<stream->nb_streams;i++) {
+ stream->feed_streams[i] = add_av_stream(feed, stream->streams[i]);
+ }
+ }
+ }
+ }
+
+ /* gather all streams */
+ for(stream = first_stream; stream != NULL; stream = stream->next) {
+ feed = stream->feed;
+ if (feed) {
+ if (stream->is_feed) {
+ for(i=0;i<stream->nb_streams;i++) {
+ stream->feed_streams[i] = i;
+ }
+ }
+ }
+ }
+
+ /* create feed files if needed */
+ for(feed = first_feed; feed != NULL; feed = feed->next_feed) {
+ int fd;
+
+ if (url_exist(feed->feed_filename)) {
+ /* See if it matches */
+ AVFormatContext *s;
+ int matches = 0;
+
+ if (av_open_input_file(&s, feed->feed_filename, NULL, FFM_PACKET_SIZE, NULL) >= 0) {
+ /* Now see if it matches */
+ if (s->nb_streams == feed->nb_streams) {
+ matches = 1;
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *sf, *ss;
+ sf = feed->streams[i];
+ ss = s->streams[i];
+
+ if (sf->index != ss->index ||
+ sf->id != ss->id) {
+ printf("Index & Id do not match for stream %d (%s)\n",
+ i, feed->feed_filename);
+ matches = 0;
+ } else {
+ AVCodecContext *ccf, *ccs;
+
+ ccf = sf->codec;
+ ccs = ss->codec;
+#define CHECK_CODEC(x) (ccf->x != ccs->x)
+
+ if (CHECK_CODEC(codec) || CHECK_CODEC(codec_type)) {
+ printf("Codecs do not match for stream %d\n", i);
+ matches = 0;
+ } else if (CHECK_CODEC(bit_rate) || CHECK_CODEC(flags)) {
+ printf("Codec bitrates do not match for stream %d\n", i);
+ matches = 0;
+ } else if (ccf->codec_type == CODEC_TYPE_VIDEO) {
+ if (CHECK_CODEC(time_base.den) ||
+ CHECK_CODEC(time_base.num) ||
+ CHECK_CODEC(width) ||
+ CHECK_CODEC(height)) {
+ printf("Codec width, height and framerate do not match for stream %d\n", i);
+ matches = 0;
+ }
+ } else if (ccf->codec_type == CODEC_TYPE_AUDIO) {
+ if (CHECK_CODEC(sample_rate) ||
+ CHECK_CODEC(channels) ||
+ CHECK_CODEC(frame_size)) {
+ printf("Codec sample_rate, channels, frame_size do not match for stream %d\n", i);
+ matches = 0;
+ }
+ } else {
+ printf("Unknown codec type\n");
+ matches = 0;
+ }
+ }
+ if (!matches) {
+ break;
+ }
+ }
+ } else {
+ printf("Deleting feed file '%s' as stream counts differ (%d != %d)\n",
+ feed->feed_filename, s->nb_streams, feed->nb_streams);
+ }
+
+ av_close_input_file(s);
+ } else {
+ printf("Deleting feed file '%s' as it appears to be corrupt\n",
+ feed->feed_filename);
+ }
+ if (!matches) {
+ if (feed->readonly) {
+ printf("Unable to delete feed file '%s' as it is marked readonly\n",
+ feed->feed_filename);
+ exit(1);
+ }
+ unlink(feed->feed_filename);
+ }
+ }
+ if (!url_exist(feed->feed_filename)) {
+ AVFormatContext s1, *s = &s1;
+
+ if (feed->readonly) {
+ printf("Unable to create feed file '%s' as it is marked readonly\n",
+ feed->feed_filename);
+ exit(1);
+ }
+
+ /* only write the header of the ffm file */
+ if (url_fopen(&s->pb, feed->feed_filename, URL_WRONLY) < 0) {
+ fprintf(stderr, "Could not open output feed file '%s'\n",
+ feed->feed_filename);
+ exit(1);
+ }
+ s->oformat = feed->fmt;
+ s->nb_streams = feed->nb_streams;
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st;
+ st = feed->streams[i];
+ s->streams[i] = st;
+ }
+ av_set_parameters(s, NULL);
+ av_write_header(s);
+ /* XXX: need better api */
+ av_freep(&s->priv_data);
+ url_fclose(&s->pb);
+ }
+ /* get feed size and write index */
+ fd = open(feed->feed_filename, O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "Could not open output feed file '%s'\n",
+ feed->feed_filename);
+ exit(1);
+ }
+
+ feed->feed_write_index = ffm_read_write_index(fd);
+ feed->feed_size = lseek(fd, 0, SEEK_END);
+ /* ensure that we do not wrap before the end of file */
+ if (feed->feed_max_size && feed->feed_max_size < feed->feed_size)
+ feed->feed_max_size = feed->feed_size;
+
+ close(fd);
+ }
+}
+
+/* compute the bandwidth used by each stream */
+static void compute_bandwidth(void)
+{
+ int bandwidth, i;
+ FFStream *stream;
+
+ for(stream = first_stream; stream != NULL; stream = stream->next) {
+ bandwidth = 0;
+ for(i=0;i<stream->nb_streams;i++) {
+ AVStream *st = stream->streams[i];
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ case CODEC_TYPE_VIDEO:
+ bandwidth += st->codec->bit_rate;
+ break;
+ default:
+ break;
+ }
+ }
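+ /* store the total, rounded up, in kbit/s */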
+ stream->bandwidth = (bandwidth + 999) / 1000;
+ }
+}
+
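+/* copy the next whitespace-delimited token from *pp into buf, honouring
+ single or double quotes; e.g. for an (illustrative) config line
+ Feed "my feed.ffm"
+ successive calls return "Feed" and then "my feed.ffm" with the quotes
+ stripped. *pp is advanced past the consumed token. */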
+static void get_arg(char *buf, int buf_size, const char **pp)
+{
+ const char *p;
+ char *q;
+ int quote;
+
+ p = *pp;
+ while (isspace(*p)) p++;
+ q = buf;
+ quote = 0;
+ if (*p == '\"' || *p == '\'')
+ quote = *p++;
+ for(;;) {
+ if (quote) {
+ if (*p == quote)
+ break;
+ } else {
+ if (isspace(*p))
+ break;
+ }
+ if (*p == '\0')
+ break;
+ if ((q - buf) < buf_size - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+ if (quote && *p == quote)
+ p++;
+ *pp = p;
+}
+
+/* add a codec and set the default parameters */
+static void add_codec(FFStream *stream, AVCodecContext *av)
+{
+ AVStream *st;
+
+ /* compute default parameters */
+ switch(av->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (av->bit_rate == 0)
+ av->bit_rate = 64000;
+ if (av->sample_rate == 0)
+ av->sample_rate = 22050;
+ if (av->channels == 0)
+ av->channels = 1;
+ break;
+ case CODEC_TYPE_VIDEO:
+ if (av->bit_rate == 0)
+ av->bit_rate = 64000;
+ if (av->time_base.num == 0){
+ av->time_base.den = 5;
+ av->time_base.num = 1;
+ }
+ if (av->width == 0 || av->height == 0) {
+ av->width = 160;
+ av->height = 128;
+ }
+ /* Bitrate tolerance is less for streaming */
+ if (av->bit_rate_tolerance == 0)
+ av->bit_rate_tolerance = av->bit_rate / 4;
+ if (av->qmin == 0)
+ av->qmin = 3;
+ if (av->qmax == 0)
+ av->qmax = 31;
+ if (av->max_qdiff == 0)
+ av->max_qdiff = 3;
+ av->qcompress = 0.5;
+ av->qblur = 0.5;
+
+ if (!av->nsse_weight)
+ av->nsse_weight = 8;
+
+ av->frame_skip_cmp = FF_CMP_DCTMAX;
+ av->me_method = ME_EPZS;
+ av->rc_buffer_aggressivity = 1.0;
+
+ if (!av->rc_eq)
+ av->rc_eq = "tex^qComp";
+ if (!av->i_quant_factor)
+ av->i_quant_factor = -0.8;
+ if (!av->b_quant_factor)
+ av->b_quant_factor = 1.25;
+ if (!av->b_quant_offset)
+ av->b_quant_offset = 1.25;
+ if (!av->rc_max_rate)
+ av->rc_max_rate = av->bit_rate * 2;
+
+ if (av->rc_max_rate && !av->rc_buffer_size) {
+ av->rc_buffer_size = av->rc_max_rate;
+ }
+
+
+ break;
+ default:
+ av_abort();
+ }
+
+ st = av_mallocz(sizeof(AVStream));
+ if (!st)
+ return;
+ st->codec = avcodec_alloc_context();
+ stream->streams[stream->nb_streams++] = st;
+ memcpy(st->codec, av, sizeof(AVCodecContext));
+}
+
+static int opt_audio_codec(const char *arg)
+{
+ AVCodec *p;
+
+ p = first_avcodec;
+ while (p) {
+ if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_AUDIO)
+ break;
+ p = p->next;
+ }
+ if (p == NULL) {
+ return CODEC_ID_NONE;
+ }
+
+ return p->id;
+}
+
+static int opt_video_codec(const char *arg)
+{
+ AVCodec *p;
+
+ p = first_avcodec;
+ while (p) {
+ if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_VIDEO)
+ break;
+ p = p->next;
+ }
+ if (p == NULL) {
+ return CODEC_ID_NONE;
+ }
+
+ return p->id;
+}
+
+/* simplistic plugin support */
+
+#ifdef HAVE_DLOPEN
+static void load_module(const char *filename)
+{
+ void *dll;
+ void (*init_func)(void);
+ dll = dlopen(filename, RTLD_NOW);
+ if (!dll) {
+ fprintf(stderr, "Could not load module '%s' - %s\n",
+ filename, dlerror());
+ return;
+ }
+
+ init_func = dlsym(dll, "ffserver_module_init");
+ if (!init_func) {
+ fprintf(stderr,
+ "%s: init function 'ffserver_module_init()' not found\n",
+ filename);
+ dlclose(dll);
+ return; /* init_func is NULL; calling it below would crash */
+ }
+
+ init_func();
+}
+#endif
+
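+/* parse the ffserver configuration file: a line-based format mixing
+ global options with <Feed>, <Stream> and <Redirect> sections, e.g.
+ (illustrative only):
+
+ Port 8090
+ <Feed feed1.ffm>
+ File /tmp/feed1.ffm
+ </Feed>
+ <Stream test.mpg>
+ Feed feed1.ffm
+ Format mpeg
+ </Stream>
+
+ Returns 0 on success and -1 if any error was found. */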
+static int parse_ffconfig(const char *filename)
+{
+ FILE *f;
+ char line[1024];
+ char cmd[64];
+ char arg[1024];
+ const char *p;
+ int val, errors, line_num;
+ FFStream **last_stream, *stream, *redirect;
+ FFStream **last_feed, *feed;
+ AVCodecContext audio_enc, video_enc;
+ int audio_id, video_id;
+
+ f = fopen(filename, "r");
+ if (!f) {
+ perror(filename);
+ return -1;
+ }
+
+ errors = 0;
+ line_num = 0;
+ first_stream = NULL;
+ last_stream = &first_stream;
+ first_feed = NULL;
+ last_feed = &first_feed;
+ stream = NULL;
+ feed = NULL;
+ redirect = NULL;
+ audio_id = CODEC_ID_NONE;
+ video_id = CODEC_ID_NONE;
+ for(;;) {
+ if (fgets(line, sizeof(line), f) == NULL)
+ break;
+ line_num++;
+ p = line;
+ while (isspace(*p))
+ p++;
+ if (*p == '\0' || *p == '#')
+ continue;
+
+ get_arg(cmd, sizeof(cmd), &p);
+
+ if (!strcasecmp(cmd, "Port")) {
+ get_arg(arg, sizeof(arg), &p);
+ my_http_addr.sin_port = htons (atoi(arg));
+ } else if (!strcasecmp(cmd, "BindAddress")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (!inet_aton(arg, &my_http_addr.sin_addr)) {
+ fprintf(stderr, "%s:%d: Invalid IP address: %s\n",
+ filename, line_num, arg);
+ errors++;
+ }
+ } else if (!strcasecmp(cmd, "NoDaemon")) {
+ ffserver_daemon = 0;
+ } else if (!strcasecmp(cmd, "RTSPPort")) {
+ get_arg(arg, sizeof(arg), &p);
+ my_rtsp_addr.sin_port = htons (atoi(arg));
+ } else if (!strcasecmp(cmd, "RTSPBindAddress")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (!inet_aton(arg, &my_rtsp_addr.sin_addr)) {
+ fprintf(stderr, "%s:%d: Invalid IP address: %s\n",
+ filename, line_num, arg);
+ errors++;
+ }
+ } else if (!strcasecmp(cmd, "MaxClients")) {
+ get_arg(arg, sizeof(arg), &p);
+ val = atoi(arg);
+ if (val < 1 || val > HTTP_MAX_CONNECTIONS) {
+ fprintf(stderr, "%s:%d: Invalid MaxClients: %s\n",
+ filename, line_num, arg);
+ errors++;
+ } else {
+ nb_max_connections = val;
+ }
+ } else if (!strcasecmp(cmd, "MaxBandwidth")) {
+ get_arg(arg, sizeof(arg), &p);
+ val = atoi(arg);
+ if (val < 10 || val > 100000) {
+ fprintf(stderr, "%s:%d: Invalid MaxBandwidth: %s\n",
+ filename, line_num, arg);
+ errors++;
+ } else {
+ max_bandwidth = val;
+ }
+ } else if (!strcasecmp(cmd, "CustomLog")) {
+ get_arg(logfilename, sizeof(logfilename), &p);
+ } else if (!strcasecmp(cmd, "<Feed")) {
+ /*********************************************/
+ /* Feed related options */
+ char *q;
+ if (stream || feed) {
+ fprintf(stderr, "%s:%d: Already in a tag\n",
+ filename, line_num);
+ } else {
+ feed = av_mallocz(sizeof(FFStream));
+ /* add in stream list */
+ *last_stream = feed;
+ last_stream = &feed->next;
+ /* add in feed list */
+ *last_feed = feed;
+ last_feed = &feed->next_feed;
+
+ get_arg(feed->filename, sizeof(feed->filename), &p);
+ q = strrchr(feed->filename, '>');
+ if (q)
+ *q = '\0';
+ feed->fmt = guess_format("ffm", NULL, NULL);
+ /* default feed file */
+ snprintf(feed->feed_filename, sizeof(feed->feed_filename),
+ "/tmp/%s.ffm", feed->filename);
+ feed->feed_max_size = 5 * 1024 * 1024;
+ feed->is_feed = 1;
+ feed->feed = feed; /* self feeding :-) */
+ }
+ } else if (!strcasecmp(cmd, "Launch")) {
+ if (feed) {
+ int i;
+
+ feed->child_argv = (char **) av_mallocz(64 * sizeof(char *));
+
+ for (i = 0; i < 62; i++) {
+ char argbuf[256];
+
+ get_arg(argbuf, sizeof(argbuf), &p);
+ if (!argbuf[0])
+ break;
+
+ feed->child_argv[i] = av_malloc(strlen(argbuf) + 1);
+ strcpy(feed->child_argv[i], argbuf);
+ }
+
+ feed->child_argv[i] = av_malloc(30 + strlen(feed->filename));
+
+ snprintf(feed->child_argv[i], 30+strlen(feed->filename),
+ "http://%s:%d/%s",
+ (my_http_addr.sin_addr.s_addr == INADDR_ANY) ? "127.0.0.1" :
+ inet_ntoa(my_http_addr.sin_addr),
+ ntohs(my_http_addr.sin_port), feed->filename);
+
+ if (ffserver_debug)
+ {
+ int j;
+ fprintf(stdout, "Launch commandline: ");
+ for (j = 0; j <= i; j++)
+ fprintf(stdout, "%s ", feed->child_argv[j]);
+ fprintf(stdout, "\n");
+ }
+ }
+ } else if (!strcasecmp(cmd, "ReadOnlyFile")) {
+ if (feed) {
+ get_arg(feed->feed_filename, sizeof(feed->feed_filename), &p);
+ feed->readonly = 1;
+ } else if (stream) {
+ get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
+ }
+ } else if (!strcasecmp(cmd, "File")) {
+ if (feed) {
+ get_arg(feed->feed_filename, sizeof(feed->feed_filename), &p);
+ } else if (stream) {
+ get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
+ }
+ } else if (!strcasecmp(cmd, "FileMaxSize")) {
+ if (feed) {
+ const char *p1;
+ double fsize;
+
+ get_arg(arg, sizeof(arg), &p);
+ p1 = arg;
+ fsize = strtod(p1, (char **)&p1);
+ switch(toupper(*p1)) {
+ case 'K':
+ fsize *= 1024;
+ break;
+ case 'M':
+ fsize *= 1024 * 1024;
+ break;
+ case 'G':
+ fsize *= 1024 * 1024 * 1024;
+ break;
+ }
+ feed->feed_max_size = (int64_t)fsize;
+ }
+ } else if (!strcasecmp(cmd, "</Feed>")) {
+ if (!feed) {
+ fprintf(stderr, "%s:%d: No corresponding <Feed> for </Feed>\n",
+ filename, line_num);
+ errors++;
+#if 0
+ } else {
+ /* Make sure that we start out clean */
+ if (unlink(feed->feed_filename) < 0
+ && errno != ENOENT) {
+ fprintf(stderr, "%s:%d: Unable to clean old feed file '%s': %s\n",
+ filename, line_num, feed->feed_filename, strerror(errno));
+ errors++;
+ }
+#endif
+ }
+ feed = NULL;
+ } else if (!strcasecmp(cmd, "<Stream")) {
+ /*********************************************/
+ /* Stream related options */
+ char *q;
+ if (stream || feed) {
+ fprintf(stderr, "%s:%d: Already in a tag\n",
+ filename, line_num);
+ } else {
+ stream = av_mallocz(sizeof(FFStream));
+ *last_stream = stream;
+ last_stream = &stream->next;
+
+ get_arg(stream->filename, sizeof(stream->filename), &p);
+ q = strrchr(stream->filename, '>');
+ if (q)
+ *q = '\0';
+ stream->fmt = guess_stream_format(NULL, stream->filename, NULL);
+ memset(&audio_enc, 0, sizeof(AVCodecContext));
+ memset(&video_enc, 0, sizeof(AVCodecContext));
+ audio_id = CODEC_ID_NONE;
+ video_id = CODEC_ID_NONE;
+ if (stream->fmt) {
+ audio_id = stream->fmt->audio_codec;
+ video_id = stream->fmt->video_codec;
+ }
+ }
+ } else if (!strcasecmp(cmd, "Feed")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ FFStream *sfeed;
+
+ sfeed = first_feed;
+ while (sfeed != NULL) {
+ if (!strcmp(sfeed->filename, arg))
+ break;
+ sfeed = sfeed->next_feed;
+ }
+ if (!sfeed) {
+ fprintf(stderr, "%s:%d: feed '%s' not defined\n",
+ filename, line_num, arg);
+ } else {
+ stream->feed = sfeed;
+ }
+ }
+ } else if (!strcasecmp(cmd, "Format")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (!strcmp(arg, "status")) {
+ stream->stream_type = STREAM_TYPE_STATUS;
+ stream->fmt = NULL;
+ } else {
+ stream->stream_type = STREAM_TYPE_LIVE;
+ /* jpeg cannot be used here, so use single frame jpeg */
+ if (!strcmp(arg, "jpeg"))
+ strcpy(arg, "mjpeg");
+ stream->fmt = guess_stream_format(arg, NULL, NULL);
+ if (!stream->fmt) {
+ fprintf(stderr, "%s:%d: Unknown Format: %s\n",
+ filename, line_num, arg);
+ errors++;
+ }
+ }
+ if (stream->fmt) {
+ audio_id = stream->fmt->audio_codec;
+ video_id = stream->fmt->video_codec;
+ }
+ } else if (!strcasecmp(cmd, "InputFormat")) {
+ stream->ifmt = av_find_input_format(arg);
+ if (!stream->ifmt) {
+ fprintf(stderr, "%s:%d: Unknown input format: %s\n",
+ filename, line_num, arg);
+ }
+ } else if (!strcasecmp(cmd, "FaviconURL")) {
+ if (stream && stream->stream_type == STREAM_TYPE_STATUS) {
+ get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
+ } else {
+ fprintf(stderr, "%s:%d: FaviconURL only permitted for status streams\n",
+ filename, line_num);
+ errors++;
+ }
+ } else if (!strcasecmp(cmd, "Author")) {
+ if (stream) {
+ get_arg(stream->author, sizeof(stream->author), &p);
+ }
+ } else if (!strcasecmp(cmd, "Comment")) {
+ if (stream) {
+ get_arg(stream->comment, sizeof(stream->comment), &p);
+ }
+ } else if (!strcasecmp(cmd, "Copyright")) {
+ if (stream) {
+ get_arg(stream->copyright, sizeof(stream->copyright), &p);
+ }
+ } else if (!strcasecmp(cmd, "Title")) {
+ if (stream) {
+ get_arg(stream->title, sizeof(stream->title), &p);
+ }
+ } else if (!strcasecmp(cmd, "Preroll")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ stream->prebuffer = atof(arg) * 1000;
+ }
+ } else if (!strcasecmp(cmd, "StartSendOnKey")) {
+ if (stream) {
+ stream->send_on_key = 1;
+ }
+ } else if (!strcasecmp(cmd, "AudioCodec")) {
+ get_arg(arg, sizeof(arg), &p);
+ audio_id = opt_audio_codec(arg);
+ if (audio_id == CODEC_ID_NONE) {
+ fprintf(stderr, "%s:%d: Unknown AudioCodec: %s\n",
+ filename, line_num, arg);
+ errors++;
+ }
+ } else if (!strcasecmp(cmd, "VideoCodec")) {
+ get_arg(arg, sizeof(arg), &p);
+ video_id = opt_video_codec(arg);
+ if (video_id == CODEC_ID_NONE) {
+ fprintf(stderr, "%s:%d: Unknown VideoCodec: %s\n",
+ filename, line_num, arg);
+ errors++;
+ }
+ } else if (!strcasecmp(cmd, "MaxTime")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ stream->max_time = atof(arg) * 1000;
+ }
+ } else if (!strcasecmp(cmd, "AudioBitRate")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ audio_enc.bit_rate = atoi(arg) * 1000;
+ }
+ } else if (!strcasecmp(cmd, "AudioChannels")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ audio_enc.channels = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "AudioSampleRate")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ audio_enc.sample_rate = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "AudioQuality")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+// audio_enc.quality = atof(arg) * 1000;
+ }
+ } else if (!strcasecmp(cmd, "VideoBitRateRange")) {
+ if (stream) {
+ int minrate, maxrate;
+
+ get_arg(arg, sizeof(arg), &p);
+
+ if (sscanf(arg, "%d-%d", &minrate, &maxrate) == 2) {
+ video_enc.rc_min_rate = minrate * 1000;
+ video_enc.rc_max_rate = maxrate * 1000;
+ } else {
+ fprintf(stderr, "%s:%d: Incorrect format for VideoBitRateRange -- should be <min>-<max>: %s\n",
+ filename, line_num, arg);
+ errors++;
+ }
+ }
+ } else if (!strcasecmp(cmd, "Debug")) {
+ if (stream) {
+ get_arg(arg, sizeof(arg), &p);
+ video_enc.debug = strtol(arg,0,0);
+ }
+ } else if (!strcasecmp(cmd, "Strict")) {
+ if (stream) {
+ get_arg(arg, sizeof(arg), &p);
+ video_enc.strict_std_compliance = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "VideoBufferSize")) {
+ if (stream) {
+ get_arg(arg, sizeof(arg), &p);
+ video_enc.rc_buffer_size = atoi(arg) * 8*1024;
+ }
+ } else if (!strcasecmp(cmd, "VideoBitRateTolerance")) {
+ if (stream) {
+ get_arg(arg, sizeof(arg), &p);
+ video_enc.bit_rate_tolerance = atoi(arg) * 1000;
+ }
+ } else if (!strcasecmp(cmd, "VideoBitRate")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.bit_rate = atoi(arg) * 1000;
+ }
+ } else if (!strcasecmp(cmd, "VideoSize")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ parse_image_size(&video_enc.width, &video_enc.height, arg);
+ if ((video_enc.width % 16) != 0 ||
+ (video_enc.height % 16) != 0) {
+ fprintf(stderr, "%s:%d: Image size must be a multiple of 16\n",
+ filename, line_num);
+ errors++;
+ }
+ }
+ } else if (!strcasecmp(cmd, "VideoFrameRate")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.time_base.num= DEFAULT_FRAME_RATE_BASE;
+ video_enc.time_base.den = (int)(strtod(arg, NULL) * video_enc.time_base.num);
+ }
+ } else if (!strcasecmp(cmd, "VideoGopSize")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.gop_size = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "VideoIntraOnly")) {
+ if (stream) {
+ video_enc.gop_size = 1;
+ }
+ } else if (!strcasecmp(cmd, "VideoHighQuality")) {
+ if (stream) {
+ video_enc.mb_decision = FF_MB_DECISION_BITS;
+ }
+ } else if (!strcasecmp(cmd, "Video4MotionVector")) {
+ if (stream) {
+ video_enc.mb_decision = FF_MB_DECISION_BITS; //FIXME remove
+ video_enc.flags |= CODEC_FLAG_4MV;
+ }
+ } else if (!strcasecmp(cmd, "BitExact")) {
+ if (stream) {
+ video_enc.flags |= CODEC_FLAG_BITEXACT;
+ }
+ } else if (!strcasecmp(cmd, "DctFastint")) {
+ if (stream) {
+ video_enc.dct_algo = FF_DCT_FASTINT;
+ }
+ } else if (!strcasecmp(cmd, "IdctSimple")) {
+ if (stream) {
+ video_enc.idct_algo = FF_IDCT_SIMPLE;
+ }
+ } else if (!strcasecmp(cmd, "Qscale")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.flags |= CODEC_FLAG_QSCALE;
+ video_enc.global_quality = FF_QP2LAMBDA * atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "VideoQDiff")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.max_qdiff = atoi(arg);
+ if (video_enc.max_qdiff < 1 || video_enc.max_qdiff > 31) {
+ fprintf(stderr, "%s:%d: VideoQDiff out of range\n",
+ filename, line_num);
+ errors++;
+ }
+ }
+ } else if (!strcasecmp(cmd, "VideoQMax")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.qmax = atoi(arg);
+ if (video_enc.qmax < 1 || video_enc.qmax > 31) {
+ fprintf(stderr, "%s:%d: VideoQMax out of range\n",
+ filename, line_num);
+ errors++;
+ }
+ }
+ } else if (!strcasecmp(cmd, "VideoQMin")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.qmin = atoi(arg);
+ if (video_enc.qmin < 1 || video_enc.qmin > 31) {
+ fprintf(stderr, "%s:%d: VideoQMin out of range\n",
+ filename, line_num);
+ errors++;
+ }
+ }
+ } else if (!strcasecmp(cmd, "LumaElim")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.luma_elim_threshold = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "ChromaElim")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.chroma_elim_threshold = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "LumiMask")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.lumi_masking = atof(arg);
+ }
+ } else if (!strcasecmp(cmd, "DarkMask")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ video_enc.dark_masking = atof(arg);
+ }
+ } else if (!strcasecmp(cmd, "NoVideo")) {
+ video_id = CODEC_ID_NONE;
+ } else if (!strcasecmp(cmd, "NoAudio")) {
+ audio_id = CODEC_ID_NONE;
+ } else if (!strcasecmp(cmd, "ACL")) {
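+ /* syntax: ACL (allow|deny) <address or host> [<last address of range>] */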
+ IPAddressACL acl;
+ struct hostent *he;
+
+ get_arg(arg, sizeof(arg), &p);
+ if (strcasecmp(arg, "allow") == 0) {
+ acl.action = IP_ALLOW;
+ } else if (strcasecmp(arg, "deny") == 0) {
+ acl.action = IP_DENY;
+ } else {
+ fprintf(stderr, "%s:%d: ACL action '%s' is not ALLOW or DENY\n",
+ filename, line_num, arg);
+ errors++;
+ }
+
+ get_arg(arg, sizeof(arg), &p);
+
+ he = gethostbyname(arg);
+ if (!he) {
+ fprintf(stderr, "%s:%d: ACL refers to invalid host or ip address '%s'\n",
+ filename, line_num, arg);
+ errors++;
+ } else {
+ /* Only take the first */
+ acl.first.s_addr = ntohl(((struct in_addr *) he->h_addr_list[0])->s_addr);
+ acl.last = acl.first;
+ }
+
+ get_arg(arg, sizeof(arg), &p);
+
+ if (arg[0]) {
+ he = gethostbyname(arg);
+ if (!he) {
+ fprintf(stderr, "%s:%d: ACL refers to invalid host or ip address '%s'\n",
+ filename, line_num, arg);
+ errors++;
+ } else {
+ /* Only take the first */
+ acl.last.s_addr = ntohl(((struct in_addr *) he->h_addr_list[0])->s_addr);
+ }
+ }
+
+ if (!errors) {
+ IPAddressACL *nacl = (IPAddressACL *) av_mallocz(sizeof(*nacl));
+ IPAddressACL **naclp = 0;
+
+ *nacl = acl;
+ nacl->next = 0;
+
+ if (stream) {
+ naclp = &stream->acl;
+ } else if (feed) {
+ naclp = &feed->acl;
+ } else {
+ fprintf(stderr, "%s:%d: ACL found not in <stream> or <feed>\n",
+ filename, line_num);
+ errors++;
+ }
+
+ if (naclp) {
+ while (*naclp)
+ naclp = &(*naclp)->next;
+
+ *naclp = nacl;
+ }
+ }
+ } else if (!strcasecmp(cmd, "RTSPOption")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ av_freep(&stream->rtsp_option);
+ /* XXX: av_strdup ? */
+ stream->rtsp_option = av_malloc(strlen(arg) + 1);
+ if (stream->rtsp_option) {
+ strcpy(stream->rtsp_option, arg);
+ }
+ }
+ } else if (!strcasecmp(cmd, "MulticastAddress")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ if (!inet_aton(arg, &stream->multicast_ip)) {
+ fprintf(stderr, "%s:%d: Invalid IP address: %s\n",
+ filename, line_num, arg);
+ errors++;
+ }
+ stream->is_multicast = 1;
+ stream->loop = 1; /* default is looping */
+ }
+ } else if (!strcasecmp(cmd, "MulticastPort")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ stream->multicast_port = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "MulticastTTL")) {
+ get_arg(arg, sizeof(arg), &p);
+ if (stream) {
+ stream->multicast_ttl = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "NoLoop")) {
+ if (stream) {
+ stream->loop = 0;
+ }
+ } else if (!strcasecmp(cmd, "</Stream>")) {
+ if (!stream) {
+ fprintf(stderr, "%s:%d: No corresponding <Stream> for </Stream>\n",
+ filename, line_num);
+ errors++;
+ }
+ if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) {
+ if (audio_id != CODEC_ID_NONE) {
+ audio_enc.codec_type = CODEC_TYPE_AUDIO;
+ audio_enc.codec_id = audio_id;
+ add_codec(stream, &audio_enc);
+ }
+ if (video_id != CODEC_ID_NONE) {
+ video_enc.codec_type = CODEC_TYPE_VIDEO;
+ video_enc.codec_id = video_id;
+ add_codec(stream, &video_enc);
+ }
+ }
+ stream = NULL;
+ } else if (!strcasecmp(cmd, "<Redirect")) {
+ /*********************************************/
+ char *q;
+ if (stream || feed || redirect) {
+ fprintf(stderr, "%s:%d: Already in a tag\n",
+ filename, line_num);
+ errors++;
+ } else {
+ redirect = av_mallocz(sizeof(FFStream));
+ *last_stream = redirect;
+ last_stream = &redirect->next;
+
+ get_arg(redirect->filename, sizeof(redirect->filename), &p);
+ q = strrchr(redirect->filename, '>');
+ if (q)
+ *q = '\0';
+ redirect->stream_type = STREAM_TYPE_REDIRECT;
+ }
+ } else if (!strcasecmp(cmd, "URL")) {
+ if (redirect) {
+ get_arg(redirect->feed_filename, sizeof(redirect->feed_filename), &p);
+ }
+ } else if (!strcasecmp(cmd, "</Redirect>")) {
+ if (!redirect) {
+ fprintf(stderr, "%s:%d: No corresponding <Redirect> for </Redirect>\n",
+ filename, line_num);
+ errors++;
+ }
+ if (redirect && !redirect->feed_filename[0]) {
+ fprintf(stderr, "%s:%d: No URL found for <Redirect>\n",
+ filename, line_num);
+ errors++;
+ }
+ redirect = NULL;
+ } else if (!strcasecmp(cmd, "LoadModule")) {
+ get_arg(arg, sizeof(arg), &p);
+#ifdef HAVE_DLOPEN
+ load_module(arg);
+#else
+ fprintf(stderr, "%s:%d: Module support not compiled into this version: '%s'\n",
+ filename, line_num, arg);
+ errors++;
+#endif
+ } else {
+ fprintf(stderr, "%s:%d: Incorrect keyword: '%s'\n",
+ filename, line_num, cmd);
+ errors++;
+ }
+ }
+
+ fclose(f);
+ if (errors)
+ return -1;
+ else
+ return 0;
+}
+
+
+#if 0
+static void write_packet(FFCodec *ffenc,
+ uint8_t *buf, int size)
+{
+ PacketHeader hdr;
+ AVCodecContext *enc = &ffenc->enc;
+ uint8_t *wptr;
+ mk_header(&hdr, enc, size);
+ wptr = http_fifo.wptr;
+ fifo_write(&http_fifo, (uint8_t *)&hdr, sizeof(hdr), &wptr);
+ fifo_write(&http_fifo, buf, size, &wptr);
+ /* atomic modification of wptr */
+ http_fifo.wptr = wptr;
+ ffenc->data_count += size;
+ ffenc->avg_frame_size = ffenc->avg_frame_size * AVG_COEF + size * (1.0 - AVG_COEF);
+}
+#endif
+
+static void show_banner(void)
+{
+ printf("ffserver version " FFMPEG_VERSION ", Copyright (c) 2000-2006 Fabrice Bellard, et al.\n");
+}
+
+static void show_help(void)
+{
+ show_banner();
+ printf("usage: ffserver [-L] [-h] [-f configfile]\n"
+ "Hyper fast multi format Audio/Video streaming server\n"
+ "\n"
+ "-L : print the LICENSE\n"
+ "-h : this help\n"
+ "-f configfile : use configfile instead of /etc/ffserver.conf\n"
+ );
+}
+
+static void show_license(void)
+{
+ show_banner();
+ printf(
+ "FFmpeg is free software; you can redistribute it and/or\n"
+ "modify it under the terms of the GNU Lesser General Public\n"
+ "License as published by the Free Software Foundation; either\n"
+ "version 2.1 of the License, or (at your option) any later version.\n"
+ "\n"
+ "FFmpeg is distributed in the hope that it will be useful,\n"
+ "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
+ "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n"
+ "Lesser General Public License for more details.\n"
+ "\n"
+ "You should have received a copy of the GNU Lesser General Public\n"
+ "License along with FFmpeg; if not, write to the Free Software\n"
+ "Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n"
+ );
+}
+
+static void handle_child_exit(int sig)
+{
+ pid_t pid;
+ int status;
+
+ while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
+ FFStream *feed;
+
+ for (feed = first_feed; feed; feed = feed->next) {
+ if (feed->pid == pid) {
+ int uptime = time(0) - feed->pid_start;
+
+ feed->pid = 0;
+ fprintf(stderr, "%s: Pid %d exited with status %d after %d seconds\n", feed->filename, pid, status, uptime);
+
+ if (uptime < 30) {
+ /* Turn off any more restarts */
+ feed->child_argv = 0;
+ }
+ }
+ }
+ }
+
+ need_to_start_children = 1;
+}
+
+int main(int argc, char **argv)
+{
+ const char *config_filename;
+ int c;
+ struct sigaction sigact;
+
+ av_register_all();
+
+ config_filename = "/etc/ffserver.conf";
+
+ my_program_name = argv[0];
+ my_program_dir = getcwd(0, 0);
+ ffserver_daemon = 1;
+
+ for(;;) {
+ c = getopt(argc, argv, "ndLh?f:");
+ if (c == -1)
+ break;
+ switch(c) {
+ case 'L':
+ show_license();
+ exit(1);
+ case '?':
+ case 'h':
+ show_help();
+ exit(1);
+ case 'n':
+ no_launch = 1;
+ break;
+ case 'd':
+ ffserver_debug = 1;
+ ffserver_daemon = 0;
+ break;
+ case 'f':
+ config_filename = optarg;
+ break;
+ default:
+ exit(2);
+ }
+ }
+
+ putenv("http_proxy"); /* Kill the http_proxy */
+
+ srandom(gettime_ms() + (getpid() << 16));
+
+ /* address on which the server will handle HTTP connections */
+ my_http_addr.sin_family = AF_INET;
+ my_http_addr.sin_port = htons (8080);
+ my_http_addr.sin_addr.s_addr = htonl (INADDR_ANY);
+
+ /* address on which the server will handle RTSP connections */
+ my_rtsp_addr.sin_family = AF_INET;
+ my_rtsp_addr.sin_port = htons (5454);
+ my_rtsp_addr.sin_addr.s_addr = htonl (INADDR_ANY);
+
+ nb_max_connections = 5;
+ max_bandwidth = 1000;
+ first_stream = NULL;
+ logfilename[0] = '\0';
+
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = handle_child_exit;
+ sigact.sa_flags = SA_NOCLDSTOP | SA_RESTART;
+ sigaction(SIGCHLD, &sigact, 0);
+
+ if (parse_ffconfig(config_filename) < 0) {
+ fprintf(stderr, "Incorrect config file - exiting.\n");
+ exit(1);
+ }
+
+ build_file_streams();
+
+ build_feed_streams();
+
+ compute_bandwidth();
+
+ /* put the process in background and detach it from its TTY */
+ if (ffserver_daemon) {
+ int pid;
+
+ pid = fork();
+ if (pid < 0) {
+ perror("fork");
+ exit(1);
+ } else if (pid > 0) {
+ /* parent : exit */
+ exit(0);
+ } else {
+ /* child */
+ setsid();
+ chdir("/");
+ close(0);
+ open("/dev/null", O_RDWR);
+ if (strcmp(logfilename, "-") != 0) {
+ close(1);
+ dup(0);
+ }
+ close(2);
+ dup(0);
+ }
+ }
+
+ /* signal init */
+ signal(SIGPIPE, SIG_IGN);
+
+ /* open log file if needed */
+ if (logfilename[0] != '\0') {
+ if (!strcmp(logfilename, "-"))
+ logfile = stdout;
+ else
+ logfile = fopen(logfilename, "w");
+ }
+
+ if (http_server() < 0) {
+ fprintf(stderr, "Could not start server\n");
+ exit(1);
+ }
+
+ return 0;
+}
diff --git a/contrib/ffmpeg/ffserver.h b/contrib/ffmpeg/ffserver.h
new file mode 100644
index 000000000..868e4cd9b
--- /dev/null
+++ b/contrib/ffmpeg/ffserver.h
@@ -0,0 +1,8 @@
+#ifndef FFSERVER_H
+#define FFSERVER_H
+
+/* interface between ffserver and modules */
+
+void ffserver_module_init(void);
+
+#endif
diff --git a/src/libffmpeg/libavcodec/4xm.c b/contrib/ffmpeg/libavcodec/4xm.c
index a986f151e..ea60e9bf2 100644
--- a/src/libffmpeg/libavcodec/4xm.c
+++ b/contrib/ffmpeg/libavcodec/4xm.c
@@ -2,18 +2,20 @@
* 4XM codec
* Copyright (c) 2003 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -104,8 +106,8 @@ static VLC block_type_vlc[4];
typedef struct CFrameBuffer{
- int allocated_size;
- int size;
+ unsigned int allocated_size;
+ unsigned int size;
int id;
uint8_t *data;
}CFrameBuffer;
diff --git a/src/libffmpeg/libavcodec/8bps.c b/contrib/ffmpeg/libavcodec/8bps.c
index b16e3bb56..297465043 100644
--- a/src/libffmpeg/libavcodec/8bps.c
+++ b/contrib/ffmpeg/libavcodec/8bps.c
@@ -2,18 +2,20 @@
* Quicktime Planar RGB (8BPS) Video Decoder
* Copyright (C) 2003 Roberto Togni
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -37,7 +39,7 @@
#include "avcodec.h"
-const enum PixelFormat pixfmt_rgb24[] = {PIX_FMT_BGR24, PIX_FMT_RGBA32, -1};
+static const enum PixelFormat pixfmt_rgb24[] = {PIX_FMT_BGR24, PIX_FMT_RGBA32, -1};
/*
* Decoder context
diff --git a/contrib/ffmpeg/libavcodec/Makefile b/contrib/ffmpeg/libavcodec/Makefile
new file mode 100644
index 000000000..03c1ae43d
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/Makefile
@@ -0,0 +1,456 @@
+#
+# libavcodec Makefile
+# (c) 2000-2005 Fabrice Bellard
+#
+include ../config.mak
+
+CFLAGS+=-I$(SRC_PATH)/libswscale $(AMR_CFLAGS)
+
+OBJS= bitstream.o \
+ utils.o \
+ allcodecs.o \
+ mpegvideo.o \
+ jrevdct.o \
+ jfdctfst.o \
+ jfdctint.o\
+ mjpeg.o \
+ resample.o \
+ resample2.o \
+ dsputil.o \
+ motion_est.o \
+ imgconvert.o \
+ mpeg12.o \
+ mpegaudiodec.o \
+ simple_idct.o \
+ ratecontrol.o \
+ eval.o \
+ error_resilience.o \
+ fft.o \
+ mdct.o \
+ raw.o \
+ golomb.o \
+ cabac.o\
+ faandct.o \
+ parser.o \
+ vp3dsp.o \
+ h264idct.o \
+ rangecoder.o \
+ pnm.o \
+ h263.o \
+ msmpeg4.o \
+ h263dec.o \
+ opt.o \
+ bitstream_filter.o \
+ audioconvert.o \
+
+
+HEADERS = avcodec.h opt.h
+
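+# per-codec objects: configure writes CONFIG_* = yes into config.mak for
+# each enabled codec, so these accumulate in OBJS-yes and are merged
+# into OBJS near the end of this file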
+OBJS-$(CONFIG_AASC_DECODER) += aasc.o
+OBJS-$(CONFIG_AC3_ENCODER) += ac3enc.o
+OBJS-$(CONFIG_ALAC_DECODER) += alac.o
+OBJS-$(CONFIG_ASV1_DECODER) += asv1.o
+OBJS-$(CONFIG_ASV1_ENCODER) += asv1.o
+OBJS-$(CONFIG_ASV2_DECODER) += asv1.o
+OBJS-$(CONFIG_ASV2_ENCODER) += asv1.o
+OBJS-$(CONFIG_AVS_DECODER) += avs.o
+OBJS-$(CONFIG_BMP_DECODER) += bmp.o
+OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdsp.o
+OBJS-$(CONFIG_CINEPAK_DECODER) += cinepak.o
+OBJS-$(CONFIG_CLJR_DECODER) += cljr.o
+OBJS-$(CONFIG_CLJR_ENCODER) += cljr.o
+OBJS-$(CONFIG_COOK_DECODER) += cook.o
+OBJS-$(CONFIG_CSCD_DECODER) += cscd.o lzo.o
+OBJS-$(CONFIG_CYUV_DECODER) += cyuv.o
+OBJS-$(CONFIG_DSICINVIDEO_DECODER) += dsicinav.o
+OBJS-$(CONFIG_DSICINAUDIO_DECODER) += dsicinav.o
+OBJS-$(CONFIG_DVBSUB_DECODER) += dvbsubdec.o
+OBJS-$(CONFIG_DVBSUB_ENCODER) += dvbsub.o
+OBJS-$(CONFIG_DVDSUB_DECODER) += dvdsubdec.o
+OBJS-$(CONFIG_DVDSUB_ENCODER) += dvdsubenc.o
+OBJS-$(CONFIG_DVVIDEO_DECODER) += dv.o
+OBJS-$(CONFIG_DVVIDEO_ENCODER) += dv.o
+OBJS-$(CONFIG_EIGHTBPS_DECODER) += 8bps.o
+OBJS-$(CONFIG_FFV1_DECODER) += ffv1.o
+OBJS-$(CONFIG_FFV1_ENCODER) += ffv1.o
+OBJS-$(CONFIG_FFVHUFF_DECODER) += huffyuv.o
+OBJS-$(CONFIG_FFVHUFF_ENCODER) += huffyuv.o
+OBJS-$(CONFIG_FLAC_DECODER) += flac.o
+OBJS-$(CONFIG_FLAC_ENCODER) += flacenc.o
+OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
+OBJS-$(CONFIG_FLIC_DECODER) += flicvideo.o
+OBJS-$(CONFIG_FOURXM_DECODER) += 4xm.o
+OBJS-$(CONFIG_FRAPS_DECODER) += fraps.o
+OBJS-$(CONFIG_GIF_DECODER) += gifdec.o lzw.o
+OBJS-$(CONFIG_GIF_ENCODER) += gif.o
+OBJS-$(CONFIG_H261_DECODER) += h261.o
+OBJS-$(CONFIG_H261_ENCODER) += h261.o
+OBJS-$(CONFIG_H264_DECODER) += h264.o
+OBJS-$(CONFIG_HUFFYUV_DECODER) += huffyuv.o
+OBJS-$(CONFIG_HUFFYUV_ENCODER) += huffyuv.o
+OBJS-$(CONFIG_IDCIN_DECODER) += idcinvideo.o
+OBJS-$(CONFIG_IMC_DECODER) += imc.o
+OBJS-$(CONFIG_INDEO2_DECODER) += indeo2.o
+OBJS-$(CONFIG_INDEO3_DECODER) += indeo3.o
+OBJS-$(CONFIG_INTERPLAY_VIDEO_DECODER) += interplayvideo.o
+OBJS-$(CONFIG_INTERPLAY_DPCM_DECODER) += dpcm.o
+OBJS-$(CONFIG_KMVC_DECODER) += kmvc.o
+OBJS-$(CONFIG_LOCO_DECODER) += loco.o
+OBJS-$(CONFIG_MACE3_DECODER) += mace.o
+OBJS-$(CONFIG_MACE6_DECODER) += mace.o
+OBJS-$(CONFIG_MMVIDEO_DECODER) += mmvideo.o
+OBJS-$(CONFIG_MP2_ENCODER) += mpegaudio.o
+OBJS-$(CONFIG_MSRLE_DECODER) += msrle.o
+OBJS-$(CONFIG_MSVIDEO1_DECODER) += msvideo1.o
+OBJS-$(CONFIG_MSZH_DECODER) += lcl.o
+OBJS-$(CONFIG_NUV_DECODER) += nuv.o rtjpeg.o lzo.o
+OBJS-$(CONFIG_PNG_DECODER) += png.o
+OBJS-$(CONFIG_PNG_ENCODER) += png.o
+OBJS-$(CONFIG_QDM2_DECODER) += qdm2.o
+OBJS-$(CONFIG_QDRAW_DECODER) += qdrw.o
+OBJS-$(CONFIG_QPEG_DECODER) += qpeg.o
+OBJS-$(CONFIG_QTRLE_DECODER) += qtrle.o
+OBJS-$(CONFIG_RA_144_DECODER) += ra144.o
+OBJS-$(CONFIG_RA_288_DECODER) += ra288.o
+OBJS-$(CONFIG_ROQ_DECODER) += roqvideo.o
+OBJS-$(CONFIG_ROQ_DPCM_DECODER) += dpcm.o
+OBJS-$(CONFIG_RPZA_DECODER) += rpza.o
+OBJS-$(CONFIG_RV10_DECODER) += rv10.o
+OBJS-$(CONFIG_RV10_ENCODER) += rv10.o
+OBJS-$(CONFIG_RV20_DECODER) += rv10.o
+OBJS-$(CONFIG_RV20_ENCODER) += rv10.o
+OBJS-$(CONFIG_SHORTEN_DECODER) += shorten.o
+OBJS-$(CONFIG_SMACKAUD_DECODER) += smacker.o
+OBJS-$(CONFIG_SMACKER_DECODER) += smacker.o
+OBJS-$(CONFIG_SMC_DECODER) += smc.o
+OBJS-$(CONFIG_SNOW_DECODER) += snow.o
+OBJS-$(CONFIG_SNOW_ENCODER) += snow.o
+OBJS-$(CONFIG_SOL_DPCM_DECODER) += dpcm.o
+OBJS-$(CONFIG_SONIC_DECODER) += sonic.o
+OBJS-$(CONFIG_SONIC_ENCODER) += sonic.o
+OBJS-$(CONFIG_SONIC_LS_DECODER) += sonic.o
+OBJS-$(CONFIG_SVQ1_DECODER) += svq1.o
+OBJS-$(CONFIG_SVQ1_ENCODER) += svq1.o
+OBJS-$(CONFIG_SVQ3_DECODER) += h264.o
+OBJS-$(CONFIG_TARGA_DECODER) += targa.o
+OBJS-$(CONFIG_THEORA_DECODER) += vp3.o
+OBJS-$(CONFIG_TIERTEXSEQVIDEO_DECODER) += tiertexseqv.o
+OBJS-$(CONFIG_TIFF_DECODER) += tiff.o lzw.o
+OBJS-$(CONFIG_TRUEMOTION1_DECODER) += truemotion1.o
+OBJS-$(CONFIG_TRUEMOTION2_DECODER) += truemotion2.o
+OBJS-$(CONFIG_TRUESPEECH_DECODER) += truespeech.o
+OBJS-$(CONFIG_TSCC_DECODER) += tscc.o
+OBJS-$(CONFIG_TTA_DECODER) += tta.o
+OBJS-$(CONFIG_ULTI_DECODER) += ulti.o
+OBJS-$(CONFIG_VC1_DECODER) += vc1.o vc1dsp.o
+OBJS-$(CONFIG_VCR1_DECODER) += vcr1.o
+OBJS-$(CONFIG_VCR1_ENCODER) += vcr1.o
+OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdav.o
+OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdav.o
+OBJS-$(CONFIG_VMNC_DECODER) += vmnc.o
+OBJS-$(CONFIG_VORBIS_DECODER) += vorbis.o vorbis_data.o
+OBJS-$(CONFIG_VORBIS_ENCODER) += vorbis_enc.o vorbis.o vorbis_data.o
+OBJS-$(CONFIG_VP3_DECODER) += vp3.o
+OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o
+OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o
+OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
+OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
+OBJS-$(CONFIG_WMAV1_DECODER) += wmadec.o
+OBJS-$(CONFIG_WMAV2_DECODER) += wmadec.o
+OBJS-$(CONFIG_WMV3_DECODER) += vc1.o vc1dsp.o
+OBJS-$(CONFIG_WNV1_DECODER) += wnv1.o
+OBJS-$(CONFIG_WS_SND1_DECODER) += ws-snd1.o
+OBJS-$(CONFIG_XAN_DPCM_DECODER) += dpcm.o
+OBJS-$(CONFIG_XAN_WC3_DECODER) += xan.o
+OBJS-$(CONFIG_XAN_WC4_DECODER) += xan.o
+OBJS-$(CONFIG_XL_DECODER) += xl.o
+OBJS-$(CONFIG_ZLIB_DECODER) += lcl.o
+OBJS-$(CONFIG_ZLIB_ENCODER) += lcl.o
+OBJS-$(CONFIG_ZMBV_DECODER) += zmbv.o
+
+OBJS-$(CONFIG_PCM_S32LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S32LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S32BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S32BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U32LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U32LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U32BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U32BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U24LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U24LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U24BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U24BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24DAUD_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S24DAUD_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S16LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S16LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S16BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S16BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U16LE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U16LE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U16BE_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U16BE_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_S8_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_S8_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_U8_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_U8_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_ALAW_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_ALAW_ENCODER) += pcm.o
+OBJS-$(CONFIG_PCM_MULAW_DECODER) += pcm.o
+OBJS-$(CONFIG_PCM_MULAW_ENCODER) += pcm.o
+
+OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_4XM_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_ADX_DECODER) += adx.o
+OBJS-$(CONFIG_ADPCM_ADX_ENCODER) += adx.o
+OBJS-$(CONFIG_ADPCM_CT_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_CT_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_EA_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_EA_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_G726_DECODER) += g726.o
+OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o
+OBJS-$(CONFIG_ADPCM_IMA_DK3_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_DK3_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_DK4_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_DK4_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_QT_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_QT_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_DECODER)+= adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_SMJPEG_ENCODER)+= adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_WAV_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_WAV_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_WS_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_IMA_WS_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_MS_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_MS_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SBPRO_2_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SBPRO_2_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SBPRO_3_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SBPRO_3_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SBPRO_4_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SBPRO_4_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SWF_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_SWF_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_XA_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_XA_ENCODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_YAMAHA_DECODER) += adpcm.o
+OBJS-$(CONFIG_ADPCM_YAMAHA_ENCODER) += adpcm.o
+
+OBJS-$(CONFIG_FAAD) += faad.o
+OBJS-$(CONFIG_FAAC) += faac.o
+OBJS-$(CONFIG_XVID) += xvidff.o xvid_rc.o
+OBJS-$(CONFIG_X264) += x264.o
+OBJS-$(CONFIG_MP3LAME) += mp3lameaudio.o
+OBJS-$(CONFIG_LIBVORBIS) += oggvorbis.o
+OBJS-$(CONFIG_LIBGSM) += libgsm.o
+
+# currently using liba52 for ac3 decoding
+OBJS-$(CONFIG_A52) += a52dec.o
+
+# using builtin liba52 or runtime linked liba52.so.0
+OBJS-$(CONFIG_A52)$(CONFIG_A52BIN) += liba52/bit_allocate.o \
+ liba52/bitstream.o \
+ liba52/downmix.o \
+ liba52/imdct.o \
+ liba52/parse.o \
+ liba52/crc.o \
+ liba52/resample.o
+
+# currently using libdts for dts decoding
+OBJS-$(CONFIG_DTS) += dtsdec.o
+
+OBJS-$(CONFIG_AMR) += amr.o
+OBJS-$(CONFIG_AMR_NB) += amr_float/sp_dec.o \
+ amr_float/sp_enc.o \
+ amr_float/interf_dec.o \
+ amr_float/interf_enc.o
+
+ifeq ($(CONFIG_AMR_NB_FIXED),yes)
+EXTRAOBJS += amr/*.o
+EXTRADEPS=amrlibs
+endif
+
+OBJS-$(CONFIG_AMR_WB) += amrwb_float/dec_acelp.o \
+ amrwb_float/dec_dtx.o \
+ amrwb_float/dec_gain.o \
+ amrwb_float/dec_if.o \
+ amrwb_float/dec_lpc.o \
+ amrwb_float/dec_main.o \
+ amrwb_float/dec_rom.o \
+ amrwb_float/dec_util.o \
+ amrwb_float/enc_acelp.o \
+ amrwb_float/enc_dtx.o \
+ amrwb_float/enc_gain.o \
+ amrwb_float/enc_if.o \
+ amrwb_float/enc_lpc.o \
+ amrwb_float/enc_main.o \
+ amrwb_float/enc_rom.o \
+ amrwb_float/enc_util.o \
+ amrwb_float/if_rom.o
+
+OBJS-$(CONFIG_AAC_PARSER) += parser.o
+OBJS-$(CONFIG_AC3_PARSER) += parser.o
+OBJS-$(CONFIG_CAVSVIDEO_PARSER) += cavs.o parser.o
+OBJS-$(CONFIG_DVBSUB_PARSER) += dvbsubdec.o
+OBJS-$(CONFIG_DVDSUB_PARSER) += dvdsubdec.o
+OBJS-$(CONFIG_H261_PARSER) += h261.o
+OBJS-$(CONFIG_H263_PARSER) += h263dec.o
+OBJS-$(CONFIG_H264_PARSER) += h264.o
+OBJS-$(CONFIG_MJPEG_PARSER) += mjpeg.o
+OBJS-$(CONFIG_MPEG4VIDEO_PARSER) += parser.o
+OBJS-$(CONFIG_MPEGAUDIO_PARSER) += parser.o
+OBJS-$(CONFIG_MPEGVIDEO_PARSER) += parser.o
+OBJS-$(CONFIG_PNM_PARSER) += pnm.o
+
+OBJS-$(HAVE_PTHREADS) += pthread.o
+OBJS-$(HAVE_W32THREADS) += w32thread.o
+OBJS-$(HAVE_OS2THREADS) += os2thread.o
+OBJS-$(HAVE_BEOSTHREADS) += beosthread.o
+
+OBJS-$(HAVE_XVMC_ACCEL) += xvmcvideo.o
+
+ifneq ($(CONFIG_SWSCALER),yes)
+OBJS += imgresample.o
+endif
+
+# i386 mmx specific stuff
+ifeq ($(TARGET_MMX),yes)
+OBJS += i386/fdct_mmx.o \
+ i386/cputest.o \
+ i386/dsputil_mmx.o \
+ i386/mpegvideo_mmx.o \
+ i386/motion_est_mmx.o \
+ i386/simple_idct_mmx.o \
+ i386/idct_mmx_xvid.o \
+ i386/fft_sse.o \
+ i386/vp3dsp_mmx.o \
+ i386/vp3dsp_sse2.o \
+ i386/fft_3dn.o \
+ i386/fft_3dn2.o \
+ i386/snowdsp_mmx.o \
+
+ifeq ($(CONFIG_GPL),yes)
+OBJS += i386/idct_mmx.o
+endif
+ifeq ($(CONFIG_CAVS_DECODER),yes)
+OBJS += i386/cavsdsp_mmx.o
+endif
+endif
+
+# armv4l specific stuff
+ASM_OBJS-$(TARGET_ARCH_ARMV4L) += armv4l/jrevdct_arm.o \
+ armv4l/simple_idct_arm.o \
+ armv4l/dsputil_arm_s.o \
+
+OBJS-$(TARGET_ARCH_ARMV4L) += armv4l/dsputil_arm.o \
+ armv4l/mpegvideo_arm.o \
+
+OBJS-$(TARGET_IWMMXT) += armv4l/dsputil_iwmmxt.o \
+ armv4l/mpegvideo_iwmmxt.o \
+
+ASM_OBJS-$(TARGET_ARMV5TE) += armv4l/simple_idct_armv5te.o \
+
+# sun sparc
+OBJS-$(TARGET_ARCH_SPARC) += sparc/dsputil_vis.o \
+
+sparc/dsputil_vis.o: CFLAGS += -mcpu=ultrasparc -mtune=ultrasparc
+
+# sun mediaLib specific stuff
+OBJS-$(HAVE_MLIB) += mlib/dsputil_mlib.o \
+
+# alpha specific stuff
+OBJS-$(TARGET_ARCH_ALPHA) += alpha/dsputil_alpha.o \
+ alpha/mpegvideo_alpha.o \
+ alpha/simple_idct_alpha.o \
+ alpha/motion_est_alpha.o \
+
+ASM_OBJS-$(TARGET_ARCH_ALPHA) += alpha/dsputil_alpha_asm.o \
+ alpha/motion_est_mvi_asm.o \
+
+OBJS-$(TARGET_ARCH_POWERPC) += ppc/dsputil_ppc.o \
+ ppc/mpegvideo_ppc.o \
+
+OBJS-$(TARGET_MMI) += ps2/dsputil_mmi.o \
+ ps2/idct_mmi.o \
+ ps2/mpegvideo_mmi.o \
+
+OBJS-$(TARGET_ARCH_SH4) += sh4/idct_sh4.o \
+ sh4/dsputil_sh4.o \
+ sh4/dsputil_align.o \
+
+OBJS-$(TARGET_ALTIVEC) += ppc/dsputil_altivec.o \
+ ppc/mpegvideo_altivec.o \
+ ppc/idct_altivec.o \
+ ppc/fft_altivec.o \
+ ppc/gmc_altivec.o \
+ ppc/fdct_altivec.o \
+ ppc/float_altivec.o \
+
+ifeq ($(TARGET_ALTIVEC),yes)
+OBJS-$(CONFIG_H264_DECODER) += ppc/h264_altivec.o
+OBJS-$(CONFIG_SNOW_DECODER) += ppc/snow_altivec.o
+OBJS-$(CONFIG_VC1_DECODER) += ppc/vc1dsp_altivec.o
+OBJS-$(CONFIG_WMV3_DECODER) += ppc/vc1dsp_altivec.o
+endif
+
+OBJS-$(TARGET_ARCH_BFIN) += bfin/dsputil_bfin.o \
+
+CFLAGS += $(CFLAGS-yes)
+OBJS += $(OBJS-yes)
+ASM_OBJS += $(ASM_OBJS-yes)
+
+EXTRALIBS := -L$(BUILD_ROOT)/libavutil -lavutil$(BUILDSUF) $(EXTRALIBS)
+
+NAME=avcodec
+ifeq ($(BUILD_SHARED),yes)
+LIBVERSION=$(LAVCVERSION)
+LIBMAJOR=$(LAVCMAJOR)
+endif
+
+TESTS= imgresample-test fft-test
+ifeq ($(TARGET_ARCH_X86),yes)
+TESTS+= cpuid_test dct-test motion-test
+endif
+
+include ../common.mak
+
+amrlibs:
+ $(MAKE) -C amr spclib fipoplib
+
+tests: apiexample $(TESTS)
+
+clean::
+ rm -f \
+ i386/*.o i386/*~ \
+ armv4l/*.o armv4l/*~ \
+ mlib/*.o mlib/*~ \
+ alpha/*.o alpha/*~ \
+ ppc/*.o ppc/*~ \
+ ps2/*.o ps2/*~ \
+ sh4/*.o sh4/*~ \
+ sparc/*.o sparc/*~ \
+ liba52/*.o liba52/*~ \
+ amr_float/*.o \
+ apiexample $(TESTS)
+ -$(MAKE) -C amr clean
+ -$(MAKE) -C amrwb_float -f makefile.gcc clean
+
+apiexample: apiexample.o $(LIB)
+
+cpuid_test: i386/cputest.c
+ $(CC) $(CFLAGS) -D__TEST__ -o $@ $<
+
+imgresample-test: imgresample.c $(LIB)
+ $(CC) $(CFLAGS) -DTEST -o $@ $^ $(EXTRALIBS)
+
+dct-test: dct-test.o fdctref.o $(LIB)
+
+motion-test: motion_test.o $(LIB)
+
+fft-test: fft-test.o $(LIB)
+
+.PHONY: amrlibs tests
diff --git a/contrib/ffmpeg/libavcodec/a52dec.c b/contrib/ffmpeg/libavcodec/a52dec.c
new file mode 100644
index 000000000..dec25138e
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/a52dec.c
@@ -0,0 +1,257 @@
+/*
+ * A52 decoder
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file a52dec.c
+ * A52 decoder.
+ */
+
+#include "avcodec.h"
+#include "liba52/a52.h"
+
+#ifdef CONFIG_A52BIN
+#include <dlfcn.h>
+static const char* liba52name = "liba52.so.0";
+#endif
+
+/**
+ * liba52 - Copyright (C) Aaron Holtzman
+ * released under the GPL license.
+ */
+typedef struct AC3DecodeState {
+ uint8_t inbuf[4096]; /* input buffer */
+ uint8_t *inbuf_ptr;
+ int frame_size;
+ int flags;
+ int channels;
+ a52_state_t* state;
+ sample_t* samples;
+
+ /*
+ * virtual method table
+ *
+ * This function table is used so that liba52 does not have to be
+ * linked into ffmpeg at build time; it can instead be loaded at
+ * runtime. This makes it possible to distribute an ffmpeg binary
+ * that does not depend on liba52 but still uses the library when
+ * the user has installed it separately.
+ */
+ void* handle;
+ a52_state_t* (*a52_init)(uint32_t mm_accel);
+ sample_t* (*a52_samples)(a52_state_t * state);
+ int (*a52_syncinfo)(uint8_t * buf, int * flags,
+ int * sample_rate, int * bit_rate);
+ int (*a52_frame)(a52_state_t * state, uint8_t * buf, int * flags,
+ sample_t * level, sample_t bias);
+ void (*a52_dynrng)(a52_state_t * state,
+ sample_t (* call) (sample_t, void *), void * data);
+ int (*a52_block)(a52_state_t * state);
+ void (*a52_free)(a52_state_t * state);
+
+} AC3DecodeState;
+
+#ifdef CONFIG_A52BIN
+static void* dlsymm(void* handle, const char* symbol)
+{
+ void* f = dlsym(handle, symbol);
+ if (!f)
+ av_log( NULL, AV_LOG_ERROR, "A52 Decoder - function '%s' can't be resolved\n", symbol);
+ return f;
+}
+#endif
+
+static int a52_decode_init(AVCodecContext *avctx)
+{
+ AC3DecodeState *s = avctx->priv_data;
+
+#ifdef CONFIG_A52BIN
+ s->handle = dlopen(liba52name, RTLD_LAZY);
+ if (!s->handle)
+ {
+ av_log( avctx, AV_LOG_ERROR, "A52 library %s could not be opened! \n%s\n", liba52name, dlerror());
+ return -1;
+ }
+ s->a52_init = (a52_state_t* (*)(uint32_t)) dlsymm(s->handle, "a52_init");
+ s->a52_samples = (sample_t* (*)(a52_state_t*)) dlsymm(s->handle, "a52_samples");
+ s->a52_syncinfo = (int (*)(uint8_t*, int*, int*, int*)) dlsymm(s->handle, "a52_syncinfo");
+ s->a52_frame = (int (*)(a52_state_t*, uint8_t*, int*, sample_t*, sample_t)) dlsymm(s->handle, "a52_frame");
+ s->a52_block = (int (*)(a52_state_t*)) dlsymm(s->handle, "a52_block");
+ s->a52_free = (void (*)(a52_state_t*)) dlsymm(s->handle, "a52_free");
+ if (!s->a52_init || !s->a52_samples || !s->a52_syncinfo
+ || !s->a52_frame || !s->a52_block || !s->a52_free)
+ {
+ dlclose(s->handle);
+ return -1;
+ }
+#else
+ /* static linked version */
+ s->handle = 0;
+ s->a52_init = a52_init;
+ s->a52_samples = a52_samples;
+ s->a52_syncinfo = a52_syncinfo;
+ s->a52_frame = a52_frame;
+ s->a52_block = a52_block;
+ s->a52_free = a52_free;
+#endif
+ s->state = s->a52_init(0); /* later use CPU flags */
+ s->samples = s->a52_samples(s->state);
+ s->inbuf_ptr = s->inbuf;
+ s->frame_size = 0;
+
+ return 0;
+}
+
+/**** the following two functions come from a52dec */
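+/* liba52 returns floats biased by +384.0 (the bias passed to a52_frame()
+ below); 384.0f has the IEEE-754 bit pattern 0x43c00000, and within that
+ exponent range one mantissa step equals one 16-bit sample step, so
+ subtracting 0x43c00000 from the raw bits recovers the clipped int16
+ sample. */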
+static inline int blah (int32_t i)
+{
+ if (i > 0x43c07fff)
+ return 32767;
+ else if (i < 0x43bf8000)
+ return -32768;
+ return i - 0x43c00000;
+}
+
+static inline void float_to_int (float * _f, int16_t * s16, int nchannels)
+{
+ int i, j, c;
+ int32_t * f = (int32_t *) _f; // XXX assumes IEEE float format
+
+ j = 0;
+ nchannels *= 256;
+ for (i = 0; i < 256; i++) {
+ for (c = 0; c < nchannels; c += 256)
+ s16[j++] = blah (f[i + c]);
+ }
+}
+
+/**** end */
+
+#define HEADER_SIZE 7
+
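+/*
+ * a52_decode_frame() is a small packetizing state machine: it first
+ * accumulates the 7-byte syncframe header, asks liba52 for the frame
+ * length via a52_syncinfo(), then accumulates the rest of the frame and
+ * finally decodes 6 blocks of 256 samples per channel into interleaved
+ * 16-bit PCM.
+ */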
+static int a52_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ AC3DecodeState *s = avctx->priv_data;
+ uint8_t *buf_ptr;
+ int flags, i, len;
+ int sample_rate, bit_rate;
+ short *out_samples = data;
+ float level;
+ static const int ac3_channels[8] = {
+ 2, 1, 2, 3, 3, 4, 4, 5
+ };
+
+ buf_ptr = buf;
+ while (buf_size > 0) {
+ len = s->inbuf_ptr - s->inbuf;
+ if (s->frame_size == 0) {
+ /* no header seen : find one. We need at least 7 bytes to parse it */
+ len = HEADER_SIZE - len;
+ if (len > buf_size)
+ len = buf_size;
+ memcpy(s->inbuf_ptr, buf_ptr, len);
+ buf_ptr += len;
+ s->inbuf_ptr += len;
+ buf_size -= len;
+ if ((s->inbuf_ptr - s->inbuf) == HEADER_SIZE) {
+ len = s->a52_syncinfo(s->inbuf, &s->flags, &sample_rate, &bit_rate);
+ if (len == 0) {
+ /* no sync found : move by one byte (inefficient, but simple!) */
+ memcpy(s->inbuf, s->inbuf + 1, HEADER_SIZE - 1);
+ s->inbuf_ptr--;
+ } else {
+ s->frame_size = len;
+ /* update codec info */
+ avctx->sample_rate = sample_rate;
+ s->channels = ac3_channels[s->flags & 7];
+ if (s->flags & A52_LFE)
+ s->channels++;
+ if (avctx->channels == 0)
+ /* No specific number of channels requested */
+ avctx->channels = s->channels;
+ else if (s->channels < avctx->channels) {
+ av_log(avctx, AV_LOG_ERROR, "ac3dec: AC3 Source channels are less than specified: output to %d channels.. (frmsize: %d)\n", s->channels, len);
+ avctx->channels = s->channels;
+ }
+ avctx->bit_rate = bit_rate;
+ }
+ }
+ } else if (len < s->frame_size) {
+ len = s->frame_size - len;
+ if (len > buf_size)
+ len = buf_size;
+
+ memcpy(s->inbuf_ptr, buf_ptr, len);
+ buf_ptr += len;
+ s->inbuf_ptr += len;
+ buf_size -= len;
+ } else {
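+ /* a whole frame is buffered: have liba52 downmix to the requested
+ number of channels and decode its 6 blocks */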
+ flags = s->flags;
+ if (avctx->channels == 1)
+ flags = A52_MONO;
+ else if (avctx->channels == 2)
+ flags = A52_STEREO;
+ else
+ flags |= A52_ADJUST_LEVEL;
+ level = 1;
+ if (s->a52_frame(s->state, s->inbuf, &flags, &level, 384)) {
+ fail:
+ s->inbuf_ptr = s->inbuf;
+ s->frame_size = 0;
+ continue;
+ }
+ for (i = 0; i < 6; i++) {
+ if (s->a52_block(s->state))
+ goto fail;
+ float_to_int(s->samples, out_samples + i * 256 * avctx->channels, avctx->channels);
+ }
+ s->inbuf_ptr = s->inbuf;
+ s->frame_size = 0;
+ *data_size = 6 * avctx->channels * 256 * sizeof(int16_t);
+ break;
+ }
+ }
+ return buf_ptr - buf;
+}
+
+static int a52_decode_end(AVCodecContext *avctx)
+{
+ AC3DecodeState *s = avctx->priv_data;
+ s->a52_free(s->state);
+#ifdef CONFIG_A52BIN
+ dlclose(s->handle);
+#endif
+ return 0;
+}
+
+AVCodec ac3_decoder = {
+ "ac3",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_AC3,
+ sizeof(AC3DecodeState),
+ a52_decode_init,
+ NULL,
+ a52_decode_end,
+ a52_decode_frame,
+};
diff --git a/src/libffmpeg/libavcodec/aasc.c b/contrib/ffmpeg/libavcodec/aasc.c
index 462282800..6c8e3166e 100644
--- a/src/libffmpeg/libavcodec/aasc.c
+++ b/contrib/ffmpeg/libavcodec/aasc.c
@@ -2,18 +2,20 @@
* Autodesc RLE Decoder
* Copyright (C) 2005 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/contrib/ffmpeg/libavcodec/ac3.h b/contrib/ffmpeg/libavcodec/ac3.h
new file mode 100644
index 000000000..5daa9750f
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ac3.h
@@ -0,0 +1,65 @@
+/*
+ * Common code between AC3 encoder and decoder
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file ac3.h
+ * Common code between AC3 encoder and decoder.
+ */
+
+#define AC3_MAX_CODED_FRAME_SIZE 3840 /* in bytes */
+#define AC3_MAX_CHANNELS 6 /* including LFE channel */
+
+#define NB_BLOCKS 6 /* number of PCM blocks inside an AC3 frame */
+#define AC3_FRAME_SIZE (NB_BLOCKS * 256)
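+/* i.e. 6 * 256 = 1536 PCM samples per channel in every AC3 frame */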
+
+/* exponent encoding strategy */
+#define EXP_REUSE 0
+#define EXP_NEW 1
+
+#define EXP_D15 1
+#define EXP_D25 2
+#define EXP_D45 3
+
+typedef struct AC3BitAllocParameters {
+ int fscod; /* frequency */
+ int halfratecod;
+ int sgain, sdecay, fdecay, dbknee, floor;
+ int cplfleak, cplsleak;
+} AC3BitAllocParameters;
+
+#if 0
+extern const uint16_t ac3_freqs[3];
+extern const uint16_t ac3_bitratetab[19];
+extern const int16_t ac3_window[256];
+extern const uint8_t sdecaytab[4];
+extern const uint8_t fdecaytab[4];
+extern const uint16_t sgaintab[4];
+extern const uint16_t dbkneetab[4];
+extern const uint16_t floortab[8];
+extern const uint16_t fgaintab[8];
+#endif
+
+void ac3_common_init(void);
+void ac3_parametric_bit_allocation(AC3BitAllocParameters *s, uint8_t *bap,
+ int8_t *exp, int start, int end,
+ int snroffset, int fgain, int is_lfe,
+ int deltbae,int deltnseg,
+ uint8_t *deltoffst, uint8_t *deltlen, uint8_t *deltba);
diff --git a/contrib/ffmpeg/libavcodec/ac3dec.c b/contrib/ffmpeg/libavcodec/ac3dec.c
new file mode 100644
index 000000000..b6bebfb59
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ac3dec.c
@@ -0,0 +1,184 @@
+/*
+ * AC3 decoder
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file ac3dec.c
+ * AC3 decoder.
+ */
+
+//#define DEBUG
+
+#include "avcodec.h"
+#include "libac3/ac3.h"
+
+/* currently, I use libac3 which is Copyright (C) Aaron Holtzman and
+ released under the GPL license. I may reimplement it someday... */
+typedef struct AC3DecodeState {
+ uint8_t inbuf[4096]; /* input buffer */
+ uint8_t *inbuf_ptr;
+ int frame_size;
+ int flags;
+ int channels;
+ ac3_state_t state;
+} AC3DecodeState;
+
+static int ac3_decode_init(AVCodecContext *avctx)
+{
+ AC3DecodeState *s = avctx->priv_data;
+
+ ac3_init ();
+ s->inbuf_ptr = s->inbuf;
+ s->frame_size = 0;
+ return 0;
+}
+
+stream_samples_t samples;
+
+/**** the following two functions come from ac3dec */
+static inline int blah (int32_t i)
+{
+ if (i > 0x43c07fff)
+ return 32767;
+ else if (i < 0x43bf8000)
+ return -32768;
+ else
+ return i - 0x43c00000;
+}
+
+static inline void float_to_int (float * _f, int16_t * s16, int nchannels)
+{
+ int i, j, c;
+ int32_t * f = (int32_t *) _f; // XXX assumes IEEE float format
+
+ j = 0;
+ nchannels *= 256;
+ for (i = 0; i < 256; i++) {
+ for (c = 0; c < nchannels; c += 256)
+ s16[j++] = blah (f[i + c]);
+ }
+}
+
+/**** end */
+
+#define HEADER_SIZE 7
+
+static int ac3_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ AC3DecodeState *s = avctx->priv_data;
+ uint8_t *buf_ptr;
+ int flags, i, len;
+ int sample_rate, bit_rate;
+ short *out_samples = data;
+ float level;
+ static const int ac3_channels[8] = {
+ 2, 1, 2, 3, 3, 4, 4, 5
+ };
+
+ buf_ptr = buf;
+ while (buf_size > 0) {
+ len = s->inbuf_ptr - s->inbuf;
+ if (s->frame_size == 0) {
+ /* no header seen : find one. We need at least 7 bytes to parse it */
+ len = HEADER_SIZE - len;
+ if (len > buf_size)
+ len = buf_size;
+ memcpy(s->inbuf_ptr, buf_ptr, len);
+ buf_ptr += len;
+ s->inbuf_ptr += len;
+ buf_size -= len;
+ if ((s->inbuf_ptr - s->inbuf) == HEADER_SIZE) {
+ len = ac3_syncinfo (s->inbuf, &s->flags, &sample_rate, &bit_rate);
+ if (len == 0) {
+ /* no sync found : move by one byte (inefficient, but simple!) */
+ memcpy(s->inbuf, s->inbuf + 1, HEADER_SIZE - 1);
+ s->inbuf_ptr--;
+ } else {
+ s->frame_size = len;
+ /* update codec info */
+ avctx->sample_rate = sample_rate;
+ s->channels = ac3_channels[s->flags & 7];
+ if (s->flags & AC3_LFE)
+ s->channels++;
+ if (avctx->channels == 0)
+ /* No specific number of channels requested */
+ avctx->channels = s->channels;
+ else if (s->channels < avctx->channels) {
+ av_log( avctx, AV_LOG_INFO, "ac3dec: AC3 Source channels are less than specified: output to %d channels.. (frmsize: %d)\n", s->channels, len);
+ avctx->channels = s->channels;
+ }
+ avctx->bit_rate = bit_rate;
+ }
+ }
+ } else if (len < s->frame_size) {
+ len = s->frame_size - len;
+ if (len > buf_size)
+ len = buf_size;
+
+ memcpy(s->inbuf_ptr, buf_ptr, len);
+ buf_ptr += len;
+ s->inbuf_ptr += len;
+ buf_size -= len;
+ } else {
+ flags = s->flags;
+ if (avctx->channels == 1)
+ flags = AC3_MONO;
+ else if (avctx->channels == 2)
+ flags = AC3_STEREO;
+ else
+ flags |= AC3_ADJUST_LEVEL;
+ level = 1;
+ if (ac3_frame (&s->state, s->inbuf, &flags, &level, 384)) {
+ fail:
+ s->inbuf_ptr = s->inbuf;
+ s->frame_size = 0;
+ continue;
+ }
+ for (i = 0; i < 6; i++) {
+ if (ac3_block (&s->state))
+ goto fail;
+ float_to_int (*samples, out_samples + i * 256 * avctx->channels, avctx->channels);
+ }
+ s->inbuf_ptr = s->inbuf;
+ s->frame_size = 0;
+ *data_size = 6 * avctx->channels * 256 * sizeof(int16_t);
+ break;
+ }
+ }
+ return buf_ptr - buf;
+}
+
+static int ac3_decode_end(AVCodecContext *s)
+{
+ return 0;
+}
+
+AVCodec ac3_decoder = {
+ "ac3",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_AC3,
+ sizeof(AC3DecodeState),
+ ac3_decode_init,
+ NULL,
+ ac3_decode_end,
+ ac3_decode_frame,
+};
diff --git a/contrib/ffmpeg/libavcodec/ac3enc.c b/contrib/ffmpeg/libavcodec/ac3enc.c
new file mode 100644
index 000000000..c8c8920ed
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ac3enc.c
@@ -0,0 +1,1557 @@
+/*
+ * The simplest AC3 encoder
+ * Copyright (c) 2000 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file ac3enc.c
+ * The simplest AC3 encoder.
+ */
+//#define DEBUG
+//#define DEBUG_BITALLOC
+#include "avcodec.h"
+#include "bitstream.h"
+#include "crc.h"
+#include "ac3.h"
+
+typedef struct AC3EncodeContext {
+ PutBitContext pb;
+ int nb_channels;
+ int nb_all_channels;
+ int lfe_channel;
+ int bit_rate;
+ unsigned int sample_rate;
+ unsigned int bsid;
+ unsigned int frame_size_min; /* minimum frame size in case rounding is necessary */
+ unsigned int frame_size; /* current frame size in words */
+ unsigned int bits_written;
+ unsigned int samples_written;
+ int halfratecod;
+ unsigned int frmsizecod;
+ unsigned int fscod; /* frequency */
+ unsigned int acmod;
+ int lfe;
+ unsigned int bsmod;
+ short last_samples[AC3_MAX_CHANNELS][256];
+ unsigned int chbwcod[AC3_MAX_CHANNELS];
+ int nb_coefs[AC3_MAX_CHANNELS];
+
+ /* bitrate allocation control */
+ int sgaincod, sdecaycod, fdecaycod, dbkneecod, floorcod;
+ AC3BitAllocParameters bit_alloc;
+ int csnroffst;
+ int fgaincod[AC3_MAX_CHANNELS];
+ int fsnroffst[AC3_MAX_CHANNELS];
+ /* mantissa encoding */
+ int mant1_cnt, mant2_cnt, mant4_cnt;
+} AC3EncodeContext;
+
+#include "ac3tab.h"
+
+#define MDCT_NBITS 9
+#define N (1 << MDCT_NBITS)
+
+/* new exponents are sent if the L1 norm of their change exceeds this number */
+#define EXP_DIFF_THRESHOLD 1000
+
+static void fft_init(int ln);
+
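+/* convert a float in [-1.0, 1.0] to Q15 fixed point, saturating at +/-32767 */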
+static inline int16_t fix15(float a)
+{
+ int v;
+ v = (int)(a * (float)(1 << 15));
+ if (v < -32767)
+ v = -32767;
+ else if (v > 32767)
+ v = 32767;
+ return v;
+}
+
+static inline int calc_lowcomp1(int a, int b0, int b1)
+{
+ if ((b0 + 256) == b1) {
+ a = 384 ;
+ } else if (b0 > b1) {
+ a = a - 64;
+ if (a < 0) a=0;
+ }
+ return a;
+}
+
+static inline int calc_lowcomp(int a, int b0, int b1, int bin)
+{
+ if (bin < 7) {
+ if ((b0 + 256) == b1) {
+ a = 384 ;
+ } else if (b0 > b1) {
+ a = a - 64;
+ if (a < 0) a=0;
+ }
+ } else if (bin < 20) {
+ if ((b0 + 256) == b1) {
+ a = 320 ;
+ } else if (b0 > b1) {
+ a= a - 64;
+ if (a < 0) a=0;
+ }
+ } else {
+ a = a - 128;
+ if (a < 0) a=0;
+ }
+ return a;
+}
+
+/* AC3 bit allocation. The algorithm is the one described in the AC3
+ spec. */
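+/* The stages below follow the spec: exponents are mapped to a PSD, the PSD
+ is integrated over the critical bands with log-domain addition (latab),
+ an excitation function is built with fast/slow leak integrators and
+ turned into a masking curve (never below the hearing threshold hth),
+ optional delta bit allocation is applied, and the difference between the
+ PSD and the snr-offset-adjusted mask is mapped through baptab to a bit
+ allocation pointer for each coefficient. */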
+void ac3_parametric_bit_allocation(AC3BitAllocParameters *s, uint8_t *bap,
+ int8_t *exp, int start, int end,
+ int snroffset, int fgain, int is_lfe,
+ int deltbae,int deltnseg,
+ uint8_t *deltoffst, uint8_t *deltlen, uint8_t *deltba)
+{
+ int bin,i,j,k,end1,v,v1,bndstrt,bndend,lowcomp,begin;
+ int fastleak,slowleak,address,tmp;
+ int16_t psd[256]; /* scaled exponents */
+ int16_t bndpsd[50]; /* interpolated exponents */
+ int16_t excite[50]; /* excitation */
+ int16_t mask[50]; /* masking value */
+
+ /* exponent mapping to PSD */
+ for(bin=start;bin<end;bin++) {
+ psd[bin]=(3072 - (exp[bin] << 7));
+ }
+
+ /* PSD integration */
+ j=start;
+ k=masktab[start];
+ do {
+ v=psd[j];
+ j++;
+ end1=bndtab[k+1];
+ if (end1 > end) end1=end;
+ for(i=j;i<end1;i++) {
+ int c,adr;
+ /* logadd */
+ v1=psd[j];
+ c=v-v1;
+ if (c >= 0) {
+ adr=c >> 1;
+ if (adr > 255) adr=255;
+ v=v + latab[adr];
+ } else {
+ adr=(-c) >> 1;
+ if (adr > 255) adr=255;
+ v=v1 + latab[adr];
+ }
+ j++;
+ }
+ bndpsd[k]=v;
+ k++;
+ } while (end > bndtab[k]);
+
+ /* excitation function */
+ bndstrt = masktab[start];
+ bndend = masktab[end-1] + 1;
+
+ if (bndstrt == 0) {
+ lowcomp = 0;
+ lowcomp = calc_lowcomp1(lowcomp, bndpsd[0], bndpsd[1]) ;
+ excite[0] = bndpsd[0] - fgain - lowcomp ;
+ lowcomp = calc_lowcomp1(lowcomp, bndpsd[1], bndpsd[2]) ;
+ excite[1] = bndpsd[1] - fgain - lowcomp ;
+ begin = 7 ;
+ for (bin = 2; bin < 7; bin++) {
+ if (!(is_lfe && bin == 6))
+ lowcomp = calc_lowcomp1(lowcomp, bndpsd[bin], bndpsd[bin+1]) ;
+ fastleak = bndpsd[bin] - fgain ;
+ slowleak = bndpsd[bin] - s->sgain ;
+ excite[bin] = fastleak - lowcomp ;
+ if (!(is_lfe && bin == 6)) {
+ if (bndpsd[bin] <= bndpsd[bin+1]) {
+ begin = bin + 1 ;
+ break ;
+ }
+ }
+ }
+
+ end1=bndend;
+ if (end1 > 22) end1=22;
+
+ for (bin = begin; bin < end1; bin++) {
+ if (!(is_lfe && bin == 6))
+ lowcomp = calc_lowcomp(lowcomp, bndpsd[bin], bndpsd[bin+1], bin) ;
+
+ fastleak -= s->fdecay ;
+ v = bndpsd[bin] - fgain;
+ if (fastleak < v) fastleak = v;
+
+ slowleak -= s->sdecay ;
+ v = bndpsd[bin] - s->sgain;
+ if (slowleak < v) slowleak = v;
+
+ v=fastleak - lowcomp;
+ if (slowleak > v) v=slowleak;
+
+ excite[bin] = v;
+ }
+ begin = 22;
+ } else {
+ /* coupling channel */
+ begin = bndstrt;
+
+ fastleak = (s->cplfleak << 8) + 768;
+ slowleak = (s->cplsleak << 8) + 768;
+ }
+
+ for (bin = begin; bin < bndend; bin++) {
+ fastleak -= s->fdecay ;
+ v = bndpsd[bin] - fgain;
+ if (fastleak < v) fastleak = v;
+ slowleak -= s->sdecay ;
+ v = bndpsd[bin] - s->sgain;
+ if (slowleak < v) slowleak = v;
+
+ v=fastleak;
+ if (slowleak > v) v = slowleak;
+ excite[bin] = v;
+ }
+
+ /* compute masking curve */
+
+ for (bin = bndstrt; bin < bndend; bin++) {
+ v1 = excite[bin];
+ tmp = s->dbknee - bndpsd[bin];
+ if (tmp > 0) {
+ v1 += tmp >> 2;
+ }
+ v=hth[bin >> s->halfratecod][s->fscod];
+ if (v1 > v) v=v1;
+ mask[bin] = v;
+ }
+
+ /* delta bit allocation */
+
+ if (deltbae == 0 || deltbae == 1) {
+ int band, seg, delta;
+ band = 0 ;
+ for (seg = 0; seg < deltnseg; seg++) {
+ band += deltoffst[seg] ;
+ if (deltba[seg] >= 4) {
+ delta = (deltba[seg] - 3) << 7;
+ } else {
+ delta = (deltba[seg] - 4) << 7;
+ }
+ for (k = 0; k < deltlen[seg]; k++) {
+ mask[band] += delta ;
+ band++ ;
+ }
+ }
+ }
+
+ /* compute bit allocation */
+
+ i = start ;
+ j = masktab[start] ;
+ do {
+ v=mask[j];
+ v -= snroffset ;
+ v -= s->floor ;
+ if (v < 0) v = 0;
+ v &= 0x1fe0 ;
+ v += s->floor ;
+
+ end1=bndtab[j] + bndsz[j];
+ if (end1 > end) end1=end;
+
+ for (k = i; k < end1; k++) {
+ address = (psd[i] - v) >> 5 ;
+ if (address < 0) address=0;
+ else if (address > 63) address=63;
+ bap[i] = baptab[address];
+ i++;
+ }
+ } while (end > bndtab[j++]) ;
+}
+
+typedef struct IComplex {
+ short re,im;
+} IComplex;
+
+static void fft_init(int ln)
+{
+ int i, j, m, n;
+ float alpha;
+
+ n = 1 << ln;
+
+ for(i=0;i<(n/2);i++) {
+ alpha = 2 * M_PI * (float)i / (float)n;
+ costab[i] = fix15(cos(alpha));
+ sintab[i] = fix15(sin(alpha));
+ }
+
+ for(i=0;i<n;i++) {
+ m=0;
+ for(j=0;j<ln;j++) {
+ m |= ((i >> j) & 1) << (ln-j-1);
+ }
+ fft_rev[i]=m;
+ }
+}
+
+/* butterfly op */
+#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) \
+{\
+ int ax, ay, bx, by;\
+ bx=pre1;\
+ by=pim1;\
+ ax=qre1;\
+ ay=qim1;\
+ pre = (bx + ax) >> 1;\
+ pim = (by + ay) >> 1;\
+ qre = (bx - ax) >> 1;\
+ qim = (by - ay) >> 1;\
+}
+
+#define MUL16(a,b) ((a) * (b))
+
+#define CMUL(pre, pim, are, aim, bre, bim) \
+{\
+ pre = (MUL16(are, bre) - MUL16(aim, bim)) >> 15;\
+ pim = (MUL16(are, bim) + MUL16(bre, aim)) >> 15;\
+}
+
+
+/* do an in-place complex fft on 2^ln points. */
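+/* Radix-2 transform on Q15 data: each butterfly in BF() halves its outputs,
+ so after ln passes the result is scaled by 1/2^ln, which keeps the 16-bit
+ values from overflowing; the twiddle factors are the Q15 costab/sintab
+ filled in by fft_init(). */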
+static void fft(IComplex *z, int ln)
+{
+ int j, l, np, np2;
+ int nblocks, nloops;
+ register IComplex *p,*q;
+ int tmp_re, tmp_im;
+
+ np = 1 << ln;
+
+ /* reverse */
+ for(j=0;j<np;j++) {
+ int k;
+ IComplex tmp;
+ k = fft_rev[j];
+ if (k < j) {
+ tmp = z[k];
+ z[k] = z[j];
+ z[j] = tmp;
+ }
+ }
+
+ /* pass 0 */
+
+ p=&z[0];
+ j=(np >> 1);
+ do {
+ BF(p[0].re, p[0].im, p[1].re, p[1].im,
+ p[0].re, p[0].im, p[1].re, p[1].im);
+ p+=2;
+ } while (--j != 0);
+
+ /* pass 1 */
+
+ p=&z[0];
+ j=np >> 2;
+ do {
+ BF(p[0].re, p[0].im, p[2].re, p[2].im,
+ p[0].re, p[0].im, p[2].re, p[2].im);
+ BF(p[1].re, p[1].im, p[3].re, p[3].im,
+ p[1].re, p[1].im, p[3].im, -p[3].re);
+ p+=4;
+ } while (--j != 0);
+
+ /* pass 2 .. ln-1 */
+
+ nblocks = np >> 3;
+ nloops = 1 << 2;
+ np2 = np >> 1;
+ do {
+ p = z;
+ q = z + nloops;
+ for (j = 0; j < nblocks; ++j) {
+
+ BF(p->re, p->im, q->re, q->im,
+ p->re, p->im, q->re, q->im);
+
+ p++;
+ q++;
+ for(l = nblocks; l < np2; l += nblocks) {
+ CMUL(tmp_re, tmp_im, costab[l], -sintab[l], q->re, q->im);
+ BF(p->re, p->im, q->re, q->im,
+ p->re, p->im, tmp_re, tmp_im);
+ p++;
+ q++;
+ }
+ p += nloops;
+ q += nloops;
+ }
+ nblocks = nblocks >> 1;
+ nloops = nloops << 1;
+ } while (nblocks != 0);
+}
+
+/* do a 512 point mdct */
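+/* Classic MDCT-via-FFT factorization: the 512 windowed samples are folded
+ and pre-rotated into N/4 = 128 complex points, transformed with the FFT
+ above, and post-rotated to produce the 256 MDCT coefficients. */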
+static void mdct512(int32_t *out, int16_t *in)
+{
+ int i, re, im, re1, im1;
+ int16_t rot[N];
+ IComplex x[N/4];
+
+ /* shift to simplify computations */
+ for(i=0;i<N/4;i++)
+ rot[i] = -in[i + 3*N/4];
+ for(i=N/4;i<N;i++)
+ rot[i] = in[i - N/4];
+
+ /* pre rotation */
+ for(i=0;i<N/4;i++) {
+ re = ((int)rot[2*i] - (int)rot[N-1-2*i]) >> 1;
+ im = -((int)rot[N/2+2*i] - (int)rot[N/2-1-2*i]) >> 1;
+ CMUL(x[i].re, x[i].im, re, im, -xcos1[i], xsin1[i]);
+ }
+
+ fft(x, MDCT_NBITS - 2);
+
+ /* post rotation */
+ for(i=0;i<N/4;i++) {
+ re = x[i].re;
+ im = x[i].im;
+ CMUL(re1, im1, re, im, xsin1[i], xcos1[i]);
+ out[2*i] = im1;
+ out[N/2-1-2*i] = re1;
+ }
+}
+
+/* XXX: use another norm ? */
+static int calc_exp_diff(uint8_t *exp1, uint8_t *exp2, int n)
+{
+ int sum, i;
+ sum = 0;
+ for(i=0;i<n;i++) {
+ sum += abs(exp1[i] - exp2[i]);
+ }
+ return sum;
+}
+
+static void compute_exp_strategy(uint8_t exp_strategy[NB_BLOCKS][AC3_MAX_CHANNELS],
+ uint8_t exp[NB_BLOCKS][AC3_MAX_CHANNELS][N/2],
+ int ch, int is_lfe)
+{
+ int i, j;
+ int exp_diff;
+
+ /* estimate the exponent variation & decide if the exponents should be
+ reused in the next frame */
+ exp_strategy[0][ch] = EXP_NEW;
+ for(i=1;i<NB_BLOCKS;i++) {
+ exp_diff = calc_exp_diff(exp[i][ch], exp[i-1][ch], N/2);
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "exp_diff=%d\n", exp_diff);
+#endif
+ if (exp_diff > EXP_DIFF_THRESHOLD)
+ exp_strategy[i][ch] = EXP_NEW;
+ else
+ exp_strategy[i][ch] = EXP_REUSE;
+ }
+ if (is_lfe)
+ return;
+
+ /* now select the encoding strategy type : if exponents are often
+ recoded, we use a coarse encoding */
+ i = 0;
+ while (i < NB_BLOCKS) {
+ j = i + 1;
+ while (j < NB_BLOCKS && exp_strategy[j][ch] == EXP_REUSE)
+ j++;
+ switch(j - i) {
+ case 1:
+ exp_strategy[i][ch] = EXP_D45;
+ break;
+ case 2:
+ case 3:
+ exp_strategy[i][ch] = EXP_D25;
+ break;
+ default:
+ exp_strategy[i][ch] = EXP_D15;
+ break;
+ }
+ i = j;
+ }
+}
+
+/* set exp[i] to min(exp[i], exp1[i]) */
+static void exponent_min(uint8_t exp[N/2], uint8_t exp1[N/2], int n)
+{
+ int i;
+
+ for(i=0;i<n;i++) {
+ if (exp1[i] < exp[i])
+ exp[i] = exp1[i];
+ }
+}
+
+/* update the exponents so that they are the ones the decoder will
+ decode. Return the number of bits used to code the exponents */
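+/* Exponents are grouped by 1 (D15), 2 (D25) or 4 (D45) coefficients; each
+ group keeps its minimum exponent and consecutive groups are constrained to
+ differ by at most +/-2 so that three base-5 deltas can later be packed into
+ one 7-bit code in output_audio_block(), hence the "4 + (nb_groups / 3) * 7"
+ bit count returned here. */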
+static int encode_exp(uint8_t encoded_exp[N/2],
+ uint8_t exp[N/2],
+ int nb_exps,
+ int exp_strategy)
+{
+ int group_size, nb_groups, i, j, k, exp_min;
+ uint8_t exp1[N/2];
+
+ switch(exp_strategy) {
+ case EXP_D15:
+ group_size = 1;
+ break;
+ case EXP_D25:
+ group_size = 2;
+ break;
+ default:
+ case EXP_D45:
+ group_size = 4;
+ break;
+ }
+ nb_groups = ((nb_exps + (group_size * 3) - 4) / (3 * group_size)) * 3;
+
+ /* for each group, compute the minimum exponent */
+ exp1[0] = exp[0]; /* DC exponent is handled separately */
+ k = 1;
+ for(i=1;i<=nb_groups;i++) {
+ exp_min = exp[k];
+ assert(exp_min >= 0 && exp_min <= 24);
+ for(j=1;j<group_size;j++) {
+ if (exp[k+j] < exp_min)
+ exp_min = exp[k+j];
+ }
+ exp1[i] = exp_min;
+ k += group_size;
+ }
+
+ /* constraint for DC exponent */
+ if (exp1[0] > 15)
+ exp1[0] = 15;
+
+ /* Decrease the delta between consecutive groups to within 2
+ * so that they can be differentially encoded */
+ for (i=1;i<=nb_groups;i++)
+ exp1[i] = FFMIN(exp1[i], exp1[i-1] + 2);
+ for (i=nb_groups-1;i>=0;i--)
+ exp1[i] = FFMIN(exp1[i], exp1[i+1] + 2);
+
+ /* now we have the exponent values the decoder will see */
+ encoded_exp[0] = exp1[0];
+ k = 1;
+ for(i=1;i<=nb_groups;i++) {
+ for(j=0;j<group_size;j++) {
+ encoded_exp[k+j] = exp1[i];
+ }
+ k += group_size;
+ }
+
+#if defined(DEBUG)
+ av_log(NULL, AV_LOG_DEBUG, "exponents: strategy=%d\n", exp_strategy);
+ for(i=0;i<=nb_groups * group_size;i++) {
+ av_log(NULL, AV_LOG_DEBUG, "%d ", encoded_exp[i]);
+ }
+ av_log(NULL, AV_LOG_DEBUG, "\n");
+#endif
+
+ return 4 + (nb_groups / 3) * 7;
+}
+
+/* return the size in bits taken by the mantissa */
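+/* baps 1, 2 and 4 use grouped codes: three 3-level mantissas fit in 5 bits
+ (3^3 = 27 <= 32), three 5-level mantissas in 7 bits (5^3 = 125 <= 128) and
+ two 11-level mantissas in 7 bits (11^2 = 121 <= 128), so the counters below
+ only charge the bits to the first mantissa of each group. */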
+static int compute_mantissa_size(AC3EncodeContext *s, uint8_t *m, int nb_coefs)
+{
+ int bits, mant, i;
+
+ bits = 0;
+ for(i=0;i<nb_coefs;i++) {
+ mant = m[i];
+ switch(mant) {
+ case 0:
+ /* nothing */
+ break;
+ case 1:
+ /* 3 mantissa in 5 bits */
+ if (s->mant1_cnt == 0)
+ bits += 5;
+ if (++s->mant1_cnt == 3)
+ s->mant1_cnt = 0;
+ break;
+ case 2:
+ /* 3 mantissa in 7 bits */
+ if (s->mant2_cnt == 0)
+ bits += 7;
+ if (++s->mant2_cnt == 3)
+ s->mant2_cnt = 0;
+ break;
+ case 3:
+ bits += 3;
+ break;
+ case 4:
+ /* 2 mantissa in 7 bits */
+ if (s->mant4_cnt == 0)
+ bits += 7;
+ if (++s->mant4_cnt == 2)
+ s->mant4_cnt = 0;
+ break;
+ case 14:
+ bits += 14;
+ break;
+ case 15:
+ bits += 16;
+ break;
+ default:
+ bits += mant - 1;
+ break;
+ }
+ }
+ return bits;
+}
+
+
+static int bit_alloc(AC3EncodeContext *s,
+ uint8_t bap[NB_BLOCKS][AC3_MAX_CHANNELS][N/2],
+ uint8_t encoded_exp[NB_BLOCKS][AC3_MAX_CHANNELS][N/2],
+ uint8_t exp_strategy[NB_BLOCKS][AC3_MAX_CHANNELS],
+ int frame_bits, int csnroffst, int fsnroffst)
+{
+ int i, ch;
+
+ /* compute size */
+ for(i=0;i<NB_BLOCKS;i++) {
+ s->mant1_cnt = 0;
+ s->mant2_cnt = 0;
+ s->mant4_cnt = 0;
+ for(ch=0;ch<s->nb_all_channels;ch++) {
+ ac3_parametric_bit_allocation(&s->bit_alloc,
+ bap[i][ch], (int8_t *)encoded_exp[i][ch],
+ 0, s->nb_coefs[ch],
+ (((csnroffst-15) << 4) +
+ fsnroffst) << 2,
+ fgaintab[s->fgaincod[ch]],
+ ch == s->lfe_channel,
+ 2, 0, NULL, NULL, NULL);
+ frame_bits += compute_mantissa_size(s, bap[i][ch],
+ s->nb_coefs[ch]);
+ }
+ }
+#if 0
+ printf("csnr=%d fsnr=%d frame_bits=%d diff=%d\n",
+ csnroffst, fsnroffst, frame_bits,
+ 16 * s->frame_size - ((frame_bits + 7) & ~7));
+#endif
+ return 16 * s->frame_size - frame_bits;
+}
+
+#define SNR_INC1 4
+
+static int compute_bit_allocation(AC3EncodeContext *s,
+ uint8_t bap[NB_BLOCKS][AC3_MAX_CHANNELS][N/2],
+ uint8_t encoded_exp[NB_BLOCKS][AC3_MAX_CHANNELS][N/2],
+ uint8_t exp_strategy[NB_BLOCKS][AC3_MAX_CHANNELS],
+ int frame_bits)
+{
+ int i, ch;
+ int csnroffst, fsnroffst;
+ uint8_t bap1[NB_BLOCKS][AC3_MAX_CHANNELS][N/2];
+ static int frame_bits_inc[8] = { 0, 0, 2, 2, 2, 4, 2, 4 };
+
+ /* init default parameters */
+ s->sdecaycod = 2;
+ s->fdecaycod = 1;
+ s->sgaincod = 1;
+ s->dbkneecod = 2;
+ s->floorcod = 4;
+ for(ch=0;ch<s->nb_all_channels;ch++)
+ s->fgaincod[ch] = 4;
+
+ /* compute real values */
+ s->bit_alloc.fscod = s->fscod;
+ s->bit_alloc.halfratecod = s->halfratecod;
+ s->bit_alloc.sdecay = sdecaytab[s->sdecaycod] >> s->halfratecod;
+ s->bit_alloc.fdecay = fdecaytab[s->fdecaycod] >> s->halfratecod;
+ s->bit_alloc.sgain = sgaintab[s->sgaincod];
+ s->bit_alloc.dbknee = dbkneetab[s->dbkneecod];
+ s->bit_alloc.floor = floortab[s->floorcod];
+
+ /* header size */
+ frame_bits += 65;
+ // if (s->acmod == 2)
+ // frame_bits += 2;
+ frame_bits += frame_bits_inc[s->acmod];
+
+ /* audio blocks */
+ for(i=0;i<NB_BLOCKS;i++) {
+ frame_bits += s->nb_channels * 2 + 2; /* blksw * c, dithflag * c, dynrnge, cplstre */
+ if (s->acmod == 2) {
+ frame_bits++; /* rematstr */
+ if(i==0) frame_bits += 4;
+ }
+ frame_bits += 2 * s->nb_channels; /* chexpstr[2] * c */
+ if (s->lfe)
+ frame_bits++; /* lfeexpstr */
+ for(ch=0;ch<s->nb_channels;ch++) {
+ if (exp_strategy[i][ch] != EXP_REUSE)
+ frame_bits += 6 + 2; /* chbwcod[6], gainrng[2] */
+ }
+ frame_bits++; /* baie */
+ frame_bits++; /* snr */
+ frame_bits += 2; /* delta / skip */
+ }
+ frame_bits++; /* cplinu for block 0 */
+ /* bit alloc info */
+ /* sdcycod[2], fdcycod[2], sgaincod[2], dbpbcod[2], floorcod[3] */
+ /* csnroffset[6] */
+ /* (fsnoffset[4] + fgaincod[4]) * c */
+ frame_bits += 2*4 + 3 + 6 + s->nb_all_channels * (4 + 3);
+
+ /* auxdatae, crcrsv */
+ frame_bits += 2;
+
+ /* CRC */
+ frame_bits += 16;
+
+ /* now the big work begins : do the bit allocation. Modify the snr
+ offset until we can pack everything in the requested frame size */
+
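+ /* The search is greedy: lower the coarse offset in steps of SNR_INC1
+ until the frame fits, then raise the coarse and fine offsets again as
+ far as they still fit, keeping the last fitting allocation in bap. */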
+ csnroffst = s->csnroffst;
+ while (csnroffst >= 0 &&
+ bit_alloc(s, bap, encoded_exp, exp_strategy, frame_bits, csnroffst, 0) < 0)
+ csnroffst -= SNR_INC1;
+ if (csnroffst < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Bit allocation failed, try increasing the bitrate, -ab 384 for example!\n");
+ return -1;
+ }
+ while ((csnroffst + SNR_INC1) <= 63 &&
+ bit_alloc(s, bap1, encoded_exp, exp_strategy, frame_bits,
+ csnroffst + SNR_INC1, 0) >= 0) {
+ csnroffst += SNR_INC1;
+ memcpy(bap, bap1, sizeof(bap1));
+ }
+ while ((csnroffst + 1) <= 63 &&
+ bit_alloc(s, bap1, encoded_exp, exp_strategy, frame_bits, csnroffst + 1, 0) >= 0) {
+ csnroffst++;
+ memcpy(bap, bap1, sizeof(bap1));
+ }
+
+ fsnroffst = 0;
+ while ((fsnroffst + SNR_INC1) <= 15 &&
+ bit_alloc(s, bap1, encoded_exp, exp_strategy, frame_bits,
+ csnroffst, fsnroffst + SNR_INC1) >= 0) {
+ fsnroffst += SNR_INC1;
+ memcpy(bap, bap1, sizeof(bap1));
+ }
+ while ((fsnroffst + 1) <= 15 &&
+ bit_alloc(s, bap1, encoded_exp, exp_strategy, frame_bits,
+ csnroffst, fsnroffst + 1) >= 0) {
+ fsnroffst++;
+ memcpy(bap, bap1, sizeof(bap1));
+ }
+
+ s->csnroffst = csnroffst;
+ for(ch=0;ch<s->nb_all_channels;ch++)
+ s->fsnroffst[ch] = fsnroffst;
+#if defined(DEBUG_BITALLOC)
+ {
+ int j;
+
+ for(i=0;i<6;i++) {
+ for(ch=0;ch<s->nb_all_channels;ch++) {
+ printf("Block #%d Ch%d:\n", i, ch);
+ printf("bap=");
+ for(j=0;j<s->nb_coefs[ch];j++) {
+ printf("%d ",bap[i][ch][j]);
+ }
+ printf("\n");
+ }
+ }
+ }
+#endif
+ return 0;
+}
+
+void ac3_common_init(void)
+{
+ int i, j, k, l, v;
+ /* compute bndtab and masktab from bndsz */
+ k = 0;
+ l = 0;
+ for(i=0;i<50;i++) {
+ bndtab[i] = l;
+ v = bndsz[i];
+ for(j=0;j<v;j++) masktab[k++]=i;
+ l += v;
+ }
+ bndtab[50] = l;
+}
+
+
+static int AC3_encode_init(AVCodecContext *avctx)
+{
+ int freq = avctx->sample_rate;
+ int bitrate = avctx->bit_rate;
+ int channels = avctx->channels;
+ AC3EncodeContext *s = avctx->priv_data;
+ int i, j, ch;
+ float alpha;
+ static const uint8_t acmod_defs[6] = {
+ 0x01, /* C */
+ 0x02, /* L R */
+ 0x03, /* L C R */
+ 0x06, /* L R SL SR */
+ 0x07, /* L C R SL SR */
+ 0x07, /* L C R SL SR (+LFE) */
+ };
+
+ avctx->frame_size = AC3_FRAME_SIZE;
+
+ /* number of channels */
+ if (channels < 1 || channels > 6)
+ return -1;
+ s->acmod = acmod_defs[channels - 1];
+ s->lfe = (channels == 6) ? 1 : 0;
+ s->nb_all_channels = channels;
+ s->nb_channels = channels > 5 ? 5 : channels;
+ s->lfe_channel = s->lfe ? 5 : -1;
+
+ /* frequency */
+ for(i=0;i<3;i++) {
+ for(j=0;j<3;j++)
+ if ((ac3_freqs[j] >> i) == freq)
+ goto found;
+ }
+ return -1;
+ found:
+ s->sample_rate = freq;
+ s->halfratecod = i;
+ s->fscod = j;
+ s->bsid = 8 + s->halfratecod;
+ s->bsmod = 0; /* complete main audio service */
+
+ /* bitrate & frame size */
+ bitrate /= 1000;
+ for(i=0;i<19;i++) {
+ if ((ac3_bitratetab[i] >> s->halfratecod) == bitrate)
+ break;
+ }
+ if (i == 19)
+ return -1;
+ s->bit_rate = bitrate;
+ s->frmsizecod = i << 1;
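+ /* frame size in 16-bit words: (bit_rate * 1536 samples / sample_rate) / 16
+ bits per word; the fractional remainder is handled per frame in
+ AC3_encode_frame(), which occasionally uses frame_size_min + 1 */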
+ s->frame_size_min = (bitrate * 1000 * AC3_FRAME_SIZE) / (freq * 16);
+ s->bits_written = 0;
+ s->samples_written = 0;
+ s->frame_size = s->frame_size_min;
+
+ /* bit allocation init */
+ for(ch=0;ch<s->nb_channels;ch++) {
+ /* bandwidth for each channel */
+ /* XXX: should compute the bandwidth according to the frame
+ size, so that we avoid annoying high frequency artefacts */
+ s->chbwcod[ch] = 50; /* sample bandwidth as mpeg audio layer 2 table 0 */
+ s->nb_coefs[ch] = ((s->chbwcod[ch] + 12) * 3) + 37;
+ }
+ if (s->lfe) {
+ s->nb_coefs[s->lfe_channel] = 7; /* fixed */
+ }
+ /* initial snr offset */
+ s->csnroffst = 40;
+
+ ac3_common_init();
+
+ /* mdct init */
+ fft_init(MDCT_NBITS - 2);
+ for(i=0;i<N/4;i++) {
+ alpha = 2 * M_PI * (i + 1.0 / 8.0) / (float)N;
+ xcos1[i] = fix15(-cos(alpha));
+ xsin1[i] = fix15(-sin(alpha));
+ }
+
+ avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame->key_frame= 1;
+
+ return 0;
+}
+
+/* output the AC3 frame header */
+static void output_frame_header(AC3EncodeContext *s, unsigned char *frame)
+{
+ init_put_bits(&s->pb, frame, AC3_MAX_CODED_FRAME_SIZE);
+
+ put_bits(&s->pb, 16, 0x0b77); /* frame header */
+ put_bits(&s->pb, 16, 0); /* crc1: will be filled later */
+ put_bits(&s->pb, 2, s->fscod);
+ put_bits(&s->pb, 6, s->frmsizecod + (s->frame_size - s->frame_size_min));
+ put_bits(&s->pb, 5, s->bsid);
+ put_bits(&s->pb, 3, s->bsmod);
+ put_bits(&s->pb, 3, s->acmod);
+ if ((s->acmod & 0x01) && s->acmod != 0x01)
+ put_bits(&s->pb, 2, 1); /* XXX -4.5 dB */
+ if (s->acmod & 0x04)
+ put_bits(&s->pb, 2, 1); /* XXX -6 dB */
+ if (s->acmod == 0x02)
+ put_bits(&s->pb, 2, 0); /* surround not indicated */
+ put_bits(&s->pb, 1, s->lfe); /* LFE */
+ put_bits(&s->pb, 5, 31); /* dialog norm: -31 db */
+ put_bits(&s->pb, 1, 0); /* no compression control word */
+ put_bits(&s->pb, 1, 0); /* no lang code */
+ put_bits(&s->pb, 1, 0); /* no audio production info */
+ put_bits(&s->pb, 1, 0); /* no copyright */
+ put_bits(&s->pb, 1, 1); /* original bitstream */
+ put_bits(&s->pb, 1, 0); /* no time code 1 */
+ put_bits(&s->pb, 1, 0); /* no time code 2 */
+ put_bits(&s->pb, 1, 0); /* no additional bit stream info */
+}
+
+/* symmetric quantization on 'levels' levels */
+static inline int sym_quant(int c, int e, int levels)
+{
+ int v;
+
+ if (c >= 0) {
+ v = (levels * (c << e)) >> 24;
+ v = (v + 1) >> 1;
+ v = (levels >> 1) + v;
+ } else {
+ v = (levels * ((-c) << e)) >> 24;
+ v = (v + 1) >> 1;
+ v = (levels >> 1) - v;
+ }
+ assert (v >= 0 && v < levels);
+ return v;
+}
+
+/* asymmetric quantization on 2^qbits levels */
+static inline int asym_quant(int c, int e, int qbits)
+{
+ int lshift, m, v;
+
+ lshift = e + qbits - 24;
+ if (lshift >= 0)
+ v = c << lshift;
+ else
+ v = c >> (-lshift);
+ /* rounding */
+ v = (v + 1) >> 1;
+ m = (1 << (qbits-1));
+ if (v >= m)
+ v = m - 1;
+ assert(v >= -m);
+ return v & ((1 << qbits)-1);
+}
+
+/* Output one audio block. There are NB_BLOCKS audio blocks in one AC3
+ frame */
+static void output_audio_block(AC3EncodeContext *s,
+ uint8_t exp_strategy[AC3_MAX_CHANNELS],
+ uint8_t encoded_exp[AC3_MAX_CHANNELS][N/2],
+ uint8_t bap[AC3_MAX_CHANNELS][N/2],
+ int32_t mdct_coefs[AC3_MAX_CHANNELS][N/2],
+ int8_t global_exp[AC3_MAX_CHANNELS],
+ int block_num)
+{
+ int ch, nb_groups, group_size, i, baie, rbnd;
+ uint8_t *p;
+ uint16_t qmant[AC3_MAX_CHANNELS][N/2];
+ int exp0, exp1;
+ int mant1_cnt, mant2_cnt, mant4_cnt;
+ uint16_t *qmant1_ptr, *qmant2_ptr, *qmant4_ptr;
+ int delta0, delta1, delta2;
+
+ for(ch=0;ch<s->nb_channels;ch++)
+ put_bits(&s->pb, 1, 0); /* 512 point MDCT */
+ for(ch=0;ch<s->nb_channels;ch++)
+ put_bits(&s->pb, 1, 1); /* no dither */
+ put_bits(&s->pb, 1, 0); /* no dynamic range */
+ if (block_num == 0) {
+ /* for block 0, even if there is no coupling, we must signal it. This
+ wastes a bit :-) */
+ put_bits(&s->pb, 1, 1); /* coupling strategy present */
+ put_bits(&s->pb, 1, 0); /* no coupling strategy */
+ } else {
+ put_bits(&s->pb, 1, 0); /* no new coupling strategy */
+ }
+
+ if (s->acmod == 2)
+ {
+ if(block_num==0)
+ {
+ /* first block must define rematrixing (rematstr) */
+ put_bits(&s->pb, 1, 1);
+
+ /* dummy rematrixing rematflg(1:4)=0 */
+ for (rbnd=0;rbnd<4;rbnd++)
+ put_bits(&s->pb, 1, 0);
+ }
+ else
+ {
+ /* no matrixing (but should be used in the future) */
+ put_bits(&s->pb, 1, 0);
+ }
+ }
+
+#if defined(DEBUG)
+ {
+ static int count = 0;
+ av_log(NULL, AV_LOG_DEBUG, "Block #%d (%d)\n", block_num, count++);
+ }
+#endif
+ /* exponent strategy */
+ for(ch=0;ch<s->nb_channels;ch++) {
+ put_bits(&s->pb, 2, exp_strategy[ch]);
+ }
+
+ if (s->lfe) {
+ put_bits(&s->pb, 1, exp_strategy[s->lfe_channel]);
+ }
+
+ for(ch=0;ch<s->nb_channels;ch++) {
+ if (exp_strategy[ch] != EXP_REUSE)
+ put_bits(&s->pb, 6, s->chbwcod[ch]);
+ }
+
+ /* exponents */
+ for (ch = 0; ch < s->nb_all_channels; ch++) {
+ switch(exp_strategy[ch]) {
+ case EXP_REUSE:
+ continue;
+ case EXP_D15:
+ group_size = 1;
+ break;
+ case EXP_D25:
+ group_size = 2;
+ break;
+ default:
+ case EXP_D45:
+ group_size = 4;
+ break;
+ }
+ nb_groups = (s->nb_coefs[ch] + (group_size * 3) - 4) / (3 * group_size);
+ p = encoded_exp[ch];
+
+ /* first exponent */
+ exp1 = *p++;
+ put_bits(&s->pb, 4, exp1);
+
+ /* next ones are delta encoded */
+ for(i=0;i<nb_groups;i++) {
+ /* merge three delta in one code */
+ exp0 = exp1;
+ exp1 = p[0];
+ p += group_size;
+ delta0 = exp1 - exp0 + 2;
+
+ exp0 = exp1;
+ exp1 = p[0];
+ p += group_size;
+ delta1 = exp1 - exp0 + 2;
+
+ exp0 = exp1;
+ exp1 = p[0];
+ p += group_size;
+ delta2 = exp1 - exp0 + 2;
+
+ put_bits(&s->pb, 7, ((delta0 * 5 + delta1) * 5) + delta2);
+ }
+
+ if (ch != s->lfe_channel)
+ put_bits(&s->pb, 2, 0); /* no gain range info */
+ }
+
+ /* bit allocation info */
+ baie = (block_num == 0);
+ put_bits(&s->pb, 1, baie);
+ if (baie) {
+ put_bits(&s->pb, 2, s->sdecaycod);
+ put_bits(&s->pb, 2, s->fdecaycod);
+ put_bits(&s->pb, 2, s->sgaincod);
+ put_bits(&s->pb, 2, s->dbkneecod);
+ put_bits(&s->pb, 3, s->floorcod);
+ }
+
+ /* snr offset */
+ put_bits(&s->pb, 1, baie); /* always present with bai */
+ if (baie) {
+ put_bits(&s->pb, 6, s->csnroffst);
+ for(ch=0;ch<s->nb_all_channels;ch++) {
+ put_bits(&s->pb, 4, s->fsnroffst[ch]);
+ put_bits(&s->pb, 3, s->fgaincod[ch]);
+ }
+ }
+
+ put_bits(&s->pb, 1, 0); /* no delta bit allocation */
+ put_bits(&s->pb, 1, 0); /* no data to skip */
+
+ /* mantissa encoding : we use two passes to handle the grouping. A
+ one pass method may be faster, but it would require modifying
+ the output stream. */
+
+ /* first pass: quantize */
+ mant1_cnt = mant2_cnt = mant4_cnt = 0;
+ qmant1_ptr = qmant2_ptr = qmant4_ptr = NULL;
+
+ for (ch = 0; ch < s->nb_all_channels; ch++) {
+ int b, c, e, v;
+
+ for(i=0;i<s->nb_coefs[ch];i++) {
+ c = mdct_coefs[ch][i];
+ e = encoded_exp[ch][i] - global_exp[ch];
+ b = bap[ch][i];
+ switch(b) {
+ case 0:
+ v = 0;
+ break;
+ case 1:
+ v = sym_quant(c, e, 3);
+ switch(mant1_cnt) {
+ case 0:
+ qmant1_ptr = &qmant[ch][i];
+ v = 9 * v;
+ mant1_cnt = 1;
+ break;
+ case 1:
+ *qmant1_ptr += 3 * v;
+ mant1_cnt = 2;
+ v = 128;
+ break;
+ default:
+ *qmant1_ptr += v;
+ mant1_cnt = 0;
+ v = 128;
+ break;
+ }
+ break;
+ case 2:
+ v = sym_quant(c, e, 5);
+ switch(mant2_cnt) {
+ case 0:
+ qmant2_ptr = &qmant[ch][i];
+ v = 25 * v;
+ mant2_cnt = 1;
+ break;
+ case 1:
+ *qmant2_ptr += 5 * v;
+ mant2_cnt = 2;
+ v = 128;
+ break;
+ default:
+ *qmant2_ptr += v;
+ mant2_cnt = 0;
+ v = 128;
+ break;
+ }
+ break;
+ case 3:
+ v = sym_quant(c, e, 7);
+ break;
+ case 4:
+ v = sym_quant(c, e, 11);
+ switch(mant4_cnt) {
+ case 0:
+ qmant4_ptr = &qmant[ch][i];
+ v = 11 * v;
+ mant4_cnt = 1;
+ break;
+ default:
+ *qmant4_ptr += v;
+ mant4_cnt = 0;
+ v = 128;
+ break;
+ }
+ break;
+ case 5:
+ v = sym_quant(c, e, 15);
+ break;
+ case 14:
+ v = asym_quant(c, e, 14);
+ break;
+ case 15:
+ v = asym_quant(c, e, 16);
+ break;
+ default:
+ v = asym_quant(c, e, b - 1);
+ break;
+ }
+ qmant[ch][i] = v;
+ }
+ }
+
+ /* second pass : output the values */
+ for (ch = 0; ch < s->nb_all_channels; ch++) {
+ int b, q;
+
+ for(i=0;i<s->nb_coefs[ch];i++) {
+ q = qmant[ch][i];
+ b = bap[ch][i];
+ switch(b) {
+ case 0:
+ break;
+ case 1:
+ if (q != 128)
+ put_bits(&s->pb, 5, q);
+ break;
+ case 2:
+ if (q != 128)
+ put_bits(&s->pb, 7, q);
+ break;
+ case 3:
+ put_bits(&s->pb, 3, q);
+ break;
+ case 4:
+ if (q != 128)
+ put_bits(&s->pb, 7, q);
+ break;
+ case 14:
+ put_bits(&s->pb, 14, q);
+ break;
+ case 15:
+ put_bits(&s->pb, 16, q);
+ break;
+ default:
+ put_bits(&s->pb, b - 1, q);
+ break;
+ }
+ }
+ }
+}
+
+#define CRC16_POLY ((1 << 0) | (1 << 2) | (1 << 15) | (1 << 16))
+
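+/* crc1 is stored near the start of the frame but protects its first 5/8,
+ so it cannot be computed by simply appending a CRC after the data.
+ mul_poly() and pow_poly() perform GF(2) polynomial arithmetic modulo
+ CRC16_POLY; output_frame_end() uses them to shift the CRC of the payload by
+ the appropriate power of x so that the value written into bytes 2-3 makes
+ the whole protected region check out. */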
+static unsigned int mul_poly(unsigned int a, unsigned int b, unsigned int poly)
+{
+ unsigned int c;
+
+ c = 0;
+ while (a) {
+ if (a & 1)
+ c ^= b;
+ a = a >> 1;
+ b = b << 1;
+ if (b & (1 << 16))
+ b ^= poly;
+ }
+ return c;
+}
+
+static unsigned int pow_poly(unsigned int a, unsigned int n, unsigned int poly)
+{
+ unsigned int r;
+ r = 1;
+ while (n) {
+ if (n & 1)
+ r = mul_poly(r, a, poly);
+ a = mul_poly(a, a, poly);
+ n >>= 1;
+ }
+ return r;
+}
+
+
+/* compute log2(max(abs(tab[]))) */
+static int log2_tab(int16_t *tab, int n)
+{
+ int i, v;
+
+ v = 0;
+ for(i=0;i<n;i++) {
+ v |= abs(tab[i]);
+ }
+ return av_log2(v);
+}
+
+static void lshift_tab(int16_t *tab, int n, int lshift)
+{
+ int i;
+
+ if (lshift > 0) {
+ for(i=0;i<n;i++) {
+ tab[i] <<= lshift;
+ }
+ } else if (lshift < 0) {
+ lshift = -lshift;
+ for(i=0;i<n;i++) {
+ tab[i] >>= lshift;
+ }
+ }
+}
+
+/* fill the end of the frame and compute the two crcs */
+static int output_frame_end(AC3EncodeContext *s)
+{
+ int frame_size, frame_size_58, n, crc1, crc2, crc_inv;
+ uint8_t *frame;
+
+ frame_size = s->frame_size; /* frame size in words */
+ /* align to 8 bits */
+ flush_put_bits(&s->pb);
+ /* add zero bytes to reach the frame size */
+ frame = s->pb.buf;
+ n = 2 * s->frame_size - (pbBufPtr(&s->pb) - frame) - 2;
+ assert(n >= 0);
+ if(n>0)
+ memset(pbBufPtr(&s->pb), 0, n);
+
+ /* Now we must compute both crcs : this is not so easy for crc1
+ because it is at the beginning of the data... */
+ frame_size_58 = (frame_size >> 1) + (frame_size >> 3);
+ crc1 = bswap_16(av_crc(av_crc8005, 0, frame + 4, 2 * frame_size_58 - 4));
+ /* XXX: could precompute crc_inv */
+ crc_inv = pow_poly((CRC16_POLY >> 1), (16 * frame_size_58) - 16, CRC16_POLY);
+ crc1 = mul_poly(crc_inv, crc1, CRC16_POLY);
+ frame[2] = crc1 >> 8;
+ frame[3] = crc1;
+
+ crc2 = bswap_16(av_crc(av_crc8005, 0, frame + 2 * frame_size_58, (frame_size - frame_size_58) * 2 - 2));
+ frame[2*frame_size - 2] = crc2 >> 8;
+ frame[2*frame_size - 1] = crc2;
+
+ // printf("n=%d frame_size=%d\n", n, frame_size);
+ return frame_size * 2;
+}
+
+static int AC3_encode_frame(AVCodecContext *avctx,
+ unsigned char *frame, int buf_size, void *data)
+{
+ AC3EncodeContext *s = avctx->priv_data;
+ int16_t *samples = data;
+ int i, j, k, v, ch;
+ int16_t input_samples[N];
+ int32_t mdct_coef[NB_BLOCKS][AC3_MAX_CHANNELS][N/2];
+ uint8_t exp[NB_BLOCKS][AC3_MAX_CHANNELS][N/2];
+ uint8_t exp_strategy[NB_BLOCKS][AC3_MAX_CHANNELS];
+ uint8_t encoded_exp[NB_BLOCKS][AC3_MAX_CHANNELS][N/2];
+ uint8_t bap[NB_BLOCKS][AC3_MAX_CHANNELS][N/2];
+ int8_t exp_samples[NB_BLOCKS][AC3_MAX_CHANNELS];
+ int frame_bits;
+
+ frame_bits = 0;
+ for(ch=0;ch<s->nb_all_channels;ch++) {
+ /* fixed-point mdct of the six sub blocks & exponent computation */
+ for(i=0;i<NB_BLOCKS;i++) {
+ int16_t *sptr;
+ int sinc;
+
+ /* compute input samples */
+ memcpy(input_samples, s->last_samples[ch], N/2 * sizeof(int16_t));
+ sinc = s->nb_all_channels;
+ sptr = samples + (sinc * (N/2) * i) + ch;
+ for(j=0;j<N/2;j++) {
+ v = *sptr;
+ input_samples[j + N/2] = v;
+ s->last_samples[ch][j] = v;
+ sptr += sinc;
+ }
+
+ /* apply the MDCT window */
+ for(j=0;j<N/2;j++) {
+ input_samples[j] = MUL16(input_samples[j],
+ ac3_window[j]) >> 15;
+ input_samples[N-j-1] = MUL16(input_samples[N-j-1],
+ ac3_window[j]) >> 15;
+ }
+
+ /* Normalize the samples to use the maximum available
+ precision */
+ v = 14 - log2_tab(input_samples, N);
+ if (v < 0)
+ v = 0;
+ exp_samples[i][ch] = v - 10;
+ lshift_tab(input_samples, N, v);
+
+ /* do the MDCT */
+ mdct512(mdct_coef[i][ch], input_samples);
+
+ /* compute "exponents". We take into account the
+ normalization there */
+ for(j=0;j<N/2;j++) {
+ int e;
+ v = abs(mdct_coef[i][ch][j]);
+ if (v == 0)
+ e = 24;
+ else {
+ e = 23 - av_log2(v) + exp_samples[i][ch];
+ if (e >= 24) {
+ e = 24;
+ mdct_coef[i][ch][j] = 0;
+ }
+ }
+ exp[i][ch][j] = e;
+ }
+ }
+
+ compute_exp_strategy(exp_strategy, exp, ch, ch == s->lfe_channel);
+
+ /* compute the exponents as the decoder will see them. The
+ EXP_REUSE case must be handled carefully : we select the
+ min of the exponents */
+ i = 0;
+ while (i < NB_BLOCKS) {
+ j = i + 1;
+ while (j < NB_BLOCKS && exp_strategy[j][ch] == EXP_REUSE) {
+ exponent_min(exp[i][ch], exp[j][ch], s->nb_coefs[ch]);
+ j++;
+ }
+ frame_bits += encode_exp(encoded_exp[i][ch],
+ exp[i][ch], s->nb_coefs[ch],
+ exp_strategy[i][ch]);
+ /* copy encoded exponents for reuse case */
+ for(k=i+1;k<j;k++) {
+ memcpy(encoded_exp[k][ch], encoded_exp[i][ch],
+ s->nb_coefs[ch] * sizeof(uint8_t));
+ }
+ i = j;
+ }
+ }
+
+ /* adjust for fractional frame sizes */
+ while(s->bits_written >= s->bit_rate*1000 && s->samples_written >= s->sample_rate) {
+ s->bits_written -= s->bit_rate*1000;
+ s->samples_written -= s->sample_rate;
+ }
+ s->frame_size = s->frame_size_min + (s->bits_written * s->sample_rate < s->samples_written * s->bit_rate*1000);
+ s->bits_written += s->frame_size * 16;
+ s->samples_written += AC3_FRAME_SIZE;
+
+ compute_bit_allocation(s, bap, encoded_exp, exp_strategy, frame_bits);
+ /* everything is known... let's output the frame */
+ output_frame_header(s, frame);
+
+ for(i=0;i<NB_BLOCKS;i++) {
+ output_audio_block(s, exp_strategy[i], encoded_exp[i],
+ bap[i], mdct_coef[i], exp_samples[i], i);
+ }
+ return output_frame_end(s);
+}
+
+static int AC3_encode_close(AVCodecContext *avctx)
+{
+ av_freep(&avctx->coded_frame);
+ return 0;
+}
+
+#if 0
+/*************************************************************************/
+/* TEST */
+
+#define FN (N/4)
+
+void fft_test(void)
+{
+ IComplex in[FN], in1[FN];
+ int k, n, i;
+ float sum_re, sum_im, a;
+
+ /* FFT test */
+
+ for(i=0;i<FN;i++) {
+ in[i].re = random() % 65535 - 32767;
+ in[i].im = random() % 65535 - 32767;
+ in1[i] = in[i];
+ }
+ fft(in, 7);
+
+ /* do it by hand */
+ for(k=0;k<FN;k++) {
+ sum_re = 0;
+ sum_im = 0;
+ for(n=0;n<FN;n++) {
+ a = -2 * M_PI * (n * k) / FN;
+ sum_re += in1[n].re * cos(a) - in1[n].im * sin(a);
+ sum_im += in1[n].re * sin(a) + in1[n].im * cos(a);
+ }
+ printf("%3d: %6d,%6d %6.0f,%6.0f\n",
+ k, in[k].re, in[k].im, sum_re / FN, sum_im / FN);
+ }
+}
+
+void mdct_test(void)
+{
+ int16_t input[N];
+ int32_t output[N/2];
+ float input1[N];
+ float output1[N/2];
+ float s, a, err, e, emax;
+ int i, k, n;
+
+ for(i=0;i<N;i++) {
+ input[i] = (random() % 65535 - 32767) * 9 / 10;
+ input1[i] = input[i];
+ }
+
+ mdct512(output, input);
+
+ /* do it by hand */
+ for(k=0;k<N/2;k++) {
+ s = 0;
+ for(n=0;n<N;n++) {
+ a = (2*M_PI*(2*n+1+N/2)*(2*k+1) / (4 * N));
+ s += input1[n] * cos(a);
+ }
+ output1[k] = -2 * s / N;
+ }
+
+ err = 0;
+ emax = 0;
+ for(i=0;i<N/2;i++) {
+ printf("%3d: %7d %7.0f\n", i, output[i], output1[i]);
+ e = output[i] - output1[i];
+ if (e > emax)
+ emax = e;
+ err += e * e;
+ }
+ printf("err2=%f emax=%f\n", err / (N/2), emax);
+}
+
+void test_ac3(void)
+{
+ AC3EncodeContext ctx;
+ unsigned char frame[AC3_MAX_CODED_FRAME_SIZE];
+ short samples[AC3_FRAME_SIZE];
+ int ret, i;
+
+ AC3_encode_init(&ctx, 44100, 64000, 1);
+
+ fft_test();
+ mdct_test();
+
+ for(i=0;i<AC3_FRAME_SIZE;i++)
+ samples[i] = (int)(sin(2*M_PI*i*1000.0/44100) * 10000);
+ ret = AC3_encode_frame(&ctx, frame, samples);
+ printf("ret=%d\n", ret);
+}
+#endif
+
+AVCodec ac3_encoder = {
+ "ac3",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_AC3,
+ sizeof(AC3EncodeContext),
+ AC3_encode_init,
+ AC3_encode_frame,
+ AC3_encode_close,
+ NULL,
+};
diff --git a/contrib/ffmpeg/libavcodec/ac3tab.h b/contrib/ffmpeg/libavcodec/ac3tab.h
new file mode 100644
index 000000000..9f90ae95c
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ac3tab.h
@@ -0,0 +1,205 @@
+/*
+ * AC3 tables
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file ac3tab.h
+ * tables taken directly from the AC3 spec.
+ */
+
+/* possible frequencies */
+static const uint16_t ac3_freqs[3] = { 48000, 44100, 32000 };
+
+/* possible bitrates */
+static const uint16_t ac3_bitratetab[19] = {
+ 32, 40, 48, 56, 64, 80, 96, 112, 128,
+ 160, 192, 224, 256, 320, 384, 448, 512, 576, 640
+};
+
+/* AC3 MDCT window */
+
+/* MDCT window */
+static const int16_t ac3_window[256] = {
+ 4, 7, 12, 16, 21, 28, 34, 42,
+ 51, 61, 72, 84, 97, 111, 127, 145,
+ 164, 184, 207, 231, 257, 285, 315, 347,
+ 382, 419, 458, 500, 544, 591, 641, 694,
+ 750, 810, 872, 937, 1007, 1079, 1155, 1235,
+ 1318, 1406, 1497, 1593, 1692, 1796, 1903, 2016,
+ 2132, 2253, 2379, 2509, 2644, 2783, 2927, 3076,
+ 3230, 3389, 3552, 3721, 3894, 4072, 4255, 4444,
+ 4637, 4835, 5038, 5246, 5459, 5677, 5899, 6127,
+ 6359, 6596, 6837, 7083, 7334, 7589, 7848, 8112,
+ 8380, 8652, 8927, 9207, 9491, 9778,10069,10363,
+10660,10960,11264,11570,11879,12190,12504,12820,
+13138,13458,13780,14103,14427,14753,15079,15407,
+15735,16063,16392,16720,17049,17377,17705,18032,
+18358,18683,19007,19330,19651,19970,20287,20602,
+20914,21225,21532,21837,22139,22438,22733,23025,
+23314,23599,23880,24157,24430,24699,24964,25225,
+25481,25732,25979,26221,26459,26691,26919,27142,
+27359,27572,27780,27983,28180,28373,28560,28742,
+28919,29091,29258,29420,29577,29729,29876,30018,
+30155,30288,30415,30538,30657,30771,30880,30985,
+31086,31182,31274,31363,31447,31528,31605,31678,
+31747,31814,31877,31936,31993,32046,32097,32145,
+32190,32232,32272,32310,32345,32378,32409,32438,
+32465,32490,32513,32535,32556,32574,32592,32608,
+32623,32636,32649,32661,32671,32681,32690,32698,
+32705,32712,32718,32724,32729,32733,32737,32741,
+32744,32747,32750,32752,32754,32756,32757,32759,
+32760,32761,32762,32763,32764,32764,32765,32765,
+32766,32766,32766,32766,32767,32767,32767,32767,
+32767,32767,32767,32767,32767,32767,32767,32767,
+32767,32767,32767,32767,32767,32767,32767,32767,
+};
+
+static uint8_t masktab[253];
+
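+/* log-addition table used when integrating the PSD over each band in
+ ac3_parametric_bit_allocation() */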
+static const uint8_t latab[260]= {
+0x0040,0x003f,0x003e,0x003d,0x003c,0x003b,0x003a,0x0039,0x0038,0x0037,
+0x0036,0x0035,0x0034,0x0034,0x0033,0x0032,0x0031,0x0030,0x002f,0x002f,
+0x002e,0x002d,0x002c,0x002c,0x002b,0x002a,0x0029,0x0029,0x0028,0x0027,
+0x0026,0x0026,0x0025,0x0024,0x0024,0x0023,0x0023,0x0022,0x0021,0x0021,
+0x0020,0x0020,0x001f,0x001e,0x001e,0x001d,0x001d,0x001c,0x001c,0x001b,
+0x001b,0x001a,0x001a,0x0019,0x0019,0x0018,0x0018,0x0017,0x0017,0x0016,
+0x0016,0x0015,0x0015,0x0015,0x0014,0x0014,0x0013,0x0013,0x0013,0x0012,
+0x0012,0x0012,0x0011,0x0011,0x0011,0x0010,0x0010,0x0010,0x000f,0x000f,
+0x000f,0x000e,0x000e,0x000e,0x000d,0x000d,0x000d,0x000d,0x000c,0x000c,
+0x000c,0x000c,0x000b,0x000b,0x000b,0x000b,0x000a,0x000a,0x000a,0x000a,
+0x000a,0x0009,0x0009,0x0009,0x0009,0x0009,0x0008,0x0008,0x0008,0x0008,
+0x0008,0x0008,0x0007,0x0007,0x0007,0x0007,0x0007,0x0007,0x0006,0x0006,
+0x0006,0x0006,0x0006,0x0006,0x0006,0x0006,0x0005,0x0005,0x0005,0x0005,
+0x0005,0x0005,0x0005,0x0005,0x0004,0x0004,0x0004,0x0004,0x0004,0x0004,
+0x0004,0x0004,0x0004,0x0004,0x0004,0x0003,0x0003,0x0003,0x0003,0x0003,
+0x0003,0x0003,0x0003,0x0003,0x0003,0x0003,0x0003,0x0003,0x0003,0x0002,
+0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,
+0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0002,0x0001,0x0001,
+0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,
+0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,
+0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,0x0001,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+};
+
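+/* hearing threshold, indexed by band and by sample rate code (fscod) */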
+static const uint16_t hth[50][3]= {
+{ 0x04d0,0x04f0,0x0580 },
+{ 0x04d0,0x04f0,0x0580 },
+{ 0x0440,0x0460,0x04b0 },
+{ 0x0400,0x0410,0x0450 },
+{ 0x03e0,0x03e0,0x0420 },
+{ 0x03c0,0x03d0,0x03f0 },
+{ 0x03b0,0x03c0,0x03e0 },
+{ 0x03b0,0x03b0,0x03d0 },
+{ 0x03a0,0x03b0,0x03c0 },
+{ 0x03a0,0x03a0,0x03b0 },
+{ 0x03a0,0x03a0,0x03b0 },
+{ 0x03a0,0x03a0,0x03b0 },
+{ 0x03a0,0x03a0,0x03a0 },
+{ 0x0390,0x03a0,0x03a0 },
+{ 0x0390,0x0390,0x03a0 },
+{ 0x0390,0x0390,0x03a0 },
+{ 0x0380,0x0390,0x03a0 },
+{ 0x0380,0x0380,0x03a0 },
+{ 0x0370,0x0380,0x03a0 },
+{ 0x0370,0x0380,0x03a0 },
+{ 0x0360,0x0370,0x0390 },
+{ 0x0360,0x0370,0x0390 },
+{ 0x0350,0x0360,0x0390 },
+{ 0x0350,0x0360,0x0390 },
+{ 0x0340,0x0350,0x0380 },
+{ 0x0340,0x0350,0x0380 },
+{ 0x0330,0x0340,0x0380 },
+{ 0x0320,0x0340,0x0370 },
+{ 0x0310,0x0320,0x0360 },
+{ 0x0300,0x0310,0x0350 },
+{ 0x02f0,0x0300,0x0340 },
+{ 0x02f0,0x02f0,0x0330 },
+{ 0x02f0,0x02f0,0x0320 },
+{ 0x02f0,0x02f0,0x0310 },
+{ 0x0300,0x02f0,0x0300 },
+{ 0x0310,0x0300,0x02f0 },
+{ 0x0340,0x0320,0x02f0 },
+{ 0x0390,0x0350,0x02f0 },
+{ 0x03e0,0x0390,0x0300 },
+{ 0x0420,0x03e0,0x0310 },
+{ 0x0460,0x0420,0x0330 },
+{ 0x0490,0x0450,0x0350 },
+{ 0x04a0,0x04a0,0x03c0 },
+{ 0x0460,0x0490,0x0410 },
+{ 0x0440,0x0460,0x0470 },
+{ 0x0440,0x0440,0x04a0 },
+{ 0x0520,0x0480,0x0460 },
+{ 0x0800,0x0630,0x0440 },
+{ 0x0840,0x0840,0x0450 },
+{ 0x0840,0x0840,0x04e0 },
+};
+
+static const uint8_t baptab[64]= {
+ 0, 1, 1, 1, 1, 1, 2, 2, 3, 3,
+ 3, 4, 4, 5, 5, 6, 6, 6, 6, 7,
+ 7, 7, 7, 8, 8, 8, 8, 9, 9, 9,
+ 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
+ 12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
+ 14, 14, 14, 14, 14, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15,
+};
+
+static const uint8_t sdecaytab[4]={
+ 0x0f, 0x11, 0x13, 0x15,
+};
+
+static const uint8_t fdecaytab[4]={
+ 0x3f, 0x53, 0x67, 0x7b,
+};
+
+static const uint16_t sgaintab[4]= {
+ 0x540, 0x4d8, 0x478, 0x410,
+};
+
+static const uint16_t dbkneetab[4]= {
+ 0x000, 0x700, 0x900, 0xb00,
+};
+
+static const int16_t floortab[8]= {
+ 0x2f0, 0x2b0, 0x270, 0x230, 0x1f0, 0x170, 0x0f0, 0xf800,
+};
+
+static const uint16_t fgaintab[8]= {
+ 0x080, 0x100, 0x180, 0x200, 0x280, 0x300, 0x380, 0x400,
+};
+
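+/* band sizes: 50 critical bands covering 253 coefficients
+ (28*1 + 7*3 + 6*6 + 4*12 + 5*24 = 253), which is why masktab has 253
+ entries and bndtab 51 */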
+static const uint8_t bndsz[50]={
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3,
+ 3, 6, 6, 6, 6, 6, 6, 12, 12, 12, 12, 24, 24, 24, 24, 24
+};
+
+static uint8_t bndtab[51];
+
+/* fft & mdct sin cos tables */
+static int16_t costab[64];
+static int16_t sintab[64];
+static int16_t fft_rev[512];
+static int16_t xcos1[128];
+static int16_t xsin1[128];
diff --git a/src/libffmpeg/libavcodec/adpcm.c b/contrib/ffmpeg/libavcodec/adpcm.c
index 796cd267c..ec3fe6f6e 100644
--- a/src/libffmpeg/libavcodec/adpcm.c
+++ b/contrib/ffmpeg/libavcodec/adpcm.c
@@ -2,18 +2,20 @@
* ADPCM codecs
* Copyright (c) 2001-2003 The ffmpeg Project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
diff --git a/src/libffmpeg/libavcodec/adx.c b/contrib/ffmpeg/libavcodec/adx.c
index c8c785590..b449c9124 100644
--- a/src/libffmpeg/libavcodec/adx.c
+++ b/contrib/ffmpeg/libavcodec/adx.c
@@ -2,18 +2,20 @@
* ADX ADPCM codecs
* Copyright (c) 2001,2003 BERO
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
diff --git a/src/libffmpeg/libavcodec/alac.c b/contrib/ffmpeg/libavcodec/alac.c
index 5211e5057..793f71a11 100644
--- a/src/libffmpeg/libavcodec/alac.c
+++ b/contrib/ffmpeg/libavcodec/alac.c
@@ -3,18 +3,20 @@
* Copyright (c) 2005 David Hammerton
* All rights reserved.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -471,9 +473,9 @@ static int alac_decode_frame(AVCodecContext *avctx,
return input_buffer_size;
}
if (alac_set_info(alac)) {
- av_log(NULL, AV_LOG_ERROR, "alac: set_info failed\n");
- return input_buffer_size;
- }
+ av_log(avctx, AV_LOG_ERROR, "alac: set_info failed\n");
+ return input_buffer_size;
+ }
alac->context_initialized = 1;
}
diff --git a/contrib/ffmpeg/libavcodec/allcodecs.c b/contrib/ffmpeg/libavcodec/allcodecs.c
new file mode 100644
index 000000000..9678f6bee
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/allcodecs.c
@@ -0,0 +1,289 @@
+/*
+ * Utils for libavcodec
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file allcodecs.c
+ * Utils for libavcodec.
+ */
+
+#include "avcodec.h"
+
+#define REGISTER_ENCODER(X,x) \
+ if(ENABLE_##X##_ENCODER) register_avcodec(&x##_encoder)
+#define REGISTER_DECODER(X,x) \
+ if(ENABLE_##X##_DECODER) register_avcodec(&x##_decoder)
+#define REGISTER_ENCDEC(X,x) REGISTER_ENCODER(X,x); REGISTER_DECODER(X,x)
+
+#define REGISTER_PARSER(X,x) \
+ if(ENABLE_##X##_PARSER) av_register_codec_parser(&x##_parser)
+
+/* If you do not call this function, then you can select exactly which
+ formats you want to support */
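An editorial sketch, not part of the patch: if avcodec_register_all() is skipped, an application can register only what it needs. The symbols follow from the REGISTER_* macros above (REGISTER_DECODER(MPEG1VIDEO, mpeg1video) registers &mpeg1video_decoder, REGISTER_PARSER(MPEGVIDEO, mpegvideo) registers &mpegvideo_parser); the extern declarations assume the usual types from avcodec.h.

    extern AVCodec mpeg1video_decoder;         /* provided by libavcodec when MPEG1VIDEO is enabled */
    extern AVCodecParser mpegvideo_parser;

    static void register_only_mpeg1(void)
    {
        avcodec_init();                          /* must precede any other libavcodec call */
        register_avcodec(&mpeg1video_decoder);   /* instead of avcodec_register_all() */
        av_register_codec_parser(&mpegvideo_parser);
    }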
+
+/**
+ * simple call to register all the codecs.
+ */
+void avcodec_register_all(void)
+{
+ static int inited = 0;
+
+ if (inited != 0)
+ return;
+ inited = 1;
+
+ /* video codecs */
+ REGISTER_DECODER(AASC, aasc);
+ REGISTER_ENCDEC (ASV1, asv1);
+ REGISTER_ENCDEC (ASV2, asv2);
+ REGISTER_DECODER(AVS, avs);
+ REGISTER_DECODER(BMP, bmp);
+ REGISTER_DECODER(CAVS, cavs);
+ REGISTER_DECODER(CINEPAK, cinepak);
+ REGISTER_DECODER(CLJR, cljr);
+ REGISTER_DECODER(CSCD, cscd);
+ REGISTER_DECODER(CYUV, cyuv);
+ REGISTER_DECODER(DSICINVIDEO, dsicinvideo);
+ REGISTER_ENCDEC (DVVIDEO, dvvideo);
+ REGISTER_DECODER(EIGHTBPS, eightbps);
+ REGISTER_ENCDEC (FFV1, ffv1);
+ REGISTER_ENCDEC (FFVHUFF, ffvhuff);
+ REGISTER_DECODER(FLASHSV, flashsv);
+ REGISTER_DECODER(FLIC, flic);
+ REGISTER_ENCDEC (FLV, flv);
+ REGISTER_DECODER(FOURXM, fourxm);
+ REGISTER_DECODER(FRAPS, fraps);
+ REGISTER_ENCDEC (GIF, gif);
+ REGISTER_ENCDEC (H261, h261);
+ REGISTER_ENCDEC (H263, h263);
+ REGISTER_DECODER(H263I, h263i);
+ REGISTER_ENCODER(H263P, h263p);
+ REGISTER_DECODER(H264, h264);
+ REGISTER_ENCDEC (HUFFYUV, huffyuv);
+ REGISTER_DECODER(IDCIN, idcin);
+ REGISTER_DECODER(INDEO2, indeo2);
+ REGISTER_DECODER(INDEO3, indeo3);
+ REGISTER_DECODER(INTERPLAY_VIDEO, interplay_video);
+ REGISTER_ENCODER(JPEGLS, jpegls);
+ REGISTER_DECODER(KMVC, kmvc);
+ REGISTER_ENCODER(LJPEG, ljpeg);
+ REGISTER_DECODER(LOCO, loco);
+ REGISTER_DECODER(MDEC, mdec);
+ REGISTER_ENCDEC (MJPEG, mjpeg);
+ REGISTER_DECODER(MJPEGB, mjpegb);
+ REGISTER_DECODER(MMVIDEO, mmvideo);
+#ifdef HAVE_XVMC
+ REGISTER_DECODER(MPEG_XVMC, mpeg_xvmc);
+#endif
+ REGISTER_ENCDEC (MPEG1VIDEO, mpeg1video);
+ REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video);
+ REGISTER_ENCDEC (MPEG4, mpeg4);
+ REGISTER_DECODER(MPEGVIDEO, mpegvideo);
+ REGISTER_ENCDEC (MSMPEG4V1, msmpeg4v1);
+ REGISTER_ENCDEC (MSMPEG4V2, msmpeg4v2);
+ REGISTER_ENCDEC (MSMPEG4V3, msmpeg4v3);
+ REGISTER_DECODER(MSRLE, msrle);
+ REGISTER_DECODER(MSVIDEO1, msvideo1);
+ REGISTER_DECODER(MSZH, mszh);
+ REGISTER_DECODER(NUV, nuv);
+ REGISTER_ENCODER(PAM, pam);
+ REGISTER_ENCODER(PBM, pbm);
+ REGISTER_ENCODER(PGM, pgm);
+ REGISTER_ENCODER(PGMYUV, pgmyuv);
+#ifdef CONFIG_ZLIB
+ REGISTER_ENCDEC (PNG, png);
+#endif
+ REGISTER_ENCODER(PPM, ppm);
+ REGISTER_DECODER(QDRAW, qdraw);
+ REGISTER_DECODER(QPEG, qpeg);
+ REGISTER_DECODER(QTRLE, qtrle);
+ REGISTER_ENCDEC (RAWVIDEO, rawvideo);
+ REGISTER_DECODER(ROQ, roq);
+ REGISTER_DECODER(RPZA, rpza);
+ REGISTER_ENCDEC (RV10, rv10);
+ REGISTER_ENCDEC (RV20, rv20);
+ REGISTER_DECODER(SMACKER, smacker);
+ REGISTER_DECODER(SMC, smc);
+ REGISTER_ENCDEC (SNOW, snow);
+ REGISTER_DECODER(SP5X, sp5x);
+ REGISTER_ENCDEC (SVQ1, svq1);
+ REGISTER_DECODER(SVQ3, svq3);
+ REGISTER_DECODER(TARGA, targa);
+ REGISTER_DECODER(THEORA, theora);
+ REGISTER_DECODER(TIERTEXSEQVIDEO, tiertexseqvideo);
+ REGISTER_DECODER(TIFF, tiff);
+ REGISTER_DECODER(TRUEMOTION1, truemotion1);
+ REGISTER_DECODER(TRUEMOTION2, truemotion2);
+ REGISTER_DECODER(TSCC, tscc);
+ REGISTER_DECODER(ULTI, ulti);
+ REGISTER_DECODER(VC1, vc1);
+ REGISTER_DECODER(VCR1, vcr1);
+ REGISTER_DECODER(VMDVIDEO, vmdvideo);
+ REGISTER_DECODER(VMNC, vmnc);
+ REGISTER_DECODER(VP3, vp3);
+ REGISTER_DECODER(VP5, vp5);
+ REGISTER_DECODER(VP6, vp6);
+ REGISTER_DECODER(VP6F, vp6f);
+ REGISTER_DECODER(VQA, vqa);
+ REGISTER_ENCDEC (WMV1, wmv1);
+ REGISTER_ENCDEC (WMV2, wmv2);
+ REGISTER_DECODER(WMV3, wmv3);
+ REGISTER_DECODER(WNV1, wnv1);
+#ifdef CONFIG_X264
+ REGISTER_ENCODER(X264, x264);
+#endif
+ REGISTER_DECODER(XAN_WC3, xan_wc3);
+ REGISTER_DECODER(XL, xl);
+#ifdef CONFIG_XVID
+ REGISTER_ENCODER(XVID, xvid);
+#endif
+ REGISTER_ENCDEC (ZLIB, zlib);
+ REGISTER_DECODER(ZMBV, zmbv);
+
+ /* audio codecs */
+#ifdef CONFIG_FAAD
+ REGISTER_DECODER(AAC, aac);
+ REGISTER_DECODER(MPEG4AAC, mpeg4aac);
+#endif
+#ifdef CONFIG_A52
+ REGISTER_DECODER(AC3, ac3);
+#endif
+ REGISTER_ENCODER(AC3, ac3);
+ REGISTER_DECODER(ALAC, alac);
+#if defined(CONFIG_AMR_NB) || defined(CONFIG_AMR_NB_FIXED)
+ REGISTER_ENCDEC (AMR_NB, amr_nb);
+#endif
+#ifdef CONFIG_AMR_WB
+ REGISTER_ENCDEC (AMR_WB, amr_wb);
+#endif
+ REGISTER_DECODER(COOK, cook);
+ REGISTER_DECODER(DSICINAUDIO, dsicinaudio);
+#ifdef CONFIG_DTS
+ REGISTER_DECODER(DTS, dts);
+#endif
+#ifdef CONFIG_FAAC
+ REGISTER_ENCODER(FAAC, faac);
+#endif
+ REGISTER_ENCDEC (FLAC, flac);
+ REGISTER_DECODER(IMC, imc);
+#ifdef CONFIG_LIBGSM
+ REGISTER_ENCDEC (LIBGSM, libgsm);
+#endif
+ REGISTER_DECODER(MACE3, mace3);
+ REGISTER_DECODER(MACE6, mace6);
+ REGISTER_ENCDEC (MP2, mp2);
+ REGISTER_DECODER(MP3, mp3);
+ REGISTER_DECODER(MP3ADU, mp3adu);
+#ifdef CONFIG_MP3LAME
+ REGISTER_ENCODER(MP3LAME, mp3lame);
+#endif
+ REGISTER_DECODER(MP3ON4, mp3on4);
+#ifdef CONFIG_LIBVORBIS
+ if (!ENABLE_VORBIS_ENCODER) REGISTER_ENCODER(OGGVORBIS, oggvorbis);
+ if (!ENABLE_VORBIS_DECODER) REGISTER_DECODER(OGGVORBIS, oggvorbis);
+#endif
+ REGISTER_DECODER(QDM2, qdm2);
+ REGISTER_DECODER(RA_144, ra_144);
+ REGISTER_DECODER(RA_288, ra_288);
+ REGISTER_DECODER(SHORTEN, shorten);
+ REGISTER_DECODER(SMACKAUD, smackaud);
+ REGISTER_ENCDEC (SONIC, sonic);
+ REGISTER_ENCODER(SONIC_LS, sonic_ls);
+ REGISTER_DECODER(TRUESPEECH, truespeech);
+ REGISTER_DECODER(TTA, tta);
+ REGISTER_DECODER(VMDAUDIO, vmdaudio);
+ REGISTER_ENCDEC (VORBIS, vorbis);
+ REGISTER_DECODER(WAVPACK, wavpack);
+ REGISTER_DECODER(WMAV1, wmav1);
+ REGISTER_DECODER(WMAV2, wmav2);
+ REGISTER_DECODER(WS_SND1, ws_snd1);
+
+ /* pcm codecs */
+ REGISTER_ENCDEC (PCM_ALAW, pcm_alaw);
+ REGISTER_ENCDEC (PCM_MULAW, pcm_mulaw);
+ REGISTER_ENCDEC (PCM_S8, pcm_s8);
+ REGISTER_ENCDEC (PCM_S16BE, pcm_s16be);
+ REGISTER_ENCDEC (PCM_S16LE, pcm_s16le);
+ REGISTER_ENCDEC (PCM_S24BE, pcm_s24be);
+ REGISTER_ENCDEC (PCM_S24DAUD, pcm_s24daud);
+ REGISTER_ENCDEC (PCM_S24LE, pcm_s24le);
+ REGISTER_ENCDEC (PCM_S32BE, pcm_s32be);
+ REGISTER_ENCDEC (PCM_S32LE, pcm_s32le);
+ REGISTER_ENCDEC (PCM_U8, pcm_u8);
+ REGISTER_ENCDEC (PCM_U16BE, pcm_u16be);
+ REGISTER_ENCDEC (PCM_U16LE, pcm_u16le);
+ REGISTER_ENCDEC (PCM_U24BE, pcm_u24be);
+ REGISTER_ENCDEC (PCM_U24LE, pcm_u24le);
+ REGISTER_ENCDEC (PCM_U32BE, pcm_u32be);
+ REGISTER_ENCDEC (PCM_U32LE, pcm_u32le);
+
+ /* dpcm codecs */
+ REGISTER_DECODER(INTERPLAY_DPCM, interplay_dpcm);
+ REGISTER_DECODER(ROQ_DPCM, roq_dpcm);
+ REGISTER_DECODER(SOL_DPCM, sol_dpcm);
+ REGISTER_DECODER(XAN_DPCM, xan_dpcm);
+
+ /* adpcm codecs */
+ REGISTER_ENCDEC (ADPCM_4XM, adpcm_4xm);
+ REGISTER_ENCDEC (ADPCM_ADX, adpcm_adx);
+ REGISTER_ENCDEC (ADPCM_CT, adpcm_ct);
+ REGISTER_ENCDEC (ADPCM_EA, adpcm_ea);
+ REGISTER_ENCDEC (ADPCM_G726, adpcm_g726);
+ REGISTER_ENCDEC (ADPCM_IMA_DK3, adpcm_ima_dk3);
+ REGISTER_ENCDEC (ADPCM_IMA_DK4, adpcm_ima_dk4);
+ REGISTER_ENCDEC (ADPCM_IMA_QT, adpcm_ima_qt);
+ REGISTER_ENCDEC (ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg);
+ REGISTER_ENCDEC (ADPCM_IMA_WAV, adpcm_ima_wav);
+ REGISTER_ENCDEC (ADPCM_IMA_WS, adpcm_ima_ws);
+ REGISTER_ENCDEC (ADPCM_MS, adpcm_ms);
+ REGISTER_ENCDEC (ADPCM_SBPRO_2, adpcm_sbpro_2);
+ REGISTER_ENCDEC (ADPCM_SBPRO_3, adpcm_sbpro_3);
+ REGISTER_ENCDEC (ADPCM_SBPRO_4, adpcm_sbpro_4);
+ REGISTER_ENCDEC (ADPCM_SWF, adpcm_swf);
+ REGISTER_ENCDEC (ADPCM_XA, adpcm_xa);
+ REGISTER_ENCDEC (ADPCM_YAMAHA, adpcm_yamaha);
+
+ /* subtitles */
+ REGISTER_ENCDEC (DVBSUB, dvbsub);
+ REGISTER_ENCDEC (DVDSUB, dvdsub);
+
+ /* parsers */
+ REGISTER_PARSER (AAC, aac);
+ REGISTER_PARSER (AC3, ac3);
+ REGISTER_PARSER (CAVSVIDEO, cavsvideo);
+ REGISTER_PARSER (DVBSUB, dvbsub);
+ REGISTER_PARSER (DVDSUB, dvdsub);
+ REGISTER_PARSER (H261, h261);
+ REGISTER_PARSER (H263, h263);
+ REGISTER_PARSER (H264, h264);
+ REGISTER_PARSER (MJPEG, mjpeg);
+ REGISTER_PARSER (MPEG4VIDEO, mpeg4video);
+ REGISTER_PARSER (MPEGAUDIO, mpegaudio);
+ REGISTER_PARSER (MPEGVIDEO, mpegvideo);
+ REGISTER_PARSER (PNM, pnm);
+
+ av_register_bitstream_filter(&dump_extradata_bsf);
+ av_register_bitstream_filter(&remove_extradata_bsf);
+ av_register_bitstream_filter(&noise_bsf);
+ av_register_bitstream_filter(&mp3_header_compress_bsf);
+ av_register_bitstream_filter(&mp3_header_decompress_bsf);
+ av_register_bitstream_filter(&mjpega_dump_header_bsf);
+}
+
diff --git a/src/libffmpeg/libavcodec/alpha/asm.h b/contrib/ffmpeg/libavcodec/alpha/asm.h
index 056e043f3..c0ddde528 100644
--- a/src/libffmpeg/libavcodec/alpha/asm.h
+++ b/contrib/ffmpeg/libavcodec/alpha/asm.h
@@ -2,18 +2,20 @@
* Alpha optimized DSP utils
* Copyright (c) 2002 Falk Hueffner <falk@debian.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/alpha/dsputil_alpha.c b/contrib/ffmpeg/libavcodec/alpha/dsputil_alpha.c
index 299a25dc4..c98d6f7ff 100644
--- a/src/libffmpeg/libavcodec/alpha/dsputil_alpha.c
+++ b/contrib/ffmpeg/libavcodec/alpha/dsputil_alpha.c
@@ -2,18 +2,20 @@
* Alpha optimized DSP utils
* Copyright (c) 2002 Falk Hueffner <falk@debian.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/alpha/dsputil_alpha_asm.S b/contrib/ffmpeg/libavcodec/alpha/dsputil_alpha_asm.S
index 29ba9dc02..367f2d743 100644
--- a/src/libffmpeg/libavcodec/alpha/dsputil_alpha_asm.S
+++ b/contrib/ffmpeg/libavcodec/alpha/dsputil_alpha_asm.S
@@ -2,18 +2,20 @@
* Alpha optimized DSP utils
* Copyright (c) 2002 Falk Hueffner <falk@debian.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/alpha/motion_est_alpha.c b/contrib/ffmpeg/libavcodec/alpha/motion_est_alpha.c
index ea8580be7..337ffb38e 100644
--- a/src/libffmpeg/libavcodec/alpha/motion_est_alpha.c
+++ b/contrib/ffmpeg/libavcodec/alpha/motion_est_alpha.c
@@ -2,18 +2,20 @@
* Alpha optimized DSP utils
* Copyright (c) 2002 Falk Hueffner <falk@debian.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/alpha/motion_est_mvi_asm.S b/contrib/ffmpeg/libavcodec/alpha/motion_est_mvi_asm.S
index e043f4371..6015a7824 100644
--- a/src/libffmpeg/libavcodec/alpha/motion_est_mvi_asm.S
+++ b/contrib/ffmpeg/libavcodec/alpha/motion_est_mvi_asm.S
@@ -2,18 +2,20 @@
* Alpha optimized DSP utils
* Copyright (c) 2002 Falk Hueffner <falk@debian.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/alpha/mpegvideo_alpha.c b/contrib/ffmpeg/libavcodec/alpha/mpegvideo_alpha.c
index 4c512451e..8ad264b06 100644
--- a/src/libffmpeg/libavcodec/alpha/mpegvideo_alpha.c
+++ b/contrib/ffmpeg/libavcodec/alpha/mpegvideo_alpha.c
@@ -2,18 +2,20 @@
* Alpha optimized DSP utils
* Copyright (c) 2002 Falk Hueffner <falk@debian.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/alpha/regdef.h b/contrib/ffmpeg/libavcodec/alpha/regdef.h
index 7e7fc06b2..01e263bac 100644
--- a/src/libffmpeg/libavcodec/alpha/regdef.h
+++ b/contrib/ffmpeg/libavcodec/alpha/regdef.h
@@ -1,3 +1,24 @@
+/*
+ * Alpha optimized DSP utils
+ * copyright (c) 2002 Falk Hueffner <falk@debian.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/* Some BSDs don't seem to have regdef.h... sigh */
#ifndef alpha_regdef_h
#define alpha_regdef_h
diff --git a/src/libffmpeg/libavcodec/alpha/simple_idct_alpha.c b/contrib/ffmpeg/libavcodec/alpha/simple_idct_alpha.c
index 3a5db009b..adadd3ab0 100644
--- a/src/libffmpeg/libavcodec/alpha/simple_idct_alpha.c
+++ b/contrib/ffmpeg/libavcodec/alpha/simple_idct_alpha.c
@@ -3,18 +3,20 @@
*
* Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* based upon some outcommented c code from mpeg2dec (idct_mmx.c
diff --git a/contrib/ffmpeg/libavcodec/amr.c b/contrib/ffmpeg/libavcodec/amr.c
new file mode 100644
index 000000000..2d1877b22
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/amr.c
@@ -0,0 +1,715 @@
+/*
+ * AMR Audio decoder stub
+ * Copyright (c) 2003 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+ /*
+ This code implements amr-nb and amr-wb audio encoder/decoder through external reference
+ code from www.3gpp.org. The licence of the code from 3gpp is unclear so you
+ have to download the code separately. Two versions exist: one fixed-point
+ and one floating-point. For some reason the float encoder is significantly
+ faster, at least on a P4 1.5GHz (0.9s instead of 9.9s on a 30s audio clip at MR102).
+ Both float and fixed point are supported for amr-nb, but only float for
+ amr-wb.
+
+ --AMR-NB--
+ The fixed-point (TS26.073) can be downloaded from:
+ http://www.3gpp.org/ftp/Specs/archive/26_series/26.073/26073-510.zip
+ Extract the source into ffmpeg/libavcodec/amr
+ To use the fixed version run "./configure" with "--enable-amr_nb-fixed"
+
+ The float version (default) can be downloaded from:
+ http://www.3gpp.org/ftp/Specs/archive/26_series/26.104/26104-510.zip
+ Extract the source into ffmpeg/libavcodec/amr_float
+
+ The specification for amr-nb can be found in TS 26.071
+ (http://www.3gpp.org/ftp/Specs/html-info/26071.htm) and some other
+ info at http://www.3gpp.org/ftp/Specs/html-info/26-series.htm
+
+ --AMR-WB--
+ The reference code can be downloaded from:
+ http://www.3gpp.org/ftp/Specs/archive/26_series/26.204/26204-510.zip
+ It should be extracted to "libavcodec/amrwb_float". Enable it with
+ "--enable-amr_wb".
+
+ The specification for amr-wb can be downloaded from:
+ http://www.3gpp.org/ftp/Specs/archive/26_series/26.171/26171-500.zip
+
+ If someone wants to use the fixed point version, it can be downloaded
+ from: http://www.3gpp.org/ftp/Specs/archive/26_series/26.173/26173-571.zip
+
+ */
+
+#include "avcodec.h"
+
+#ifdef CONFIG_AMR_NB_FIXED
+
+#define MMS_IO
+
+#include "amr/sp_dec.h"
+#include "amr/d_homing.h"
+#include "amr/typedef.h"
+#include "amr/sp_enc.h"
+#include "amr/sid_sync.h"
+#include "amr/e_homing.h"
+
+#else
+#include "amr_float/interf_dec.h"
+#include "amr_float/interf_enc.h"
+#endif
+
+/* Common code for fixed and float version*/
+typedef struct AMR_bitrates
+{
+ int startrate;
+ int stoprate;
+ enum Mode mode;
+
+} AMR_bitrates;
+
+/* Match desired bitrate with closest one*/
+static enum Mode getBitrateMode(int bitrate)
+{
+ /* Adjusted so that all bitrates can be used from commandline where
+ only a multiple of 1000 can be specified*/
+ AMR_bitrates rates[]={ {0,4999,MR475}, //4
+ {5000,5899,MR515},//5
+ {5900,6699,MR59},//6
+ {6700,7000,MR67},//7
+ {7001,7949,MR74},//8
+ {7950,9999,MR795},//9
+ {10000,11999,MR102},//10
+ {12000,64000,MR122},//12
+
+ };
+ int i;
+ for(i=0;i<8;i++)
+ {
+ if(rates[i].startrate<=bitrate && rates[i].stoprate>=bitrate)
+ {
+ return(rates[i].mode);
+ }
+ }
+ /*Return highest possible*/
+ return(MR122);
+}
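A quick worked illustration of the table above (editorial, not part of the patch): a command-line bit rate is matched against the enclosing {startrate, stoprate} row, and anything outside the table falls through to the highest mode.

    enum Mode m1 = getBitrateMode(7500);   /* 7001..7949   -> MR74  */
    enum Mode m2 = getBitrateMode(12200);  /* 12000..64000 -> MR122 */
    enum Mode m3 = getBitrateMode(90000);  /* no row matches -> MR122, the highest mode */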
+
+static void amr_decode_fix_avctx(AVCodecContext * avctx)
+{
+ const int is_amr_wb = 1 + (avctx->codec_id == CODEC_ID_AMR_WB);
+
+ if(avctx->sample_rate == 0)
+ {
+ avctx->sample_rate = 8000 * is_amr_wb;
+ }
+
+ if(avctx->channels == 0)
+ {
+ avctx->channels = 1;
+ }
+
+ avctx->frame_size = 160 * is_amr_wb;
+}
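Editorial note on the helper above: is_amr_wb evaluates to 1 for AMR-NB and 2 for AMR-WB, so the defaults filled in for unset fields work out to the standard 20 ms frame in both cases.

    /* codec            is_amr_wb   sample_rate   frame_size
     * CODEC_ID_AMR_NB      1          8000 Hz        160 samples (20 ms)
     * CODEC_ID_AMR_WB      2         16000 Hz        320 samples (20 ms)
     */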
+
+#ifdef CONFIG_AMR_NB_FIXED
+/* fixed point version*/
+/* frame size in serial bitstream file (frame type + serial stream + flags) */
+#define SERIAL_FRAMESIZE (1+MAX_SERIAL_SIZE+5)
+
+typedef struct AMRContext {
+ int frameCount;
+ Speech_Decode_FrameState *speech_decoder_state;
+ enum RXFrameType rx_type;
+ enum Mode mode;
+ Word16 reset_flag;
+ Word16 reset_flag_old;
+
+ enum Mode enc_bitrate;
+ Speech_Encode_FrameState *enstate;
+ sid_syncState *sidstate;
+ enum TXFrameType tx_frametype;
+
+
+} AMRContext;
+
+static int amr_nb_decode_init(AVCodecContext * avctx)
+{
+ AMRContext *s = avctx->priv_data;
+ s->frameCount=0;
+ s->speech_decoder_state=NULL;
+ s->rx_type = (enum RXFrameType)0;
+ s->mode= (enum Mode)0;
+ s->reset_flag=0;
+ s->reset_flag_old=1;
+
+ if(Speech_Decode_Frame_init(&s->speech_decoder_state, "Decoder"))
+ {
+ av_log(avctx, AV_LOG_ERROR, "Speech_Decode_Frame_init error\n");
+ return -1;
+ }
+
+ amr_decode_fix_avctx(avctx);
+
+ if(avctx->channels > 1)
+ {
+ av_log(avctx, AV_LOG_ERROR, "amr_nb: multichannel decoding not supported\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int amr_nb_encode_init(AVCodecContext * avctx)
+{
+ AMRContext *s = avctx->priv_data;
+ s->frameCount=0;
+ s->speech_decoder_state=NULL;
+ s->rx_type = (enum RXFrameType)0;
+ s->mode= (enum Mode)0;
+ s->reset_flag=0;
+ s->reset_flag_old=1;
+
+ if(avctx->sample_rate!=8000)
+ {
+ if(avctx->debug)
+ {
+ av_log(avctx, AV_LOG_DEBUG, "Only 8000Hz sample rate supported\n");
+ }
+ return -1;
+ }
+
+ if(avctx->channels!=1)
+ {
+ if(avctx->debug)
+ {
+ av_log(avctx, AV_LOG_DEBUG, "Only mono supported\n");
+ }
+ return -1;
+ }
+
+ avctx->frame_size=160;
+ avctx->coded_frame= avcodec_alloc_frame();
+
+ if(Speech_Encode_Frame_init(&s->enstate, 0, "encoder") || sid_sync_init (&s->sidstate))
+ {
+ if(avctx->debug)
+ {
+ av_log(avctx, AV_LOG_DEBUG, "Speech_Encode_Frame_init error\n");
+ }
+ return -1;
+ }
+
+ s->enc_bitrate=getBitrateMode(avctx->bit_rate);
+
+ return 0;
+}
+
+static int amr_nb_encode_close(AVCodecContext * avctx)
+{
+ AMRContext *s = avctx->priv_data;
+ Speech_Encode_Frame_exit(&s->enstate);
+ sid_sync_exit (&s->sidstate);
+ av_freep(&avctx->coded_frame);
+ return 0;
+}
+
+static int amr_nb_decode_close(AVCodecContext * avctx)
+{
+ AMRContext *s = avctx->priv_data;
+ Speech_Decode_Frame_exit(&s->speech_decoder_state);
+ return 0;
+}
+
+static int amr_nb_decode_frame(AVCodecContext * avctx,
+ void *data, int *data_size,
+ uint8_t * buf, int buf_size)
+{
+ AMRContext *s = avctx->priv_data;
+
+ uint8_t*amrData=buf;
+ int offset=0;
+
+ UWord8 toc, q, ft;
+
+ Word16 serial[SERIAL_FRAMESIZE]; /* coded bits */
+ Word16 *synth;
+ UWord8 *packed_bits;
+
+ static Word16 packed_size[16] = {12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0};
+ int i;
+
+ //printf("amr_decode_frame data_size=%i buf=0x%X buf_size=%d frameCount=%d!!\n",*data_size,buf,buf_size,s->frameCount);
+
+ synth=data;
+
+// while(offset<buf_size)
+ {
+ toc=amrData[offset];
+ /* read rest of the frame based on ToC byte */
+ q = (toc >> 2) & 0x01;
+ ft = (toc >> 3) & 0x0F;
+
+ //printf("offset=%d, packet_size=%d amrData= 0x%X %X %X %X\n",offset,packed_size[ft],amrData[offset],amrData[offset+1],amrData[offset+2],amrData[offset+3]);
+
+ offset++;
+
+ packed_bits=amrData+offset;
+
+ offset+=packed_size[ft];
+
+ //Unsort and unpack bits
+ s->rx_type = UnpackBits(q, ft, packed_bits, &s->mode, &serial[1]);
+
+ //We have a new frame
+ s->frameCount++;
+
+ if (s->rx_type == RX_NO_DATA)
+ {
+ s->mode = s->speech_decoder_state->prev_mode;
+ }
+ else {
+ s->speech_decoder_state->prev_mode = s->mode;
+ }
+
+ /* if homed: check if this frame is another homing frame */
+ if (s->reset_flag_old == 1)
+ {
+ /* only check until end of first subframe */
+ s->reset_flag = decoder_homing_frame_test_first(&serial[1], s->mode);
+ }
+ /* produce encoder homing frame if homed & input=decoder homing frame */
+ if ((s->reset_flag != 0) && (s->reset_flag_old != 0))
+ {
+ for (i = 0; i < L_FRAME; i++)
+ {
+ synth[i] = EHF_MASK;
+ }
+ }
+ else
+ {
+ /* decode frame */
+ Speech_Decode_Frame(s->speech_decoder_state, s->mode, &serial[1], s->rx_type, synth);
+ }
+
+ //Each AMR-frame results in 160 16-bit samples
+ *data_size+=160*2;
+ synth+=160;
+
+ /* if not homed: check whether current frame is a homing frame */
+ if (s->reset_flag_old == 0)
+ {
+ /* check whole frame */
+ s->reset_flag = decoder_homing_frame_test(&serial[1], s->mode);
+ }
+ /* reset decoder if current frame is a homing frame */
+ if (s->reset_flag != 0)
+ {
+ Speech_Decode_Frame_reset(s->speech_decoder_state);
+ }
+ s->reset_flag_old = s->reset_flag;
+
+ }
+ return offset;
+}
+
+
+static int amr_nb_encode_frame(AVCodecContext *avctx,
+ unsigned char *frame/*out*/, int buf_size, void *data/*in*/)
+{
+ short serial_data[250] = {0};
+
+ AMRContext *s = avctx->priv_data;
+ int written;
+
+ s->reset_flag = encoder_homing_frame_test(data);
+
+ Speech_Encode_Frame(s->enstate, s->enc_bitrate, data, &serial_data[1], &s->mode);
+
+ /* add frame type and mode */
+ sid_sync (s->sidstate, s->mode, &s->tx_frametype);
+
+ written = PackBits(s->mode, s->enc_bitrate, s->tx_frametype, &serial_data[1], frame);
+
+ if (s->reset_flag != 0)
+ {
+ Speech_Encode_Frame_reset(s->enstate);
+ sid_sync_reset(s->sidstate);
+ }
+ return written;
+}
+
+
+#elif defined(CONFIG_AMR_NB) /* Floating point version */
+
+typedef struct AMRContext {
+ int frameCount;
+ void * decState;
+ int *enstate;
+ enum Mode enc_bitrate;
+} AMRContext;
+
+static int amr_nb_decode_init(AVCodecContext * avctx)
+{
+ AMRContext *s = avctx->priv_data;
+ s->frameCount=0;
+ s->decState=Decoder_Interface_init();
+ if(!s->decState)
+ {
+ av_log(avctx, AV_LOG_ERROR, "Decoder_Interface_init error\r\n");
+ return -1;
+ }
+
+ amr_decode_fix_avctx(avctx);
+
+ if(avctx->channels > 1)
+ {
+ av_log(avctx, AV_LOG_ERROR, "amr_nb: multichannel decoding not supported\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int amr_nb_encode_init(AVCodecContext * avctx)
+{
+ AMRContext *s = avctx->priv_data;
+ s->frameCount=0;
+
+ if(avctx->sample_rate!=8000)
+ {
+ if(avctx->debug)
+ {
+ av_log(avctx, AV_LOG_DEBUG, "Only 8000Hz sample rate supported\n");
+ }
+ return -1;
+ }
+
+ if(avctx->channels!=1)
+ {
+ if(avctx->debug)
+ {
+ av_log(avctx, AV_LOG_DEBUG, "Only mono supported\n");
+ }
+ return -1;
+ }
+
+ avctx->frame_size=160;
+ avctx->coded_frame= avcodec_alloc_frame();
+
+ s->enstate=Encoder_Interface_init(0);
+ if(!s->enstate)
+ {
+ if(avctx->debug)
+ {
+ av_log(avctx, AV_LOG_DEBUG, "Encoder_Interface_init error\n");
+ }
+ return -1;
+ }
+
+ s->enc_bitrate=getBitrateMode(avctx->bit_rate);
+
+ return 0;
+}
+
+static int amr_nb_decode_close(AVCodecContext * avctx)
+{
+ AMRContext *s = avctx->priv_data;
+ Decoder_Interface_exit(s->decState);
+ return 0;
+}
+
+static int amr_nb_encode_close(AVCodecContext * avctx)
+{
+ AMRContext *s = avctx->priv_data;
+ Encoder_Interface_exit(s->enstate);
+ av_freep(&avctx->coded_frame);
+ return 0;
+}
+
+static int amr_nb_decode_frame(AVCodecContext * avctx,
+ void *data, int *data_size,
+ uint8_t * buf, int buf_size)
+{
+ AMRContext *s = (AMRContext*)avctx->priv_data;
+
+ uint8_t*amrData=buf;
+ static short block_size[16]={ 12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0 };
+ enum Mode dec_mode;
+ int packet_size;
+
+ /* av_log(NULL,AV_LOG_DEBUG,"amr_decode_frame buf=%p buf_size=%d frameCount=%d!!\n",buf,buf_size,s->frameCount); */
+
+ if(buf_size==0) {
+ /* nothing to do */
+ return 0;
+ }
+
+ dec_mode = (buf[0] >> 3) & 0x000F;
+ packet_size = block_size[dec_mode]+1;
+
+ if(packet_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "amr frame too short (%u, should be %u)\n", buf_size, packet_size);
+ return -1;
+ }
+
+ s->frameCount++;
+ /* av_log(NULL,AV_LOG_DEBUG,"packet_size=%d amrData= 0x%X %X %X %X\n",packet_size,amrData[0],amrData[1],amrData[2],amrData[3]); */
+ /* call decoder */
+ Decoder_Interface_Decode(s->decState, amrData, data, 0);
+ *data_size=160*2;
+
+ return packet_size;
+}
+
+static int amr_nb_encode_frame(AVCodecContext *avctx,
+ unsigned char *frame/*out*/, int buf_size, void *data/*in*/)
+{
+ AMRContext *s = (AMRContext*)avctx->priv_data;
+ int written;
+
+ s->enc_bitrate=getBitrateMode(avctx->bit_rate);
+
+ written = Encoder_Interface_Encode(s->enstate,
+ s->enc_bitrate,
+ data,
+ frame,
+ 0);
+ /* av_log(NULL,AV_LOG_DEBUG,"amr_nb_encode_frame encoded %u bytes, bitrate %u, first byte was %#02x\n",written, s->enc_bitrate, frame[0] ); */
+
+ return written;
+}
+
+#endif
+
+#if defined(CONFIG_AMR_NB) || defined(CONFIG_AMR_NB_FIXED)
+
+AVCodec amr_nb_decoder =
+{
+ "amr_nb",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_AMR_NB,
+ sizeof(AMRContext),
+ amr_nb_decode_init,
+ NULL,
+ amr_nb_decode_close,
+ amr_nb_decode_frame,
+};
+
+AVCodec amr_nb_encoder =
+{
+ "amr_nb",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_AMR_NB,
+ sizeof(AMRContext),
+ amr_nb_encode_init,
+ amr_nb_encode_frame,
+ amr_nb_encode_close,
+ NULL,
+};
+
+#endif
+
+/* -----------AMR wideband ------------*/
+#ifdef CONFIG_AMR_WB
+
+#ifdef _TYPEDEF_H
+//To avoid duplicate typedefs from typedef.h in amr-nb
+#define typedef_h
+#endif
+
+#include "amrwb_float/enc_if.h"
+#include "amrwb_float/dec_if.h"
+
+/* Common code for fixed and float version*/
+typedef struct AMRWB_bitrates
+{
+ int startrate;
+ int stoprate;
+ int mode;
+
+} AMRWB_bitrates;
+
+static int getWBBitrateMode(int bitrate)
+{
+ /* Adjusted so that all bitrates can be used from commandline where
+ only a multiple of 1000 can be specified*/
+ AMRWB_bitrates rates[]={ {0,7999,0}, //6.60 kbit/s
+ {8000,9999,1},//8.85
+ {10000,13000,2},//12.65
+ {13001,14999,3},//14.25
+ {15000,17000,4},//15.85
+ {17001,18000,5},//18.25
+ {18001,22000,6},//19.85
+ {22001,23000,7},//23.05
+ {23001,24000,8},//23.85
+
+ };
+ int i;
+
+ for(i=0;i<9;i++)
+ {
+ if(rates[i].startrate<=bitrate && rates[i].stoprate>=bitrate)
+ {
+ return(rates[i].mode);
+ }
+ }
+ /*Return highest possible*/
+ return(8);
+}
+
+
+typedef struct AMRWBContext {
+ int frameCount;
+ void *state;
+ int mode;
+ Word16 allow_dtx;
+} AMRWBContext;
+
+static int amr_wb_encode_init(AVCodecContext * avctx)
+{
+ AMRWBContext *s = (AMRWBContext*)avctx->priv_data;
+ s->frameCount=0;
+
+ if(avctx->sample_rate!=16000)
+ {
+ if(avctx->debug)
+ {
+ av_log(avctx, AV_LOG_DEBUG, "Only 16000Hz sample rate supported\n");
+ }
+ return -1;
+ }
+
+ if(avctx->channels!=1)
+ {
+ if(avctx->debug)
+ {
+ av_log(avctx, AV_LOG_DEBUG, "Only mono supported\n");
+ }
+ return -1;
+ }
+
+ avctx->frame_size=320;
+ avctx->coded_frame= avcodec_alloc_frame();
+
+ s->state = E_IF_init();
+ s->mode=getWBBitrateMode(avctx->bit_rate);
+ s->allow_dtx=0;
+
+ return 0;
+}
+
+static int amr_wb_encode_close(AVCodecContext * avctx)
+{
+ AMRWBContext *s = (AMRWBContext*) avctx->priv_data;
+ E_IF_exit(s->state);
+ av_freep(&avctx->coded_frame);
+ s->frameCount++;
+ return 0;
+}
+
+static int amr_wb_encode_frame(AVCodecContext *avctx,
+ unsigned char *frame/*out*/, int buf_size, void *data/*in*/)
+{
+ AMRWBContext *s;
+ int size;
+ s = (AMRWBContext*) avctx->priv_data;
+ s->mode=getWBBitrateMode(avctx->bit_rate);
+ size = E_IF_encode(s->state, s->mode, data, frame, s->allow_dtx);
+ return size;
+}
+
+static int amr_wb_decode_init(AVCodecContext * avctx)
+{
+ AMRWBContext *s = (AMRWBContext *)avctx->priv_data;
+ s->frameCount=0;
+ s->state = D_IF_init();
+
+ amr_decode_fix_avctx(avctx);
+
+ if(avctx->channels > 1)
+ {
+ av_log(avctx, AV_LOG_ERROR, "amr_wb: multichannel decoding not supported\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+extern const UWord8 block_size[];
+
+static int amr_wb_decode_frame(AVCodecContext * avctx,
+ void *data, int *data_size,
+ uint8_t * buf, int buf_size)
+{
+ AMRWBContext *s = (AMRWBContext*)avctx->priv_data;
+
+ uint8_t*amrData=buf;
+ int mode;
+ int packet_size;
+
+ if(buf_size==0) {
+ /* nothing to do */
+ return 0;
+ }
+
+ mode = (amrData[0] >> 3) & 0x000F;
+ packet_size = block_size[mode];
+
+ if(packet_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "amr frame too short (%u, should be %u)\n", buf_size, packet_size+1);
+ return -1;
+ }
+
+ s->frameCount++;
+ D_IF_decode( s->state, amrData, data, _good_frame);
+ *data_size=320*2;
+ return packet_size;
+}
+
+static int amr_wb_decode_close(AVCodecContext * avctx)
+{
+ AMRWBContext *s = (AMRWBContext *)avctx->priv_data;
+ D_IF_exit(s->state);
+ return 0;
+}
+
+AVCodec amr_wb_decoder =
+{
+ "amr_wb",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_AMR_WB,
+ sizeof(AMRWBContext),
+ amr_wb_decode_init,
+ NULL,
+ amr_wb_decode_close,
+ amr_wb_decode_frame,
+};
+
+AVCodec amr_wb_encoder =
+{
+ "amr_wb",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_AMR_WB,
+ sizeof(AMRWBContext),
+ amr_wb_encode_init,
+ amr_wb_encode_frame,
+ amr_wb_encode_close,
+ NULL,
+};
+
+#endif //CONFIG_AMR_WB
diff --git a/contrib/ffmpeg/libavcodec/apiexample.c b/contrib/ffmpeg/libavcodec/apiexample.c
new file mode 100644
index 000000000..484c77876
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/apiexample.c
@@ -0,0 +1,457 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file apiexample.c
+ * avcodec API use example.
+ *
+ * Note that this library only handles codecs (mpeg, mpeg4, etc...),
+ * not file formats (avi, vob, etc...). See library 'libavformat' for the
+ * format handling
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+#ifdef HAVE_AV_CONFIG_H
+#undef HAVE_AV_CONFIG_H
+#endif
+
+#include "avcodec.h"
+
+#define INBUF_SIZE 4096
+
+/*
+ * Audio encoding example
+ */
+void audio_encode_example(const char *filename)
+{
+ AVCodec *codec;
+ AVCodecContext *c= NULL;
+ int frame_size, i, j, out_size, outbuf_size;
+ FILE *f;
+ short *samples;
+ float t, tincr;
+ uint8_t *outbuf;
+
+ printf("Audio encoding\n");
+
+ /* find the MP2 encoder */
+ codec = avcodec_find_encoder(CODEC_ID_MP2);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ c= avcodec_alloc_context();
+
+ /* put sample parameters */
+ c->bit_rate = 64000;
+ c->sample_rate = 44100;
+ c->channels = 2;
+
+ /* open it */
+ if (avcodec_open(c, codec) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ /* the codec gives us the frame size, in samples */
+ frame_size = c->frame_size;
+ samples = malloc(frame_size * 2 * c->channels);
+ outbuf_size = 10000;
+ outbuf = malloc(outbuf_size);
+
+ f = fopen(filename, "wb");
+ if (!f) {
+ fprintf(stderr, "could not open %s\n", filename);
+ exit(1);
+ }
+
+ /* encode a single tone sound */
+ t = 0;
+ tincr = 2 * M_PI * 440.0 / c->sample_rate;
+ for(i=0;i<200;i++) {
+ for(j=0;j<frame_size;j++) {
+ samples[2*j] = (int)(sin(t) * 10000);
+ samples[2*j+1] = samples[2*j];
+ t += tincr;
+ }
+ /* encode the samples */
+ out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples);
+ fwrite(outbuf, 1, out_size, f);
+ }
+ fclose(f);
+ free(outbuf);
+ free(samples);
+
+ avcodec_close(c);
+ av_free(c);
+}
+
+/*
+ * Audio decoding.
+ */
+void audio_decode_example(const char *outfilename, const char *filename)
+{
+ AVCodec *codec;
+ AVCodecContext *c= NULL;
+ int out_size, size, len;
+ FILE *f, *outfile;
+ uint8_t *outbuf;
+ uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE], *inbuf_ptr;
+
+ printf("Audio decoding\n");
+
+ /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
+ memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+
+ /* find the mpeg audio decoder */
+ codec = avcodec_find_decoder(CODEC_ID_MP2);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ c= avcodec_alloc_context();
+
+ /* open it */
+ if (avcodec_open(c, codec) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ outbuf = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
+
+ f = fopen(filename, "rb");
+ if (!f) {
+ fprintf(stderr, "could not open %s\n", filename);
+ exit(1);
+ }
+ outfile = fopen(outfilename, "wb");
+ if (!outfile) {
+ av_free(c);
+ exit(1);
+ }
+
+ /* decode until eof */
+ inbuf_ptr = inbuf;
+ for(;;) {
+ size = fread(inbuf, 1, INBUF_SIZE, f);
+ if (size == 0)
+ break;
+
+ inbuf_ptr = inbuf;
+ while (size > 0) {
+ len = avcodec_decode_audio(c, (short *)outbuf, &out_size,
+ inbuf_ptr, size);
+ if (len < 0) {
+ fprintf(stderr, "Error while decoding\n");
+ exit(1);
+ }
+ if (out_size > 0) {
+ /* if a frame has been decoded, output it */
+ fwrite(outbuf, 1, out_size, outfile);
+ }
+ size -= len;
+ inbuf_ptr += len;
+ }
+ }
+
+ fclose(outfile);
+ fclose(f);
+ free(outbuf);
+
+ avcodec_close(c);
+ av_free(c);
+}
+
+/*
+ * Video encoding example
+ */
+void video_encode_example(const char *filename)
+{
+ AVCodec *codec;
+ AVCodecContext *c= NULL;
+ int i, out_size, size, x, y, outbuf_size;
+ FILE *f;
+ AVFrame *picture;
+ uint8_t *outbuf, *picture_buf;
+
+ printf("Video encoding\n");
+
+ /* find the mpeg1 video encoder */
+ codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ c= avcodec_alloc_context();
+ picture= avcodec_alloc_frame();
+
+ /* put sample parameters */
+ c->bit_rate = 400000;
+ /* resolution must be a multiple of two */
+ c->width = 352;
+ c->height = 288;
+ /* frames per second */
+ c->time_base= (AVRational){1,25};
+ c->gop_size = 10; /* emit one intra frame every ten frames */
+ c->max_b_frames=1;
+ c->pix_fmt = PIX_FMT_YUV420P;
+
+ /* open it */
+ if (avcodec_open(c, codec) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ /* the codec gives us the frame size, in samples */
+
+ f = fopen(filename, "wb");
+ if (!f) {
+ fprintf(stderr, "could not open %s\n", filename);
+ exit(1);
+ }
+
+ /* alloc image and output buffer */
+ outbuf_size = 100000;
+ outbuf = malloc(outbuf_size);
+ size = c->width * c->height;
+ picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */
+
+ picture->data[0] = picture_buf;
+ picture->data[1] = picture->data[0] + size;
+ picture->data[2] = picture->data[1] + size / 4;
+ picture->linesize[0] = c->width;
+ picture->linesize[1] = c->width / 2;
+ picture->linesize[2] = c->width / 2;
+
+ /* encode 1 second of video */
+ for(i=0;i<25;i++) {
+ fflush(stdout);
+ /* prepare a dummy image */
+ /* Y */
+ for(y=0;y<c->height;y++) {
+ for(x=0;x<c->width;x++) {
+ picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
+ }
+ }
+
+ /* Cb and Cr */
+ for(y=0;y<c->height/2;y++) {
+ for(x=0;x<c->width/2;x++) {
+ picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
+ picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
+ }
+ }
+
+ /* encode the image */
+ out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
+ printf("encoding frame %3d (size=%5d)\n", i, out_size);
+ fwrite(outbuf, 1, out_size, f);
+ }
+
+ /* get the delayed frames */
+ for(; out_size; i++) {
+ fflush(stdout);
+
+ out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
+ printf("write frame %3d (size=%5d)\n", i, out_size);
+ fwrite(outbuf, 1, out_size, f);
+ }
+
+ /* add sequence end code to have a real mpeg file */
+ outbuf[0] = 0x00;
+ outbuf[1] = 0x00;
+ outbuf[2] = 0x01;
+ outbuf[3] = 0xb7;
+ fwrite(outbuf, 1, 4, f);
+ fclose(f);
+ free(picture_buf);
+ free(outbuf);
+
+ avcodec_close(c);
+ av_free(c);
+ av_free(picture);
+ printf("\n");
+}
+
+/*
+ * Video decoding example
+ */
+
+void pgm_save(unsigned char *buf,int wrap, int xsize,int ysize,char *filename)
+{
+ FILE *f;
+ int i;
+
+ f=fopen(filename,"w");
+ fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
+ for(i=0;i<ysize;i++)
+ fwrite(buf + i * wrap,1,xsize,f);
+ fclose(f);
+}
+
+void video_decode_example(const char *outfilename, const char *filename)
+{
+ AVCodec *codec;
+ AVCodecContext *c= NULL;
+ int frame, size, got_picture, len;
+ FILE *f;
+ AVFrame *picture;
+ uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE], *inbuf_ptr;
+ char buf[1024];
+
+ /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
+ memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+
+ printf("Video decoding\n");
+
+ /* find the mpeg1 video decoder */
+ codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ c= avcodec_alloc_context();
+ picture= avcodec_alloc_frame();
+
+ if(codec->capabilities&CODEC_CAP_TRUNCATED)
+ c->flags|= CODEC_FLAG_TRUNCATED; /* we don't send complete frames */
+
+ /* for some codecs, such as msmpeg4 and mpeg4, width and height
+ MUST be initialized here because this information is not available
+ in the bitstream */
+
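Following the comment above, a minimal sketch (editorial, not part of the patch; the 352x288 values are only illustrative, matching the encoding example earlier in this file) of what a frame-based codec would need before avcodec_open():

    /* only for frame-based codecs such as msmpeg4/mpeg4; mpeg1video,
       as used here, recovers the dimensions from the bitstream */
    c->width  = 352;
    c->height = 288;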
+ /* open it */
+ if (avcodec_open(c, codec) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ /* the codec gives us the frame size, in samples */
+
+ f = fopen(filename, "rb");
+ if (!f) {
+ fprintf(stderr, "could not open %s\n", filename);
+ exit(1);
+ }
+
+ frame = 0;
+ for(;;) {
+ size = fread(inbuf, 1, INBUF_SIZE, f);
+ if (size == 0)
+ break;
+
+ /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
+ and this is the only method to use them because you cannot
+ know the compressed data size before analysing it.
+
+ BUT some other codecs (msmpeg4, mpeg4) are inherently frame
+ based, so you must call them with all the data for one
+ frame exactly. You must also initialize 'width' and
+ 'height' before using them. */
+
+ /* NOTE2: some codecs allow the raw parameters (frame size,
+ sample rate) to be changed at any frame. We handle this, so
+ you should also take care of it */
+
+ /* here, we use a stream based decoder (mpeg1video), so we
+ feed decoder and see if it could decode a frame */
+ inbuf_ptr = inbuf;
+ while (size > 0) {
+ len = avcodec_decode_video(c, picture, &got_picture,
+ inbuf_ptr, size);
+ if (len < 0) {
+ fprintf(stderr, "Error while decoding frame %d\n", frame);
+ exit(1);
+ }
+ if (got_picture) {
+ printf("saving frame %3d\n", frame);
+ fflush(stdout);
+
+ /* the picture is allocated by the decoder. no need to
+ free it */
+ snprintf(buf, sizeof(buf), outfilename, frame);
+ pgm_save(picture->data[0], picture->linesize[0],
+ c->width, c->height, buf);
+ frame++;
+ }
+ size -= len;
+ inbuf_ptr += len;
+ }
+ }
+
+ /* some codecs, such as MPEG, transmit the I and P frame with a
+ latency of one frame. You must do the following to have a
+ chance to get the last frame of the video */
+ len = avcodec_decode_video(c, picture, &got_picture,
+ NULL, 0);
+ if (got_picture) {
+ printf("saving last frame %3d\n", frame);
+ fflush(stdout);
+
+ /* the picture is allocated by the decoder. no need to
+ free it */
+ snprintf(buf, sizeof(buf), outfilename, frame);
+ pgm_save(picture->data[0], picture->linesize[0],
+ c->width, c->height, buf);
+ frame++;
+ }
+
+ fclose(f);
+
+ avcodec_close(c);
+ av_free(c);
+ av_free(picture);
+ printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+ const char *filename;
+
+ /* must be called before using avcodec lib */
+ avcodec_init();
+
+ /* register all the codecs (you can also register only the codec
+ you wish, to have smaller code) */
+ avcodec_register_all();
+
+ if (argc <= 1) {
+ audio_encode_example("/tmp/test.mp2");
+ audio_decode_example("/tmp/test.sw", "/tmp/test.mp2");
+
+ video_encode_example("/tmp/test.mpg");
+ filename = "/tmp/test.mpg";
+ } else {
+ filename = argv[1];
+ }
+
+ // audio_decode_example("/tmp/test.sw", filename);
+ video_decode_example("/tmp/test%d.pgm", filename);
+
+ return 0;
+}
diff --git a/src/libffmpeg/libavcodec/armv4l/dsputil_arm.c b/contrib/ffmpeg/libavcodec/armv4l/dsputil_arm.c
index cebd176b3..9f0bfa2af 100644
--- a/src/libffmpeg/libavcodec/armv4l/dsputil_arm.c
+++ b/contrib/ffmpeg/libavcodec/armv4l/dsputil_arm.c
@@ -2,18 +2,20 @@
* ARMv4L optimized DSP utils
* Copyright (c) 2001 Lionel Ulmer.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,6 +29,12 @@ extern void dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx);
extern void j_rev_dct_ARM(DCTELEM *data);
extern void simple_idct_ARM(DCTELEM *data);
+extern void simple_idct_armv5te(DCTELEM *data);
+extern void simple_idct_put_armv5te(uint8_t *dest, int line_size,
+ DCTELEM *data);
+extern void simple_idct_add_armv5te(uint8_t *dest, int line_size,
+ DCTELEM *data);
+
/* XXX: local hack */
static void (*ff_put_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
static void (*ff_add_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
@@ -164,45 +172,48 @@ static void simple_idct_ARM_add(uint8_t *dest, int line_size, DCTELEM *block)
simple_idct_ARM (block);
ff_add_pixels_clamped(block, dest, line_size);
}
+
+#ifdef HAVE_IPP
static void simple_idct_ipp(DCTELEM *block)
{
-#ifdef HAVE_IPP
ippiDCT8x8Inv_Video_16s_C1I(block);
-#endif
}
static void simple_idct_ipp_put(uint8_t *dest, int line_size, DCTELEM *block)
{
-#ifdef HAVE_IPP
ippiDCT8x8Inv_Video_16s8u_C1R(block, dest, line_size);
-#endif
}
void add_pixels_clamped_iwmmxt(const DCTELEM *block, uint8_t *pixels, int line_size);
static void simple_idct_ipp_add(uint8_t *dest, int line_size, DCTELEM *block)
{
-#ifdef HAVE_IPP
ippiDCT8x8Inv_Video_16s_C1I(block);
#ifdef HAVE_IWMMXT
add_pixels_clamped_iwmmxt(block, dest, line_size);
#else
add_pixels_clamped_ARM(block, dest, line_size);
#endif
-#endif
}
+#endif
void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx)
{
- const int idct_algo= avctx->idct_algo;
+ int idct_algo= avctx->idct_algo;
ff_put_pixels_clamped = c->put_pixels_clamped;
ff_add_pixels_clamped = c->add_pixels_clamped;
-#ifdef HAVE_IPP
- if(idct_algo==FF_IDCT_ARM){
+ if(idct_algo == FF_IDCT_AUTO){
+#if defined(HAVE_IPP)
+ idct_algo = FF_IDCT_IPP;
+#elif defined(HAVE_ARMV5TE)
+ idct_algo = FF_IDCT_SIMPLEARMV5TE;
#else
- if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_ARM){
+ idct_algo = FF_IDCT_ARM;
#endif
+ }
+
+ if(idct_algo==FF_IDCT_ARM){
c->idct_put= j_rev_dct_ARM_put;
c->idct_add= j_rev_dct_ARM_add;
c->idct = j_rev_dct_ARM;
@@ -212,22 +223,27 @@ void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx)
c->idct_add= simple_idct_ARM_add;
c->idct = simple_idct_ARM;
c->idct_permutation_type= FF_NO_IDCT_PERM;
+#ifdef HAVE_ARMV5TE
+ } else if (idct_algo==FF_IDCT_SIMPLEARMV5TE){
+ c->idct_put= simple_idct_put_armv5te;
+ c->idct_add= simple_idct_add_armv5te;
+ c->idct = simple_idct_armv5te;
+ c->idct_permutation_type = FF_NO_IDCT_PERM;
+#endif
#ifdef HAVE_IPP
- } else if (idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_IPP){
-#else
} else if (idct_algo==FF_IDCT_IPP){
-#endif
c->idct_put= simple_idct_ipp_put;
c->idct_add= simple_idct_ipp_add;
c->idct = simple_idct_ipp;
c->idct_permutation_type= FF_NO_IDCT_PERM;
+#endif
}
/* c->put_pixels_tab[0][0] = put_pixels16_arm; */ // NG!
c->put_pixels_tab[0][1] = put_pixels16_x2_arm; //OK!
c->put_pixels_tab[0][2] = put_pixels16_y2_arm; //OK!
/* c->put_pixels_tab[0][3] = put_pixels16_xy2_arm; /\* NG *\/ */
-/* c->put_no_rnd_pixels_tab[0][0] = put_pixels16_arm; // ?(»È¤ï¤ì¤Ê¤¤) */
+/* c->put_no_rnd_pixels_tab[0][0] = put_pixels16_arm; */
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_arm; // OK
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_arm; //OK
/* c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_arm; //NG */
diff --git a/contrib/ffmpeg/libavcodec/armv4l/dsputil_arm_s.S b/contrib/ffmpeg/libavcodec/armv4l/dsputil_arm_s.S
new file mode 100644
index 000000000..2a3ee9c50
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/armv4l/dsputil_arm_s.S
@@ -0,0 +1,696 @@
+@
+@ ARMv4L optimized DSP utils
+@ Copyright (c) 2004 AGAWA Koji <i (AT) atty (DOT) jp>
+@
+@ This file is part of FFmpeg.
+@
+@ FFmpeg is free software; you can redistribute it and/or
+@ modify it under the terms of the GNU Lesser General Public
+@ License as published by the Free Software Foundation; either
+@ version 2.1 of the License, or (at your option) any later version.
+@
+@ FFmpeg is distributed in the hope that it will be useful,
+@ but WITHOUT ANY WARRANTY; without even the implied warranty of
+@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+@ Lesser General Public License for more details.
+@
+@ You should have received a copy of the GNU Lesser General Public
+@ License along with FFmpeg; if not, write to the Free Software
+@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+@
+
+.macro ADJ_ALIGN_QUADWORD_D shift, Rd0, Rd1, Rd2, Rd3, Rn0, Rn1, Rn2, Rn3, Rn4
+ mov \Rd0, \Rn0, lsr #(\shift * 8)
+ mov \Rd1, \Rn1, lsr #(\shift * 8)
+ mov \Rd2, \Rn2, lsr #(\shift * 8)
+ mov \Rd3, \Rn3, lsr #(\shift * 8)
+ orr \Rd0, \Rd0, \Rn1, lsl #(32 - \shift * 8)
+ orr \Rd1, \Rd1, \Rn2, lsl #(32 - \shift * 8)
+ orr \Rd2, \Rd2, \Rn3, lsl #(32 - \shift * 8)
+ orr \Rd3, \Rd3, \Rn4, lsl #(32 - \shift * 8)
+.endm
+.macro ADJ_ALIGN_DOUBLEWORD shift, R0, R1, R2
+ mov \R0, \R0, lsr #(\shift * 8)
+ orr \R0, \R0, \R1, lsl #(32 - \shift * 8)
+ mov \R1, \R1, lsr #(\shift * 8)
+ orr \R1, \R1, \R2, lsl #(32 - \shift * 8)
+.endm
+.macro ADJ_ALIGN_DOUBLEWORD_D shift, Rdst0, Rdst1, Rsrc0, Rsrc1, Rsrc2
+ mov \Rdst0, \Rsrc0, lsr #(\shift * 8)
+ mov \Rdst1, \Rsrc1, lsr #(\shift * 8)
+ orr \Rdst0, \Rdst0, \Rsrc1, lsl #(32 - (\shift * 8))
+ orr \Rdst1, \Rdst1, \Rsrc2, lsl #(32 - (\shift * 8))
+.endm
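+@ These ADJ_ALIGN_* helpers emulate an unaligned load from word-aligned data:
+@ for a byte offset 'shift' (1..3), each output word is, roughly,
+@     out[i] = (in[i] >> (shift * 8)) | (in[i + 1] << (32 - shift * 8));
+@ i.e. a funnel shift across adjacent words (little-endian byte order assumed).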
+
+.macro RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
+ @ Rd = (Rn | Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1)
+ @ Rmask = 0xFEFEFEFE
+ @ Rn is clobbered
+ eor \Rd0, \Rn0, \Rm0
+ eor \Rd1, \Rn1, \Rm1
+ orr \Rn0, \Rn0, \Rm0
+ orr \Rn1, \Rn1, \Rm1
+ and \Rd0, \Rd0, \Rmask
+ and \Rd1, \Rd1, \Rmask
+ sub \Rd0, \Rn0, \Rd0, lsr #1
+ sub \Rd1, \Rn1, \Rd1, lsr #1
+.endm
+
+.macro NO_RND_AVG32 Rd0, Rd1, Rn0, Rn1, Rm0, Rm1, Rmask
+ @ Rd = (Rn & Rm) - (((Rn ^ Rm) & ~0x01010101) >> 1)
+ @ Rmask = 0xFEFEFEFE
+ @ Rn is clobbered
+ eor \Rd0, \Rn0, \Rm0
+ eor \Rd1, \Rn1, \Rm1
+ and \Rn0, \Rn0, \Rm0
+ and \Rn1, \Rn1, \Rm1
+ and \Rd0, \Rd0, \Rmask
+ and \Rd1, \Rd1, \Rmask
+ add \Rd0, \Rn0, \Rd0, lsr #1
+ add \Rd1, \Rn1, \Rd1, lsr #1
+.endm
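+@ Both averaging macros work on four packed bytes at once; Rmask = 0xFEFEFEFE
+@ keeps the per-byte shift from borrowing bits across byte lanes.  Per word,
+@ this is roughly:
+@     rnd_avg32(a, b)    = (a | b) - (((a ^ b) & 0xFEFEFEFE) >> 1)   /* (a+b+1)>>1 per byte */
+@     no_rnd_avg32(a, b) = (a & b) + (((a ^ b) & 0xFEFEFEFE) >> 1)   /* (a+b)>>1   per byte */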
+
+@ ----------------------------------------------------------------
+ .align 8
+ .global put_pixels16_arm
+put_pixels16_arm:
+ @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+ @ block = word aligned, pixels = unaligned
+ pld [r1]
+ stmfd sp!, {r4-r11, lr} @ R14 is also called LR
+ adr r5, 5f
+ ands r4, r1, #3
+ bic r1, r1, #3
+ add r5, r5, r4, lsl #2
+ ldrne pc, [r5]
+1:
+ ldmia r1, {r4-r7}
+ add r1, r1, r2
+ stmia r0, {r4-r7}
+ pld [r1]
+ subs r3, r3, #1
+ add r0, r0, r2
+ bne 1b
+ ldmfd sp!, {r4-r11, pc}
+ .align 8
+2:
+ ldmia r1, {r4-r8}
+ add r1, r1, r2
+ ADJ_ALIGN_QUADWORD_D 1, r9, r10, r11, r12, r4, r5, r6, r7, r8
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r9-r12}
+ add r0, r0, r2
+ bne 2b
+ ldmfd sp!, {r4-r11, pc}
+ .align 8
+3:
+ ldmia r1, {r4-r8}
+ add r1, r1, r2
+ ADJ_ALIGN_QUADWORD_D 2, r9, r10, r11, r12, r4, r5, r6, r7, r8
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r9-r12}
+ add r0, r0, r2
+ bne 3b
+ ldmfd sp!, {r4-r11, pc}
+ .align 8
+4:
+ ldmia r1, {r4-r8}
+ add r1, r1, r2
+ ADJ_ALIGN_QUADWORD_D 3, r9, r10, r11, r12, r4, r5, r6, r7, r8
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r9-r12}
+ add r0, r0, r2
+ bne 4b
+ ldmfd sp!, {r4-r11,pc}
+ .align 8
+5:
+ .word 1b
+ .word 2b
+ .word 3b
+ .word 4b
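+
+@ Dispatch pattern shared by the routines in this file: the two low bits of
+@ the source pointer pick one of four loop bodies (the aligned case falls
+@ through; offsets 1-3 are reached through the jump table at local label 5),
+@ and each body uses the matching ADJ_ALIGN_* shift amount to fix up the data.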
+
+@ ----------------------------------------------------------------
+ .align 8
+ .global put_pixels8_arm
+put_pixels8_arm:
+ @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+ @ block = word aligned, pixels = unaligned
+ pld [r1]
+ stmfd sp!, {r4-r5,lr} @ R14 is also called LR
+ adr r5, 5f
+ ands r4, r1, #3
+ bic r1, r1, #3
+ add r5, r5, r4, lsl #2
+ ldrne pc, [r5]
+1:
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+ subs r3, r3, #1
+ pld [r1]
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 1b
+ ldmfd sp!, {r4-r5,pc}
+ .align 8
+2:
+ ldmia r1, {r4-r5, r12}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r12
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 2b
+ ldmfd sp!, {r4-r5,pc}
+ .align 8
+3:
+ ldmia r1, {r4-r5, r12}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r12
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 3b
+ ldmfd sp!, {r4-r5,pc}
+ .align 8
+4:
+ ldmia r1, {r4-r5, r12}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r12
+ pld [r1]
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 4b
+ ldmfd sp!, {r4-r5,pc}
+ .align 8
+5:
+ .word 1b
+ .word 2b
+ .word 3b
+ .word 4b
+
+@ ----------------------------------------------------------------
+ .align 8
+ .global put_pixels8_x2_arm
+put_pixels8_x2_arm:
+ @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+ @ block = word aligned, pixels = unaligned
+ pld [r1]
+ stmfd sp!, {r4-r10,lr} @ R14 is also called LR
+ adr r5, 5f
+ ands r4, r1, #3
+ ldr r12, [r5]
+ add r5, r5, r4, lsl #2
+ bic r1, r1, #3
+ ldrne pc, [r5]
+1:
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
+ pld [r1]
+ RND_AVG32 r8, r9, r4, r5, r6, r7, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 1b
+ ldmfd sp!, {r4-r10,pc}
+ .align 8
+2:
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
+ ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
+ pld [r1]
+ RND_AVG32 r4, r5, r6, r7, r8, r9, r12
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 2b
+ ldmfd sp!, {r4-r10,pc}
+ .align 8
+3:
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
+ ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
+ pld [r1]
+ RND_AVG32 r4, r5, r6, r7, r8, r9, r12
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 3b
+ ldmfd sp!, {r4-r10,pc}
+ .align 8
+4:
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
+ pld [r1]
+ RND_AVG32 r8, r9, r6, r7, r5, r10, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 4b
+ ldmfd sp!, {r4-r10,pc} @@ update PC with LR content.
+ .align 8
+5:
+ .word 0xFEFEFEFE
+ .word 2b
+ .word 3b
+ .word 4b
+
+ .align 8
+ .global put_no_rnd_pixels8_x2_arm
+put_no_rnd_pixels8_x2_arm:
+ @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+ @ block = word aligned, pixels = unaligned
+ pld [r1]
+ stmfd sp!, {r4-r10,lr} @ R14 is also called LR
+ adr r5, 5f
+ ands r4, r1, #3
+ ldr r12, [r5]
+ add r5, r5, r4, lsl #2
+ bic r1, r1, #3
+ ldrne pc, [r5]
+1:
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
+ pld [r1]
+ NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 1b
+ ldmfd sp!, {r4-r10,pc}
+ .align 8
+2:
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 1, r6, r7, r4, r5, r10
+ ADJ_ALIGN_DOUBLEWORD_D 2, r8, r9, r4, r5, r10
+ pld [r1]
+ NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 2b
+ ldmfd sp!, {r4-r10,pc}
+ .align 8
+3:
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r4, r5, r10
+ ADJ_ALIGN_DOUBLEWORD_D 3, r8, r9, r4, r5, r10
+ pld [r1]
+ NO_RND_AVG32 r4, r5, r6, r7, r8, r9, r12
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 3b
+ ldmfd sp!, {r4-r10,pc}
+ .align 8
+4:
+ ldmia r1, {r4-r5, r10}
+ add r1, r1, r2
+ ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r4, r5, r10
+ pld [r1]
+ NO_RND_AVG32 r8, r9, r6, r7, r5, r10, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 4b
+ ldmfd sp!, {r4-r10,pc} @@ update PC with LR content.
+ .align 8
+5:
+ .word 0xFEFEFEFE
+ .word 2b
+ .word 3b
+ .word 4b
+
+
+@ ----------------------------------------------------------------
+ .align 8
+ .global put_pixels8_y2_arm
+put_pixels8_y2_arm:
+ @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+ @ block = word aligned, pixels = unaligned
+ pld [r1]
+ stmfd sp!, {r4-r11,lr} @ R14 is also called LR
+ adr r5, 5f
+ ands r4, r1, #3
+ mov r3, r3, lsr #1
+ ldr r12, [r5]
+ add r5, r5, r4, lsl #2
+ bic r1, r1, #3
+ ldrne pc, [r5]
+1:
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+6: ldmia r1, {r6-r7}
+ add r1, r1, r2
+ pld [r1]
+ RND_AVG32 r8, r9, r4, r5, r6, r7, r12
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ pld [r1]
+ RND_AVG32 r8, r9, r6, r7, r4, r5, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
+ .align 8
+2:
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
+ RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
+ subs r3, r3, #1
+ RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
+ .align 8
+3:
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
+ RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
+ subs r3, r3, #1
+ RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
+ .align 8
+4:
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
+ RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
+ subs r3, r3, #1
+ RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
+
+ .align 8
+5:
+ .word 0xFEFEFEFE
+ .word 2b
+ .word 3b
+ .word 4b
+
+ .align 8
+ .global put_no_rnd_pixels8_y2_arm
+put_no_rnd_pixels8_y2_arm:
+ @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+ @ block = word aligned, pixels = unaligned
+ pld [r1]
+ stmfd sp!, {r4-r11,lr} @ R14 is also called LR
+ adr r5, 5f
+ ands r4, r1, #3
+ mov r3, r3, lsr #1
+ ldr r12, [r5]
+ add r5, r5, r4, lsl #2
+ bic r1, r1, #3
+ ldrne pc, [r5]
+1:
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+6: ldmia r1, {r6-r7}
+ add r1, r1, r2
+ pld [r1]
+ NO_RND_AVG32 r8, r9, r4, r5, r6, r7, r12
+ ldmia r1, {r4-r5}
+ add r1, r1, r2
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ pld [r1]
+ NO_RND_AVG32 r8, r9, r6, r7, r4, r5, r12
+ subs r3, r3, #1
+ stmia r0, {r8-r9}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
+ .align 8
+2:
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r7, r8, r9
+ NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 1, r4, r5, r6
+ subs r3, r3, #1
+ NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
+ .align 8
+3:
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r7, r8, r9
+ NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 2, r4, r5, r6
+ subs r3, r3, #1
+ NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
+ .align 8
+4:
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
+6: ldmia r1, {r7-r9}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r7, r8, r9
+ NO_RND_AVG32 r10, r11, r4, r5, r7, r8, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ ldmia r1, {r4-r6}
+ add r1, r1, r2
+ pld [r1]
+ ADJ_ALIGN_DOUBLEWORD 3, r4, r5, r6
+ subs r3, r3, #1
+ NO_RND_AVG32 r10, r11, r7, r8, r4, r5, r12
+ stmia r0, {r10-r11}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
+ .align 8
+5:
+ .word 0xFEFEFEFE
+ .word 2b
+ .word 3b
+ .word 4b
+
+@ ----------------------------------------------------------------
+.macro RND_XY2_IT align, rnd
+ @ l1= (a & 0x03030303) + (b & 0x03030303) ?(+ 0x02020202)
+ @ h1= ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2)
+.if \align == 0
+ ldmia r1, {r6-r8}
+.elseif \align == 3
+ ldmia r1, {r5-r7}
+.else
+ ldmia r1, {r8-r10}
+.endif
+ add r1, r1, r2
+ pld [r1]
+.if \align == 0
+ ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r6, r7, r8
+.elseif \align == 1
+ ADJ_ALIGN_DOUBLEWORD_D 1, r4, r5, r8, r9, r10
+ ADJ_ALIGN_DOUBLEWORD_D 2, r6, r7, r8, r9, r10
+.elseif \align == 2
+ ADJ_ALIGN_DOUBLEWORD_D 2, r4, r5, r8, r9, r10
+ ADJ_ALIGN_DOUBLEWORD_D 3, r6, r7, r8, r9, r10
+.elseif \align == 3
+ ADJ_ALIGN_DOUBLEWORD_D 3, r4, r5, r5, r6, r7
+.endif
+ ldr r14, [r12, #0] @ 0x03030303
+ tst r3, #1
+ and r8, r4, r14
+ and r9, r5, r14
+ and r10, r6, r14
+ and r11, r7, r14
+.if \rnd == 1
+ ldreq r14, [r12, #16] @ 0x02020202
+.else
+ ldreq r14, [r12, #28] @ 0x01010101
+.endif
+ add r8, r8, r10
+ add r9, r9, r11
+ addeq r8, r8, r14
+ addeq r9, r9, r14
+ ldr r14, [r12, #20] @ 0xFCFCFCFC >> 2
+ and r4, r14, r4, lsr #2
+ and r5, r14, r5, lsr #2
+ and r6, r14, r6, lsr #2
+ and r7, r14, r7, lsr #2
+ add r10, r4, r6
+ add r11, r5, r7
+.endm
+
+.macro RND_XY2_EXPAND align, rnd
+ RND_XY2_IT \align, \rnd
+6: stmfd sp!, {r8-r11}
+ RND_XY2_IT \align, \rnd
+ ldmfd sp!, {r4-r7}
+ add r4, r4, r8
+ add r5, r5, r9
+ add r6, r6, r10
+ add r7, r7, r11
+ ldr r14, [r12, #24] @ 0x0F0F0F0F
+ and r4, r14, r4, lsr #2
+ and r5, r14, r5, lsr #2
+ add r4, r4, r6
+ add r5, r5, r7
+ subs r3, r3, #1
+ stmia r0, {r4-r5}
+ add r0, r0, r2
+ bne 6b
+ ldmfd sp!, {r4-r11,pc}
+.endm
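+@ The xy2 cases average the four source pixels of a 2x2 neighbourhood per
+@ output byte.  To sum four bytes per lane without overflow, each byte is
+@ split into its high six bits (p >> 2, masked with 0xFCFCFCFC >> 2) and its
+@ low two bits (p & 0x03); RND_XY2_IT accumulates the two halves separately
+@ and RND_XY2_EXPAND combines two such rows, so each result byte is roughly
+@     hi_sum + ((lo_sum + rounding) >> 2)
+@ with the per-byte rounding constant (0x02 or 0x01) giving rnd vs. no_rnd.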
+
+ .align 8
+ .global put_pixels8_xy2_arm
+put_pixels8_xy2_arm:
+ @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+ @ block = word aligned, pixels = unaligned
+ pld [r1]
+ stmfd sp!, {r4-r11,lr} @ R14 is also called LR
+ adrl r12, 5f
+ ands r4, r1, #3
+ add r5, r12, r4, lsl #2
+ bic r1, r1, #3
+ ldrne pc, [r5]
+1:
+ RND_XY2_EXPAND 0, 1
+
+ .align 8
+2:
+ RND_XY2_EXPAND 1, 1
+
+ .align 8
+3:
+ RND_XY2_EXPAND 2, 1
+
+ .align 8
+4:
+ RND_XY2_EXPAND 3, 1
+
+5:
+ .word 0x03030303
+ .word 2b
+ .word 3b
+ .word 4b
+ .word 0x02020202
+ .word 0xFCFCFCFC >> 2
+ .word 0x0F0F0F0F
+ .word 0x01010101
+
+ .align 8
+ .global put_no_rnd_pixels8_xy2_arm
+put_no_rnd_pixels8_xy2_arm:
+ @ void func(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+ @ block = word aligned, pixels = unaligned
+ pld [r1]
+ stmfd sp!, {r4-r11,lr} @ R14 is also called LR
+ adrl r12, 5f
+ ands r4, r1, #3
+ add r5, r12, r4, lsl #2
+ bic r1, r1, #3
+ ldrne pc, [r5]
+1:
+ RND_XY2_EXPAND 0, 0
+
+ .align 8
+2:
+ RND_XY2_EXPAND 1, 0
+
+ .align 8
+3:
+ RND_XY2_EXPAND 2, 0
+
+ .align 8
+4:
+ RND_XY2_EXPAND 3, 0
+
+5:
+ .word 0x03030303
+ .word 2b
+ .word 3b
+ .word 4b
+ .word 0x02020202
+ .word 0xFCFCFCFC >> 2
+ .word 0x0F0F0F0F
+ .word 0x01010101
diff --git a/contrib/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt.c b/contrib/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt.c
new file mode 100644
index 000000000..d7401e760
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt.c
@@ -0,0 +1,188 @@
+/*
+ * iWMMXt optimized DSP utils
+ * Copyright (c) 2004 AGAWA Koji
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+
+#define DEF(x, y) x ## _no_rnd_ ## y ##_iwmmxt
+#define SET_RND(regd) __asm__ __volatile__ ("mov r12, #1 \n\t tbcsth " #regd ", r12":::"r12");
+#define WAVG2B "wavg2b"
+#include "dsputil_iwmmxt_rnd.h"
+#undef DEF
+#undef SET_RND
+#undef WAVG2B
+
+#define DEF(x, y) x ## _ ## y ##_iwmmxt
+#define SET_RND(regd) __asm__ __volatile__ ("mov r12, #2 \n\t tbcsth " #regd ", r12":::"r12");
+#define WAVG2B "wavg2br"
+#include "dsputil_iwmmxt_rnd.h"
+#undef DEF
+#undef SET_RND
+#undef WAVG2B
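+
+/* dsputil_iwmmxt_rnd.h acts as a simple template mechanism: the first
+ * inclusion above generates the *_no_rnd_*_iwmmxt functions around the
+ * truncating "wavg2b" average, the second the rounding *_iwmmxt variants
+ * around "wavg2br", with DEF() controlling the names and SET_RND() the
+ * rounding constant used by the xy2 cases. */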
+
+// needs scheduling
+#define OP(AVG) \
+ asm volatile ( \
+ /* alignment */ \
+ "and r12, %[pixels], #7 \n\t" \
+ "bic %[pixels], %[pixels], #7 \n\t" \
+ "tmcr wcgr1, r12 \n\t" \
+ \
+ "wldrd wr0, [%[pixels]] \n\t" \
+ "wldrd wr1, [%[pixels], #8] \n\t" \
+ "add %[pixels], %[pixels], %[line_size] \n\t" \
+ "walignr1 wr4, wr0, wr1 \n\t" \
+ \
+ "1: \n\t" \
+ \
+ "wldrd wr2, [%[pixels]] \n\t" \
+ "wldrd wr3, [%[pixels], #8] \n\t" \
+ "add %[pixels], %[pixels], %[line_size] \n\t" \
+ "pld [%[pixels]] \n\t" \
+ "walignr1 wr5, wr2, wr3 \n\t" \
+ AVG " wr6, wr4, wr5 \n\t" \
+ "wstrd wr6, [%[block]] \n\t" \
+ "add %[block], %[block], %[line_size] \n\t" \
+ \
+ "wldrd wr0, [%[pixels]] \n\t" \
+ "wldrd wr1, [%[pixels], #8] \n\t" \
+ "add %[pixels], %[pixels], %[line_size] \n\t" \
+ "walignr1 wr4, wr0, wr1 \n\t" \
+ "pld [%[pixels]] \n\t" \
+ AVG " wr6, wr4, wr5 \n\t" \
+ "wstrd wr6, [%[block]] \n\t" \
+ "add %[block], %[block], %[line_size] \n\t" \
+ \
+ "subs %[h], %[h], #2 \n\t" \
+ "bne 1b \n\t" \
+ : [block]"+r"(block), [pixels]"+r"(pixels), [h]"+r"(h) \
+ : [line_size]"r"(line_size) \
+ : "memory", "r12");
+void put_pixels8_y2_iwmmxt(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ OP("wavg2br");
+}
+void put_no_rnd_pixels8_y2_iwmmxt(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ OP("wavg2b");
+}
+#undef OP
+
+void add_pixels_clamped_iwmmxt(const DCTELEM *block, uint8_t *pixels, int line_size)
+{
+ uint8_t *pixels2 = pixels + line_size;
+
+ __asm__ __volatile__ (
+ "mov r12, #4 \n\t"
+ "1: \n\t"
+ "pld [%[pixels], %[line_size2]] \n\t"
+ "pld [%[pixels2], %[line_size2]] \n\t"
+ "wldrd wr4, [%[pixels]] \n\t"
+ "wldrd wr5, [%[pixels2]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "wunpckelub wr6, wr4 \n\t"
+ "wldrd wr0, [%[block]] \n\t"
+ "wunpckehub wr7, wr4 \n\t"
+ "wldrd wr1, [%[block], #8] \n\t"
+ "wunpckelub wr8, wr5 \n\t"
+ "wldrd wr2, [%[block], #16] \n\t"
+ "wunpckehub wr9, wr5 \n\t"
+ "wldrd wr3, [%[block], #24] \n\t"
+ "add %[block], %[block], #32 \n\t"
+ "waddhss wr10, wr0, wr6 \n\t"
+ "waddhss wr11, wr1, wr7 \n\t"
+ "waddhss wr12, wr2, wr8 \n\t"
+ "waddhss wr13, wr3, wr9 \n\t"
+ "wpackhus wr14, wr10, wr11 \n\t"
+ "wpackhus wr15, wr12, wr13 \n\t"
+ "wstrd wr14, [%[pixels]] \n\t"
+ "add %[pixels], %[pixels], %[line_size2] \n\t"
+ "subs r12, r12, #1 \n\t"
+ "wstrd wr15, [%[pixels2]] \n\t"
+ "add %[pixels2], %[pixels2], %[line_size2] \n\t"
+ "bne 1b \n\t"
+ : [block]"+r"(block), [pixels]"+r"(pixels), [pixels2]"+r"(pixels2)
+ : [line_size2]"r"(line_size << 1)
+ : "cc", "memory", "r12");
+}
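+
+/* In spirit this is the scalar add_pixels_clamped(): for each of the 8x8
+ * coefficients,
+ *     pixels[j] = clamp(pixels[j] + block[j], 0, 255);   // clamp() is shorthand here
+ * done two rows per iteration with saturating 16-bit adds (waddhss) and
+ * unsigned saturating packing (wpackhus). */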
+
+static void nop(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ return;
+}
+
+int mm_flags; /* multimedia extension flags */
+
+int mm_support(void)
+{
+ return 0; /* TODO: implement proper detection */
+}
+
+void dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx)
+{
+ mm_flags = mm_support();
+
+ if (avctx->dsp_mask) {
+ if (avctx->dsp_mask & FF_MM_FORCE)
+ mm_flags |= (avctx->dsp_mask & 0xffff);
+ else
+ mm_flags &= ~(avctx->dsp_mask & 0xffff);
+ }
+
+ if (!(mm_flags & MM_IWMMXT)) return;
+
+ c->add_pixels_clamped = add_pixels_clamped_iwmmxt;
+
+ c->put_pixels_tab[0][0] = put_pixels16_iwmmxt;
+ c->put_pixels_tab[0][1] = put_pixels16_x2_iwmmxt;
+ c->put_pixels_tab[0][2] = put_pixels16_y2_iwmmxt;
+ c->put_pixels_tab[0][3] = put_pixels16_xy2_iwmmxt;
+ c->put_no_rnd_pixels_tab[0][0] = put_pixels16_iwmmxt;
+ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_iwmmxt;
+ c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_iwmmxt;
+ c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_iwmmxt;
+
+ c->put_pixels_tab[1][0] = put_pixels8_iwmmxt;
+ c->put_pixels_tab[1][1] = put_pixels8_x2_iwmmxt;
+ c->put_pixels_tab[1][2] = put_pixels8_y2_iwmmxt;
+ c->put_pixels_tab[1][3] = put_pixels8_xy2_iwmmxt;
+ c->put_no_rnd_pixels_tab[1][0] = put_pixels8_iwmmxt;
+ c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_iwmmxt;
+ c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_iwmmxt;
+ c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_iwmmxt;
+
+ c->avg_pixels_tab[0][0] = avg_pixels16_iwmmxt;
+ c->avg_pixels_tab[0][1] = avg_pixels16_x2_iwmmxt;
+ c->avg_pixels_tab[0][2] = avg_pixels16_y2_iwmmxt;
+ c->avg_pixels_tab[0][3] = avg_pixels16_xy2_iwmmxt;
+ c->avg_no_rnd_pixels_tab[0][0] = avg_pixels16_iwmmxt;
+ c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_iwmmxt;
+ c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_iwmmxt;
+ c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_iwmmxt;
+
+ c->avg_pixels_tab[1][0] = avg_pixels8_iwmmxt;
+ c->avg_pixels_tab[1][1] = avg_pixels8_x2_iwmmxt;
+ c->avg_pixels_tab[1][2] = avg_pixels8_y2_iwmmxt;
+ c->avg_pixels_tab[1][3] = avg_pixels8_xy2_iwmmxt;
+ c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_iwmmxt;
+ c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_iwmmxt;
+ c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_iwmmxt;
+ c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_iwmmxt;
+}
diff --git a/contrib/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt_rnd.h b/contrib/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt_rnd.h
new file mode 100644
index 000000000..51ba61c47
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/armv4l/dsputil_iwmmxt_rnd.h
@@ -0,0 +1,1114 @@
+/*
+ * iWMMXt optimized DSP utils
+ * copyright (c) 2004 AGAWA Koji
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+void DEF(put, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ __asm__ __volatile__ (
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r4, %[pixels], %[line_size] \n\t"
+ "add r5, %[block], %[line_size] \n\t"
+ "mov %[line_size], %[line_size], lsl #1 \n\t"
+ "1: \n\t"
+ "wldrd wr0, [%[pixels]] \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "wldrd wr1, [%[pixels], #8] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "wldrd wr3, [r4] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "wldrd wr4, [r4, #8] \n\t"
+ "add r4, r4, %[line_size] \n\t"
+ "walignr1 wr8, wr0, wr1 \n\t"
+ "pld [r4] \n\t"
+ "pld [r4, #32] \n\t"
+ "walignr1 wr10, wr3, wr4 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "wstrd wr10, [r5] \n\t"
+ "add r5, r5, %[line_size] \n\t"
+ "bne 1b \n\t"
+ : [block]"+r"(block), [pixels]"+r"(pixels), [line_size]"+r"(stride), [h]"+r"(h)
+ :
+ : "memory", "r4", "r5", "r12");
+}
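+
+/* Unaligned-access idiom used throughout this header: the low three bits of
+ * the source address are written to the wCGR1 alignment register (tmcr), the
+ * pointer is rounded down to an 8-byte boundary, and walignr1 then extracts
+ * the desired 8 bytes from two adjacent aligned wldrd loads.  Each loop
+ * iteration emits two output rows. */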
+
+void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ __asm__ __volatile__ (
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r4, %[pixels], %[line_size] \n\t"
+ "add r5, %[block], %[line_size] \n\t"
+ "mov %[line_size], %[line_size], lsl #1 \n\t"
+ "1: \n\t"
+ "wldrd wr0, [%[pixels]] \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "wldrd wr1, [%[pixels], #8] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "wldrd wr3, [r4] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "wldrd wr4, [r4, #8] \n\t"
+ "add r4, r4, %[line_size] \n\t"
+ "walignr1 wr8, wr0, wr1 \n\t"
+ "wldrd wr0, [%[block]] \n\t"
+ "wldrd wr2, [r5] \n\t"
+ "pld [r4] \n\t"
+ "pld [r4, #32] \n\t"
+ "walignr1 wr10, wr3, wr4 \n\t"
+ WAVG2B" wr8, wr8, wr0 \n\t"
+ WAVG2B" wr10, wr10, wr2 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "wstrd wr10, [r5] \n\t"
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "add r5, r5, %[line_size] \n\t"
+ "pld [r5] \n\t"
+ "pld [r5, #32] \n\t"
+ "bne 1b \n\t"
+ : [block]"+r"(block), [pixels]"+r"(pixels), [line_size]"+r"(stride), [h]"+r"(h)
+ :
+ : "memory", "r4", "r5", "r12");
+}
+
+void DEF(put, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ __asm__ __volatile__ (
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r4, %[pixels], %[line_size] \n\t"
+ "add r5, %[block], %[line_size] \n\t"
+ "mov %[line_size], %[line_size], lsl #1 \n\t"
+ "1: \n\t"
+ "wldrd wr0, [%[pixels]] \n\t"
+ "wldrd wr1, [%[pixels], #8] \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "wldrd wr2, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "wldrd wr3, [r4] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr8, wr0, wr1 \n\t"
+ "wldrd wr4, [r4, #8] \n\t"
+ "walignr1 wr9, wr1, wr2 \n\t"
+ "wldrd wr5, [r4, #16] \n\t"
+ "add r4, r4, %[line_size] \n\t"
+ "pld [r4] \n\t"
+ "pld [r4, #32] \n\t"
+ "walignr1 wr10, wr3, wr4 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "walignr1 wr11, wr4, wr5 \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "wstrd wr10, [r5] \n\t"
+ "wstrd wr11, [r5, #8] \n\t"
+ "add r5, r5, %[line_size] \n\t"
+ "bne 1b \n\t"
+ : [block]"+r"(block), [pixels]"+r"(pixels), [line_size]"+r"(stride), [h]"+r"(h)
+ :
+ : "memory", "r4", "r5", "r12");
+}
+
+void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ __asm__ __volatile__ (
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r4, %[pixels], %[line_size]\n\t"
+ "add r5, %[block], %[line_size] \n\t"
+ "mov %[line_size], %[line_size], lsl #1 \n\t"
+ "1: \n\t"
+ "wldrd wr0, [%[pixels]] \n\t"
+ "wldrd wr1, [%[pixels], #8] \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "wldrd wr2, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "wldrd wr3, [r4] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr8, wr0, wr1 \n\t"
+ "wldrd wr4, [r4, #8] \n\t"
+ "walignr1 wr9, wr1, wr2 \n\t"
+ "wldrd wr5, [r4, #16] \n\t"
+ "add r4, r4, %[line_size] \n\t"
+ "wldrd wr0, [%[block]] \n\t"
+ "pld [r4] \n\t"
+ "wldrd wr1, [%[block], #8] \n\t"
+ "pld [r4, #32] \n\t"
+ "wldrd wr2, [r5] \n\t"
+ "walignr1 wr10, wr3, wr4 \n\t"
+ "wldrd wr3, [r5, #8] \n\t"
+ WAVG2B" wr8, wr8, wr0 \n\t"
+ WAVG2B" wr9, wr9, wr1 \n\t"
+ WAVG2B" wr10, wr10, wr2 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "walignr1 wr11, wr4, wr5 \n\t"
+ WAVG2B" wr11, wr11, wr3 \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "wstrd wr10, [r5] \n\t"
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "wstrd wr11, [r5, #8] \n\t"
+ "add r5, r5, %[line_size] \n\t"
+ "pld [r5] \n\t"
+ "pld [r5, #32] \n\t"
+ "bne 1b \n\t"
+ : [block]"+r"(block), [pixels]"+r"(pixels), [line_size]"+r"(stride), [h]"+r"(h)
+ :
+ : "memory", "r4", "r5", "r12");
+}
+
+void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
+ __asm__ __volatile__(
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r12, r12, #1 \n\t"
+ "add r4, %[pixels], %[line_size]\n\t"
+ "tmcr wcgr2, r12 \n\t"
+ "add r5, %[block], %[line_size] \n\t"
+ "mov %[line_size], %[line_size], lsl #1 \n\t"
+
+ "1: \n\t"
+ "wldrd wr10, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "wldrd wr13, [r4] \n\t"
+ "pld [%[pixels]] \n\t"
+ "wldrd wr14, [r4, #8] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "add r4, r4, %[line_size] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "pld [r4] \n\t"
+ "pld [r4, #32] \n\t"
+ "walignr1 wr2, wr13, wr14 \n\t"
+ "wmoveq wr4, wr11 \n\t"
+ "wmoveq wr6, wr14 \n\t"
+ "walignr2ne wr4, wr10, wr11 \n\t"
+ "walignr2ne wr6, wr13, wr14 \n\t"
+ WAVG2B" wr0, wr0, wr4 \n\t"
+ WAVG2B" wr2, wr2, wr6 \n\t"
+ "wstrd wr0, [%[block]] \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "wstrd wr2, [r5] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "add r5, r5, %[line_size] \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block), [line_size]"+r"(stride)
+ :
+ : "r4", "r5", "r12", "memory");
+}
+
+void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
+ __asm__ __volatile__(
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r12, r12, #1 \n\t"
+ "add r4, %[pixels], %[line_size]\n\t"
+ "tmcr wcgr2, r12 \n\t"
+ "add r5, %[block], %[line_size] \n\t"
+ "mov %[line_size], %[line_size], lsl #1 \n\t"
+
+ "1: \n\t"
+ "wldrd wr10, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "wldrd wr12, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "wldrd wr13, [r4] \n\t"
+ "pld [%[pixels]] \n\t"
+ "wldrd wr14, [r4, #8] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "wldrd wr15, [r4, #16] \n\t"
+ "add r4, r4, %[line_size] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "pld [r4] \n\t"
+ "pld [r4, #32] \n\t"
+ "walignr1 wr1, wr11, wr12 \n\t"
+ "walignr1 wr2, wr13, wr14 \n\t"
+ "walignr1 wr3, wr14, wr15 \n\t"
+ "wmoveq wr4, wr11 \n\t"
+ "wmoveq wr5, wr12 \n\t"
+ "wmoveq wr6, wr14 \n\t"
+ "wmoveq wr7, wr15 \n\t"
+ "walignr2ne wr4, wr10, wr11 \n\t"
+ "walignr2ne wr5, wr11, wr12 \n\t"
+ "walignr2ne wr6, wr13, wr14 \n\t"
+ "walignr2ne wr7, wr14, wr15 \n\t"
+ WAVG2B" wr0, wr0, wr4 \n\t"
+ WAVG2B" wr1, wr1, wr5 \n\t"
+ "wstrd wr0, [%[block]] \n\t"
+ WAVG2B" wr2, wr2, wr6 \n\t"
+ "wstrd wr1, [%[block], #8] \n\t"
+ WAVG2B" wr3, wr3, wr7 \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "wstrd wr2, [r5] \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "wstrd wr3, [r5, #8] \n\t"
+ "add r5, r5, %[line_size] \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block), [line_size]"+r"(stride)
+ :
+ : "r4", "r5", "r12", "memory");
+}
+
+void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
+ __asm__ __volatile__(
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r12, r12, #1 \n\t"
+ "add r4, %[pixels], %[line_size]\n\t"
+ "tmcr wcgr2, r12 \n\t"
+ "add r5, %[block], %[line_size] \n\t"
+ "mov %[line_size], %[line_size], lsl #1 \n\t"
+ "pld [r5] \n\t"
+ "pld [r5, #32] \n\t"
+
+ "1: \n\t"
+ "wldrd wr10, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "wldrd wr13, [r4] \n\t"
+ "pld [%[pixels]] \n\t"
+ "wldrd wr14, [r4, #8] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "add r4, r4, %[line_size] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "pld [r4] \n\t"
+ "pld [r4, #32] \n\t"
+ "walignr1 wr2, wr13, wr14 \n\t"
+ "wmoveq wr4, wr11 \n\t"
+ "wmoveq wr6, wr14 \n\t"
+ "walignr2ne wr4, wr10, wr11 \n\t"
+ "wldrd wr10, [%[block]] \n\t"
+ "walignr2ne wr6, wr13, wr14 \n\t"
+ "wldrd wr12, [r5] \n\t"
+ WAVG2B" wr0, wr0, wr4 \n\t"
+ WAVG2B" wr2, wr2, wr6 \n\t"
+ WAVG2B" wr0, wr0, wr10 \n\t"
+ WAVG2B" wr2, wr2, wr12 \n\t"
+ "wstrd wr0, [%[block]] \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "wstrd wr2, [r5] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "add r5, r5, %[line_size] \n\t"
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "pld [r5] \n\t"
+ "pld [r5, #32] \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block), [line_size]"+r"(stride)
+ :
+ : "r4", "r5", "r12", "memory");
+}
+
+void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
+ __asm__ __volatile__(
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r12, r12, #1 \n\t"
+ "add r4, %[pixels], %[line_size]\n\t"
+ "tmcr wcgr2, r12 \n\t"
+ "add r5, %[block], %[line_size] \n\t"
+ "mov %[line_size], %[line_size], lsl #1 \n\t"
+ "pld [r5] \n\t"
+ "pld [r5, #32] \n\t"
+
+ "1: \n\t"
+ "wldrd wr10, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "wldrd wr12, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "wldrd wr13, [r4] \n\t"
+ "pld [%[pixels]] \n\t"
+ "wldrd wr14, [r4, #8] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "wldrd wr15, [r4, #16] \n\t"
+ "add r4, r4, %[line_size] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "pld [r4] \n\t"
+ "pld [r4, #32] \n\t"
+ "walignr1 wr1, wr11, wr12 \n\t"
+ "walignr1 wr2, wr13, wr14 \n\t"
+ "walignr1 wr3, wr14, wr15 \n\t"
+ "wmoveq wr4, wr11 \n\t"
+ "wmoveq wr5, wr12 \n\t"
+ "wmoveq wr6, wr14 \n\t"
+ "wmoveq wr7, wr15 \n\t"
+ "walignr2ne wr4, wr10, wr11 \n\t"
+ "walignr2ne wr5, wr11, wr12 \n\t"
+ "walignr2ne wr6, wr13, wr14 \n\t"
+ "walignr2ne wr7, wr14, wr15 \n\t"
+ "wldrd wr10, [%[block]] \n\t"
+ WAVG2B" wr0, wr0, wr4 \n\t"
+ "wldrd wr11, [%[block], #8] \n\t"
+ WAVG2B" wr1, wr1, wr5 \n\t"
+ "wldrd wr12, [r5] \n\t"
+ WAVG2B" wr2, wr2, wr6 \n\t"
+ "wldrd wr13, [r5, #8] \n\t"
+ WAVG2B" wr3, wr3, wr7 \n\t"
+ WAVG2B" wr0, wr0, wr10 \n\t"
+ WAVG2B" wr1, wr1, wr11 \n\t"
+ WAVG2B" wr2, wr2, wr12 \n\t"
+ WAVG2B" wr3, wr3, wr13 \n\t"
+ "wstrd wr0, [%[block]] \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "wstrd wr1, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "wstrd wr2, [r5] \n\t"
+ "pld [%[block]] \n\t"
+ "wstrd wr3, [r5, #8] \n\t"
+ "add r5, r5, %[line_size] \n\t"
+ "pld [%[block], #32] \n\t"
+ "pld [r5] \n\t"
+ "pld [r5, #32] \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block), [line_size]"+r"(stride)
+ :
+ :"r4", "r5", "r12", "memory");
+}
+
+void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ __asm__ __volatile__(
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "and r12, %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+
+ "wldrd wr10, [%[pixels]] \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "pld [%[block]] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+
+ "1: \n\t"
+ "wldrd wr10, [%[pixels]] \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr4, wr10, wr11 \n\t"
+ "wldrd wr10, [%[block]] \n\t"
+ WAVG2B" wr8, wr0, wr4 \n\t"
+ WAVG2B" wr8, wr8, wr10 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ "wldrd wr10, [%[pixels]] \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "pld [%[block]] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "wldrd wr10, [%[block]] \n\t"
+ WAVG2B" wr8, wr0, wr4 \n\t"
+ WAVG2B" wr8, wr8, wr10 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ "subs %[h], %[h], #2 \n\t"
+ "pld [%[block]] \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block), [line_size]"+r"(stride)
+ :
+ : "cc", "memory", "r12");
+}
+
+void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ __asm__ __volatile__(
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "and r12, %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+
+ "wldrd wr10, [%[pixels]] \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "wldrd wr12, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "walignr1 wr1, wr11, wr12 \n\t"
+
+ "1: \n\t"
+ "wldrd wr10, [%[pixels]] \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "wldrd wr12, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr4, wr10, wr11 \n\t"
+ "walignr1 wr5, wr11, wr12 \n\t"
+ WAVG2B" wr8, wr0, wr4 \n\t"
+ WAVG2B" wr9, wr1, wr5 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ "wldrd wr10, [%[pixels]] \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "wldrd wr12, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "walignr1 wr1, wr11, wr12 \n\t"
+ WAVG2B" wr8, wr0, wr4 \n\t"
+ WAVG2B" wr9, wr1, wr5 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ "subs %[h], %[h], #2 \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block), [line_size]"+r"(stride)
+ :
+ : "r4", "r5", "r12", "memory");
+}
+
+void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ int stride = line_size;
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ __asm__ __volatile__(
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "and r12, %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+
+ "wldrd wr10, [%[pixels]] \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "pld [%[block]] \n\t"
+ "wldrd wr12, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "walignr1 wr1, wr11, wr12 \n\t"
+
+ "1: \n\t"
+ "wldrd wr10, [%[pixels]] \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "wldrd wr12, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr4, wr10, wr11 \n\t"
+ "walignr1 wr5, wr11, wr12 \n\t"
+ "wldrd wr10, [%[block]] \n\t"
+ "wldrd wr11, [%[block], #8] \n\t"
+ WAVG2B" wr8, wr0, wr4 \n\t"
+ WAVG2B" wr9, wr1, wr5 \n\t"
+ WAVG2B" wr8, wr8, wr10 \n\t"
+ WAVG2B" wr9, wr9, wr11 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ "wldrd wr10, [%[pixels]] \n\t"
+ "wldrd wr11, [%[pixels], #8] \n\t"
+ "pld [%[block]] \n\t"
+ "wldrd wr12, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr0, wr10, wr11 \n\t"
+ "walignr1 wr1, wr11, wr12 \n\t"
+ "wldrd wr10, [%[block]] \n\t"
+ "wldrd wr11, [%[block], #8] \n\t"
+ WAVG2B" wr8, wr0, wr4 \n\t"
+ WAVG2B" wr9, wr1, wr5 \n\t"
+ WAVG2B" wr8, wr8, wr10 \n\t"
+ WAVG2B" wr9, wr9, wr11 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ "subs %[h], %[h], #2 \n\t"
+ "pld [%[block]] \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block), [line_size]"+r"(stride)
+ :
+ : "r4", "r5", "r12", "memory");
+}
+
+void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
+ __asm__ __volatile__(
+ "pld [%[pixels]] \n\t"
+ "mov r12, #2 \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "tmcr wcgr0, r12 \n\t" /* for shift value */
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+
+ // [wr0 wr1 wr2 wr3] <= *
+ // [wr4 wr5 wr6 wr7]
+ "wldrd wr12, [%[pixels]] \n\t"
+ "add r12, r12, #1 \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "tmcr wcgr2, r12 \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "cmp r12, #8 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr2, wr12, wr13 \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "wunpckelub wr0, wr2 \n\t"
+ "wunpckehub wr1, wr2 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "waddhus wr0, wr0, wr8 \n\t"
+ "waddhus wr1, wr1, wr9 \n\t"
+
+ "1: \n\t"
+ // [wr0 wr1 wr2 wr3]
+ // [wr4 wr5 wr6 wr7] <= *
+ "wldrd wr12, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "walignr1 wr6, wr12, wr13 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "wunpckelub wr4, wr6 \n\t"
+ "wunpckehub wr5, wr6 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "waddhus wr4, wr4, wr8 \n\t"
+ "waddhus wr5, wr5, wr9 \n\t"
+ "waddhus wr8, wr0, wr4 \n\t"
+ "waddhus wr9, wr1, wr5 \n\t"
+ "waddhus wr8, wr8, wr15 \n\t"
+ "waddhus wr9, wr9, wr15 \n\t"
+ "wsrlhg wr8, wr8, wcgr0 \n\t"
+ "wsrlhg wr9, wr9, wcgr0 \n\t"
+ "wpackhus wr8, wr8, wr9 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ // [wr0 wr1 wr2 wr3] <= *
+ // [wr4 wr5 wr6 wr7]
+ "wldrd wr12, [%[pixels]] \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "walignr1 wr2, wr12, wr13 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "wunpckelub wr0, wr2 \n\t"
+ "wunpckehub wr1, wr2 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "waddhus wr0, wr0, wr8 \n\t"
+ "waddhus wr1, wr1, wr9 \n\t"
+ "waddhus wr8, wr0, wr4 \n\t"
+ "waddhus wr9, wr1, wr5 \n\t"
+ "waddhus wr8, wr8, wr15 \n\t"
+ "waddhus wr9, wr9, wr15 \n\t"
+ "wsrlhg wr8, wr8, wcgr0 \n\t"
+ "wsrlhg wr9, wr9, wcgr0 \n\t"
+ "wpackhus wr8, wr8, wr9 \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block)
+ : [line_size]"r"(line_size)
+ : "r12", "memory");
+}
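+
+/* Per output byte this computes, roughly,
+ *     dst = (p00 + p01 + p10 + p11 + rnd) >> 2
+ * over the 2x2 source neighbourhood, with rnd = 2 (rnd) or 1 (no_rnd)
+ * preloaded into wr15 by SET_RND(): bytes are widened to 16-bit lanes
+ * (wunpckelub/wunpckehub), summed, shifted by the count in wcgr0 (2) and
+ * packed back with unsigned saturation (wpackhus). */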
+
+void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
+ __asm__ __volatile__(
+ "pld [%[pixels]] \n\t"
+ "mov r12, #2 \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "tmcr wcgr0, r12 \n\t" /* for shift value */
+ /* alignment */
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r12, r12, #1 \n\t"
+ "tmcr wcgr2, r12 \n\t"
+
+ // [wr0 wr1 wr2 wr3] <= *
+ // [wr4 wr5 wr6 wr7]
+ "wldrd wr12, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "wldrd wr14, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "walignr1 wr2, wr12, wr13 \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr3, wr13, wr14 \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "wmoveq wr11, wr14 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "walignr2ne wr11, wr13, wr14 \n\t"
+ "wunpckelub wr0, wr2 \n\t"
+ "wunpckehub wr1, wr2 \n\t"
+ "wunpckelub wr2, wr3 \n\t"
+ "wunpckehub wr3, wr3 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "wunpckelub wr10, wr11 \n\t"
+ "wunpckehub wr11, wr11 \n\t"
+ "waddhus wr0, wr0, wr8 \n\t"
+ "waddhus wr1, wr1, wr9 \n\t"
+ "waddhus wr2, wr2, wr10 \n\t"
+ "waddhus wr3, wr3, wr11 \n\t"
+
+ "1: \n\t"
+ // [wr0 wr1 wr2 wr3]
+ // [wr4 wr5 wr6 wr7] <= *
+ "wldrd wr12, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "wldrd wr14, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "walignr1 wr6, wr12, wr13 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr7, wr13, wr14 \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "wmoveq wr11, wr14 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "walignr2ne wr11, wr13, wr14 \n\t"
+ "wunpckelub wr4, wr6 \n\t"
+ "wunpckehub wr5, wr6 \n\t"
+ "wunpckelub wr6, wr7 \n\t"
+ "wunpckehub wr7, wr7 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "wunpckelub wr10, wr11 \n\t"
+ "wunpckehub wr11, wr11 \n\t"
+ "waddhus wr4, wr4, wr8 \n\t"
+ "waddhus wr5, wr5, wr9 \n\t"
+ "waddhus wr6, wr6, wr10 \n\t"
+ "waddhus wr7, wr7, wr11 \n\t"
+ "waddhus wr8, wr0, wr4 \n\t"
+ "waddhus wr9, wr1, wr5 \n\t"
+ "waddhus wr10, wr2, wr6 \n\t"
+ "waddhus wr11, wr3, wr7 \n\t"
+ "waddhus wr8, wr8, wr15 \n\t"
+ "waddhus wr9, wr9, wr15 \n\t"
+ "waddhus wr10, wr10, wr15 \n\t"
+ "waddhus wr11, wr11, wr15 \n\t"
+ "wsrlhg wr8, wr8, wcgr0 \n\t"
+ "wsrlhg wr9, wr9, wcgr0 \n\t"
+ "wsrlhg wr10, wr10, wcgr0 \n\t"
+ "wsrlhg wr11, wr11, wcgr0 \n\t"
+ "wpackhus wr8, wr8, wr9 \n\t"
+ "wpackhus wr9, wr10, wr11 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ // [wr0 wr1 wr2 wr3] <= *
+ // [wr4 wr5 wr6 wr7]
+ "wldrd wr12, [%[pixels]] \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "wldrd wr14, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "walignr1 wr2, wr12, wr13 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr3, wr13, wr14 \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "wmoveq wr11, wr14 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "walignr2ne wr11, wr13, wr14 \n\t"
+ "wunpckelub wr0, wr2 \n\t"
+ "wunpckehub wr1, wr2 \n\t"
+ "wunpckelub wr2, wr3 \n\t"
+ "wunpckehub wr3, wr3 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "wunpckelub wr10, wr11 \n\t"
+ "wunpckehub wr11, wr11 \n\t"
+ "waddhus wr0, wr0, wr8 \n\t"
+ "waddhus wr1, wr1, wr9 \n\t"
+ "waddhus wr2, wr2, wr10 \n\t"
+ "waddhus wr3, wr3, wr11 \n\t"
+ "waddhus wr8, wr0, wr4 \n\t"
+ "waddhus wr9, wr1, wr5 \n\t"
+ "waddhus wr10, wr2, wr6 \n\t"
+ "waddhus wr11, wr3, wr7 \n\t"
+ "waddhus wr8, wr8, wr15 \n\t"
+ "waddhus wr9, wr9, wr15 \n\t"
+ "waddhus wr10, wr10, wr15 \n\t"
+ "waddhus wr11, wr11, wr15 \n\t"
+ "wsrlhg wr8, wr8, wcgr0 \n\t"
+ "wsrlhg wr9, wr9, wcgr0 \n\t"
+ "wsrlhg wr10, wr10, wcgr0 \n\t"
+ "wsrlhg wr11, wr11, wcgr0 \n\t"
+ "wpackhus wr8, wr8, wr9 \n\t"
+ "wpackhus wr9, wr10, wr11 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ "subs %[h], %[h], #2 \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block)
+ : [line_size]"r"(line_size)
+ : "r12", "memory");
+}
+
+void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
+ __asm__ __volatile__(
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "pld [%[pixels]] \n\t"
+ "mov r12, #2 \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "tmcr wcgr0, r12 \n\t" /* for shift value */
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+
+ // [wr0 wr1 wr2 wr3] <= *
+ // [wr4 wr5 wr6 wr7]
+ "wldrd wr12, [%[pixels]] \n\t"
+ "add r12, r12, #1 \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "tmcr wcgr2, r12 \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "cmp r12, #8 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr2, wr12, wr13 \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "wunpckelub wr0, wr2 \n\t"
+ "wunpckehub wr1, wr2 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "waddhus wr0, wr0, wr8 \n\t"
+ "waddhus wr1, wr1, wr9 \n\t"
+
+ "1: \n\t"
+ // [wr0 wr1 wr2 wr3]
+ // [wr4 wr5 wr6 wr7] <= *
+ "wldrd wr12, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "walignr1 wr6, wr12, wr13 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "wunpckelub wr4, wr6 \n\t"
+ "wunpckehub wr5, wr6 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "waddhus wr4, wr4, wr8 \n\t"
+ "waddhus wr5, wr5, wr9 \n\t"
+ "waddhus wr8, wr0, wr4 \n\t"
+ "waddhus wr9, wr1, wr5 \n\t"
+ "waddhus wr8, wr8, wr15 \n\t"
+ "waddhus wr9, wr9, wr15 \n\t"
+ "wldrd wr12, [%[block]] \n\t"
+ "wsrlhg wr8, wr8, wcgr0 \n\t"
+ "wsrlhg wr9, wr9, wcgr0 \n\t"
+ "wpackhus wr8, wr8, wr9 \n\t"
+ WAVG2B" wr8, wr8, wr12 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "wldrd wr12, [%[pixels]] \n\t"
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+
+ // [wr0 wr1 wr2 wr3] <= *
+ // [wr4 wr5 wr6 wr7]
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "walignr1 wr2, wr12, wr13 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "wunpckelub wr0, wr2 \n\t"
+ "wunpckehub wr1, wr2 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "waddhus wr0, wr0, wr8 \n\t"
+ "waddhus wr1, wr1, wr9 \n\t"
+ "waddhus wr8, wr0, wr4 \n\t"
+ "waddhus wr9, wr1, wr5 \n\t"
+ "waddhus wr8, wr8, wr15 \n\t"
+ "waddhus wr9, wr9, wr15 \n\t"
+ "wldrd wr12, [%[block]] \n\t"
+ "wsrlhg wr8, wr8, wcgr0 \n\t"
+ "wsrlhg wr9, wr9, wcgr0 \n\t"
+ "wpackhus wr8, wr8, wr9 \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ WAVG2B" wr8, wr8, wr12 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block)
+ : [line_size]"r"(line_size)
+ : "r12", "memory");
+}
+
+void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
+{
+ // [wr0 wr1 wr2 wr3] for previous line
+ // [wr4 wr5 wr6 wr7] for current line
+ SET_RND(wr15); // =2 for rnd and =1 for no_rnd version
+ __asm__ __volatile__(
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "pld [%[pixels]] \n\t"
+ "mov r12, #2 \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "tmcr wcgr0, r12 \n\t" /* for shift value */
+ /* alignment */
+ "and r12, %[pixels], #7 \n\t"
+ "bic %[pixels], %[pixels], #7 \n\t"
+ "tmcr wcgr1, r12 \n\t"
+ "add r12, r12, #1 \n\t"
+ "tmcr wcgr2, r12 \n\t"
+
+ // [wr0 wr1 wr2 wr3] <= *
+ // [wr4 wr5 wr6 wr7]
+ "wldrd wr12, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "wldrd wr14, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "pld [%[pixels]] \n\t"
+ "walignr1 wr2, wr12, wr13 \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr3, wr13, wr14 \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "wmoveq wr11, wr14 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "walignr2ne wr11, wr13, wr14 \n\t"
+ "wunpckelub wr0, wr2 \n\t"
+ "wunpckehub wr1, wr2 \n\t"
+ "wunpckelub wr2, wr3 \n\t"
+ "wunpckehub wr3, wr3 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "wunpckelub wr10, wr11 \n\t"
+ "wunpckehub wr11, wr11 \n\t"
+ "waddhus wr0, wr0, wr8 \n\t"
+ "waddhus wr1, wr1, wr9 \n\t"
+ "waddhus wr2, wr2, wr10 \n\t"
+ "waddhus wr3, wr3, wr11 \n\t"
+
+ "1: \n\t"
+ // [wr0 wr1 wr2 wr3]
+ // [wr4 wr5 wr6 wr7] <= *
+ "wldrd wr12, [%[pixels]] \n\t"
+ "cmp r12, #8 \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "wldrd wr14, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "walignr1 wr6, wr12, wr13 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr7, wr13, wr14 \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "wmoveq wr11, wr14 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "walignr2ne wr11, wr13, wr14 \n\t"
+ "wunpckelub wr4, wr6 \n\t"
+ "wunpckehub wr5, wr6 \n\t"
+ "wunpckelub wr6, wr7 \n\t"
+ "wunpckehub wr7, wr7 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "wunpckelub wr10, wr11 \n\t"
+ "wunpckehub wr11, wr11 \n\t"
+ "waddhus wr4, wr4, wr8 \n\t"
+ "waddhus wr5, wr5, wr9 \n\t"
+ "waddhus wr6, wr6, wr10 \n\t"
+ "waddhus wr7, wr7, wr11 \n\t"
+ "waddhus wr8, wr0, wr4 \n\t"
+ "waddhus wr9, wr1, wr5 \n\t"
+ "waddhus wr10, wr2, wr6 \n\t"
+ "waddhus wr11, wr3, wr7 \n\t"
+ "waddhus wr8, wr8, wr15 \n\t"
+ "waddhus wr9, wr9, wr15 \n\t"
+ "waddhus wr10, wr10, wr15 \n\t"
+ "waddhus wr11, wr11, wr15 \n\t"
+ "wsrlhg wr8, wr8, wcgr0 \n\t"
+ "wsrlhg wr9, wr9, wcgr0 \n\t"
+ "wldrd wr12, [%[block]] \n\t"
+ "wldrd wr13, [%[block], #8] \n\t"
+ "wsrlhg wr10, wr10, wcgr0 \n\t"
+ "wsrlhg wr11, wr11, wcgr0 \n\t"
+ "wpackhus wr8, wr8, wr9 \n\t"
+ "wpackhus wr9, wr10, wr11 \n\t"
+ WAVG2B" wr8, wr8, wr12 \n\t"
+ WAVG2B" wr9, wr9, wr13 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+
+ // [wr0 wr1 wr2 wr3] <= *
+ // [wr4 wr5 wr6 wr7]
+ "wldrd wr12, [%[pixels]] \n\t"
+ "pld [%[block]] \n\t"
+ "wldrd wr13, [%[pixels], #8] \n\t"
+ "pld [%[block], #32] \n\t"
+ "wldrd wr14, [%[pixels], #16] \n\t"
+ "add %[pixels], %[pixels], %[line_size] \n\t"
+ "walignr1 wr2, wr12, wr13 \n\t"
+ "pld [%[pixels]] \n\t"
+ "pld [%[pixels], #32] \n\t"
+ "walignr1 wr3, wr13, wr14 \n\t"
+ "wmoveq wr10, wr13 \n\t"
+ "wmoveq wr11, wr14 \n\t"
+ "walignr2ne wr10, wr12, wr13 \n\t"
+ "walignr2ne wr11, wr13, wr14 \n\t"
+ "wunpckelub wr0, wr2 \n\t"
+ "wunpckehub wr1, wr2 \n\t"
+ "wunpckelub wr2, wr3 \n\t"
+ "wunpckehub wr3, wr3 \n\t"
+ "wunpckelub wr8, wr10 \n\t"
+ "wunpckehub wr9, wr10 \n\t"
+ "wunpckelub wr10, wr11 \n\t"
+ "wunpckehub wr11, wr11 \n\t"
+ "waddhus wr0, wr0, wr8 \n\t"
+ "waddhus wr1, wr1, wr9 \n\t"
+ "waddhus wr2, wr2, wr10 \n\t"
+ "waddhus wr3, wr3, wr11 \n\t"
+ "waddhus wr8, wr0, wr4 \n\t"
+ "waddhus wr9, wr1, wr5 \n\t"
+ "waddhus wr10, wr2, wr6 \n\t"
+ "waddhus wr11, wr3, wr7 \n\t"
+ "waddhus wr8, wr8, wr15 \n\t"
+ "waddhus wr9, wr9, wr15 \n\t"
+ "waddhus wr10, wr10, wr15 \n\t"
+ "waddhus wr11, wr11, wr15 \n\t"
+ "wsrlhg wr8, wr8, wcgr0 \n\t"
+ "wsrlhg wr9, wr9, wcgr0 \n\t"
+ "wldrd wr12, [%[block]] \n\t"
+ "wldrd wr13, [%[block], #8] \n\t"
+ "wsrlhg wr10, wr10, wcgr0 \n\t"
+ "wsrlhg wr11, wr11, wcgr0 \n\t"
+ "wpackhus wr8, wr8, wr9 \n\t"
+ "wpackhus wr9, wr10, wr11 \n\t"
+ WAVG2B" wr8, wr8, wr12 \n\t"
+ WAVG2B" wr9, wr9, wr13 \n\t"
+ "wstrd wr8, [%[block]] \n\t"
+ "wstrd wr9, [%[block], #8] \n\t"
+ "add %[block], %[block], %[line_size] \n\t"
+ "subs %[h], %[h], #2 \n\t"
+ "pld [%[block]] \n\t"
+ "pld [%[block], #32] \n\t"
+ "bne 1b \n\t"
+ : [h]"+r"(h), [pixels]"+r"(pixels), [block]"+r"(block)
+ : [line_size]"r"(line_size)
+ : "r12", "memory");
+}
diff --git a/src/libffmpeg/libavcodec/armv4l/jrevdct_arm.S b/contrib/ffmpeg/libavcodec/armv4l/jrevdct_arm.S
index 294ea4750..294ea4750 100644
--- a/src/libffmpeg/libavcodec/armv4l/jrevdct_arm.S
+++ b/contrib/ffmpeg/libavcodec/armv4l/jrevdct_arm.S
diff --git a/contrib/ffmpeg/libavcodec/armv4l/mathops.h b/contrib/ffmpeg/libavcodec/armv4l/mathops.h
new file mode 100644
index 000000000..7ddd0ec6e
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/armv4l/mathops.h
@@ -0,0 +1,49 @@
+/*
+ * simple math operations
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifdef FRAC_BITS
+# define MULL(a, b) \
+ ({ int lo, hi;\
+ asm("smull %0, %1, %2, %3 \n\t"\
+ "mov %0, %0, lsr %4\n\t"\
+ "add %1, %0, %1, lsl %5\n\t"\
+ : "=&r"(lo), "=&r"(hi)\
+ : "r"(b), "r"(a), "i"(FRAC_BITS), "i"(32-FRAC_BITS));\
+ hi; })
+#endif
+
+#define MULH(a, b) \
+ ({ int lo, hi;\
+ asm ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));\
+ hi; })
+
+#if defined(HAVE_ARMV5TE)
+
+/* signed 16x16 -> 32 multiply add accumulate */
+# define MAC16(rt, ra, rb) \
+ asm ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
+/* signed 16x16 -> 32 multiply */
+# define MUL16(ra, rb) \
+ ({ int __rt; \
+ asm ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
+ __rt; })
+
+#endif
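MULH returns the high 32 bits of the signed 64-bit product via smull, and MULL keeps a FRAC_BITS fixed-point result by recombining the low and high halves. Portable C equivalents, shown only to document the intended semantics:

#include <stdint.h>

/* Portable models of MULH and MULL above (illustration only). */
static inline int mulh_c(int a, int b)
{
    return (int)(((int64_t)a * b) >> 32);        /* high half of the 64-bit product */
}

static inline int mull_c(int a, int b, int frac_bits)
{
    return (int)(((int64_t)a * b) >> frac_bits); /* fixed-point product with FRAC_BITS fraction */
}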
diff --git a/src/libffmpeg/libavcodec/armv4l/mpegvideo_arm.c b/contrib/ffmpeg/libavcodec/armv4l/mpegvideo_arm.c
index 263e3c5bc..10a005cd3 100644
--- a/src/libffmpeg/libavcodec/armv4l/mpegvideo_arm.c
+++ b/contrib/ffmpeg/libavcodec/armv4l/mpegvideo_arm.c
@@ -1,18 +1,20 @@
/*
* Copyright (c) 2002 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/contrib/ffmpeg/libavcodec/armv4l/mpegvideo_iwmmxt.c b/contrib/ffmpeg/libavcodec/armv4l/mpegvideo_iwmmxt.c
new file mode 100644
index 000000000..1336ac5f8
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/armv4l/mpegvideo_iwmmxt.c
@@ -0,0 +1,119 @@
+/*
+ * copyright (c) 2004 AGAWA Koji
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+#include "../mpegvideo.h"
+#include "../avcodec.h"
+
+static void dct_unquantize_h263_intra_iwmmxt(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int level, qmul, qadd;
+ int nCoeffs;
+ DCTELEM *block_orig = block;
+
+ assert(s->block_last_index[n]>=0);
+
+ qmul = qscale << 1;
+
+ if (!s->h263_aic) {
+ if (n < 4)
+ level = block[0] * s->y_dc_scale;
+ else
+ level = block[0] * s->c_dc_scale;
+ qadd = (qscale - 1) | 1;
+ }else{
+ qadd = 0;
+ level = block[0];
+ }
+ if(s->ac_pred)
+ nCoeffs=63;
+ else
+ nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+
+ __asm__ __volatile__ (
+/* "movd %1, %%mm6 \n\t" //qmul */
+/* "packssdw %%mm6, %%mm6 \n\t" */
+/* "packssdw %%mm6, %%mm6 \n\t" */
+ "tbcsth wr6, %[qmul] \n\t"
+/* "movd %2, %%mm5 \n\t" //qadd */
+/* "packssdw %%mm5, %%mm5 \n\t" */
+/* "packssdw %%mm5, %%mm5 \n\t" */
+ "tbcsth wr5, %[qadd] \n\t"
+ "wzero wr7 \n\t" /* "pxor %%mm7, %%mm7 \n\t" */
+ "wzero wr4 \n\t" /* "pxor %%mm4, %%mm4 \n\t" */
+ "wsubh wr7, wr5, wr7 \n\t" /* "psubw %%mm5, %%mm7 \n\t" */
+ "1: \n\t"
+ "wldrd wr2, [%[block]] \n\t" /* "movq (%0, %3), %%mm0 \n\t" */
+ "wldrd wr3, [%[block], #8] \n\t" /* "movq 8(%0, %3), %%mm1 \n\t" */
+ "wmulsl wr0, wr6, wr2 \n\t" /* "pmullw %%mm6, %%mm0 \n\t" */
+ "wmulsl wr1, wr6, wr3 \n\t" /* "pmullw %%mm6, %%mm1 \n\t" */
+/* "movq (%0, %3), %%mm2 \n\t" */
+/* "movq 8(%0, %3), %%mm3 \n\t" */
+ "wcmpgtsh wr2, wr4, wr2 \n\t" /* "pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 */
+ "wcmpgtsh wr3, wr4, wr2 \n\t" /* "pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 */
+ "wxor wr0, wr2, wr0 \n\t" /* "pxor %%mm2, %%mm0 \n\t" */
+ "wxor wr1, wr3, wr1 \n\t" /* "pxor %%mm3, %%mm1 \n\t" */
+ "waddh wr0, wr7, wr0 \n\t" /* "paddw %%mm7, %%mm0 \n\t" */
+ "waddh wr1, wr7, wr1 \n\t" /* "paddw %%mm7, %%mm1 \n\t" */
+ "wxor wr2, wr0, wr2 \n\t" /* "pxor %%mm0, %%mm2 \n\t" */
+ "wxor wr3, wr1, wr3 \n\t" /* "pxor %%mm1, %%mm3 \n\t" */
+ "wcmpeqh wr0, wr7, wr0 \n\t" /* "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0 */
+ "wcmpeqh wr1, wr7, wr1 \n\t" /* "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0 */
+ "wandn wr0, wr2, wr0 \n\t" /* "pandn %%mm2, %%mm0 \n\t" */
+ "wandn wr1, wr3, wr1 \n\t" /* "pandn %%mm3, %%mm1 \n\t" */
+ "wstrd wr0, [%[block]] \n\t" /* "movq %%mm0, (%0, %3) \n\t" */
+ "wstrd wr1, [%[block], #8] \n\t" /* "movq %%mm1, 8(%0, %3) \n\t" */
+ "add %[block], %[block], #16 \n\t" /* "addl $16, %3 \n\t" */
+ "subs %[i], %[i], #1 \n\t"
+ "bne 1b \n\t" /* "jng 1b \n\t" */
+ :[block]"+r"(block)
+ :[i]"r"((nCoeffs + 8) / 8), [qmul]"r"(qmul), [qadd]"r"(qadd)
+ :"memory");
+
+ block_orig[0] = level;
+}
+
+#if 0
+static void dct_unquantize_h263_inter_iwmmxt(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int nCoeffs;
+
+ assert(s->block_last_index[n]>=0);
+
+ if(s->ac_pred)
+ nCoeffs=63;
+ else
+ nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
+
+ ippiQuantInvInter_Compact_H263_16s_I(block, nCoeffs+1, qscale);
+}
+#endif
+
+void MPV_common_init_iwmmxt(MpegEncContext *s)
+{
+ if (!(mm_flags & MM_IWMMXT)) return;
+
+ s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_iwmmxt;
+#if 0
+ s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_iwmmxt;
+#endif
+}
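The SIMD loop implements the usual branch-free H.263 inverse quantization: every coefficient is multiplied by qmul and pushed away from zero by qadd according to its sign, zero coefficients stay zero, and the DC value saved in level is written back at the end. A scalar reference of the same arithmetic (a sketch, not lavc code):

#include <stdint.h>

typedef int16_t DCTELEM;

/* Scalar equivalent of the IWMMXT dequant loop above (illustration only). */
static void h263_intra_dequant_scalar(DCTELEM *block, int ncoeffs,
                                      int qmul, int qadd, DCTELEM dc)
{
    for (int i = 0; i < ncoeffs; i++) {
        int level = block[i];
        if (level > 0)
            level = level * qmul + qadd;
        else if (level < 0)
            level = level * qmul - qadd;
        block[i] = (DCTELEM)level;   /* zero coefficients are left untouched */
    }
    block[0] = dc;                   /* DC term uses the separately scaled value */
}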
diff --git a/src/libffmpeg/libavcodec/armv4l/simple_idct_arm.S b/contrib/ffmpeg/libavcodec/armv4l/simple_idct_arm.S
index 43751896d..b5a20f6da 100644
--- a/src/libffmpeg/libavcodec/armv4l/simple_idct_arm.S
+++ b/contrib/ffmpeg/libavcodec/armv4l/simple_idct_arm.S
@@ -5,20 +5,22 @@
*
* Author: Frederic Boulay <dilb@handhelds.org>
*
- * You can redistribute this file and/or modify
- * it under the terms of the GNU General Public License (version 2)
- * as published by the Free Software Foundation.
+ * This file is part of FFmpeg.
*
- * This file is distributed in the hope that it will be useful,
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this library; if not, write to the Free Software
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
- *
* The function defined in this file, is derived from the simple_idct function
* from the libavcodec library part of the ffmpeg project.
*/
diff --git a/contrib/ffmpeg/libavcodec/armv4l/simple_idct_armv5te.S b/contrib/ffmpeg/libavcodec/armv4l/simple_idct_armv5te.S
new file mode 100644
index 000000000..28bee0643
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/armv4l/simple_idct_armv5te.S
@@ -0,0 +1,718 @@
+/*
+ * Simple IDCT
+ *
+ * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2006 Mans Rullgard <mru@inprovide.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define W1 22725 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W2 21407 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W3 19266 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W4 16383 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W5 12873 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W6 8867 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define W7 4520 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
+#define ROW_SHIFT 11
+#define COL_SHIFT 20
+
+#define W13 (W1 | (W3 << 16))
+#define W26 (W2 | (W6 << 16))
+#define W57 (W5 | (W7 << 16))
+
+ .text
+ .align
+w13: .long W13
+w26: .long W26
+w57: .long W57
+
+ .align
+ .func idct_row_armv5te
+idct_row_armv5te:
+ str lr, [sp, #-4]!
+
+ ldrd v1, [a1, #8]
+ ldrd a3, [a1] /* a3 = row[1:0], a4 = row[3:2] */
+ orrs v1, v1, v2
+ cmpeq v1, a4
+ cmpeq v1, a3, lsr #16
+ beq row_dc_only
+
+ mov v1, #(1<<(ROW_SHIFT-1))
+ mov ip, #16384
+ sub ip, ip, #1 /* ip = W4 */
+ smlabb v1, ip, a3, v1 /* v1 = W4*row[0]+(1<<(RS-1)) */
+ ldr ip, [pc, #(w26-.-8)] /* ip = W2 | (W6 << 16) */
+ smultb a2, ip, a4
+ smulbb lr, ip, a4
+ add v2, v1, a2
+ sub v3, v1, a2
+ sub v4, v1, lr
+ add v1, v1, lr
+
+ ldr ip, [pc, #(w13-.-8)] /* ip = W1 | (W3 << 16) */
+ ldr lr, [pc, #(w57-.-8)] /* lr = W5 | (W7 << 16) */
+ smulbt v5, ip, a3
+ smultt v6, lr, a4
+ smlatt v5, ip, a4, v5
+ smultt a2, ip, a3
+ smulbt v7, lr, a3
+ sub v6, v6, a2
+ smulbt a2, ip, a4
+ smultt fp, lr, a3
+ sub v7, v7, a2
+ smulbt a2, lr, a4
+ ldrd a3, [a1, #8] /* a3=row[5:4] a4=row[7:6] */
+ sub fp, fp, a2
+
+ orrs a2, a3, a4
+ beq 1f
+
+ smlabt v5, lr, a3, v5
+ smlabt v6, ip, a3, v6
+ smlatt v5, lr, a4, v5
+ smlabt v6, lr, a4, v6
+ smlatt v7, lr, a3, v7
+ smlatt fp, ip, a3, fp
+ smulbt a2, ip, a4
+ smlatt v7, ip, a4, v7
+ sub fp, fp, a2
+
+ ldr ip, [pc, #(w26-.-8)] /* ip = W2 | (W6 << 16) */
+ mov a2, #16384
+ sub a2, a2, #1 /* a2 = W4 */
+ smulbb a2, a2, a3 /* a2 = W4*row[4] */
+ smultb lr, ip, a4 /* lr = W6*row[6] */
+ add v1, v1, a2 /* v1 += W4*row[4] */
+ add v1, v1, lr /* v1 += W6*row[6] */
+ add v4, v4, a2 /* v4 += W4*row[4] */
+ sub v4, v4, lr /* v4 -= W6*row[6] */
+ smulbb lr, ip, a4 /* lr = W2*row[6] */
+ sub v2, v2, a2 /* v2 -= W4*row[4] */
+ sub v2, v2, lr /* v2 -= W2*row[6] */
+ sub v3, v3, a2 /* v3 -= W4*row[4] */
+ add v3, v3, lr /* v3 += W2*row[6] */
+
+1: add a2, v1, v5
+ mov a3, a2, lsr #11
+ bic a3, a3, #0x1f0000
+ sub a2, v2, v6
+ mov a2, a2, lsr #11
+ add a3, a3, a2, lsl #16
+ add a2, v3, v7
+ mov a4, a2, lsr #11
+ bic a4, a4, #0x1f0000
+ add a2, v4, fp
+ mov a2, a2, lsr #11
+ add a4, a4, a2, lsl #16
+ strd a3, [a1]
+
+ sub a2, v4, fp
+ mov a3, a2, lsr #11
+ bic a3, a3, #0x1f0000
+ sub a2, v3, v7
+ mov a2, a2, lsr #11
+ add a3, a3, a2, lsl #16
+ add a2, v2, v6
+ mov a4, a2, lsr #11
+ bic a4, a4, #0x1f0000
+ sub a2, v1, v5
+ mov a2, a2, lsr #11
+ add a4, a4, a2, lsl #16
+ strd a3, [a1, #8]
+
+ ldr pc, [sp], #4
+
+row_dc_only:
+ orr a3, a3, a3, lsl #16
+ bic a3, a3, #0xe000
+ mov a3, a3, lsl #3
+ mov a4, a3
+ strd a3, [a1]
+ strd a3, [a1, #8]
+
+ ldr pc, [sp], #4
+ .endfunc
+
+ .macro idct_col
+ ldr a4, [a1] /* a4 = col[1:0] */
+ mov ip, #16384
+ sub ip, ip, #1 /* ip = W4 */
+#if 0
+ mov v1, #(1<<(COL_SHIFT-1))
+ smlabt v2, ip, a4, v1 /* v2 = W4*col[1] + (1<<(COL_SHIFT-1)) */
+ smlabb v1, ip, a4, v1 /* v1 = W4*col[0] + (1<<(COL_SHIFT-1)) */
+ ldr a4, [a1, #(16*4)]
+#else
+ mov v1, #((1<<(COL_SHIFT-1))/W4) /* this matches the C version */
+ add v2, v1, a4, asr #16
+ rsb v2, v2, v2, lsl #14
+ mov a4, a4, lsl #16
+ add v1, v1, a4, asr #16
+ ldr a4, [a1, #(16*4)]
+ rsb v1, v1, v1, lsl #14
+#endif
+
+ smulbb lr, ip, a4
+ smulbt a3, ip, a4
+ sub v3, v1, lr
+ sub v5, v1, lr
+ add v7, v1, lr
+ add v1, v1, lr
+ sub v4, v2, a3
+ sub v6, v2, a3
+ add fp, v2, a3
+ ldr ip, [pc, #(w26-.-8)]
+ ldr a4, [a1, #(16*2)]
+ add v2, v2, a3
+
+ smulbb lr, ip, a4
+ smultb a3, ip, a4
+ add v1, v1, lr
+ sub v7, v7, lr
+ add v3, v3, a3
+ sub v5, v5, a3
+ smulbt lr, ip, a4
+ smultt a3, ip, a4
+ add v2, v2, lr
+ sub fp, fp, lr
+ add v4, v4, a3
+ ldr a4, [a1, #(16*6)]
+ sub v6, v6, a3
+
+ smultb lr, ip, a4
+ smulbb a3, ip, a4
+ add v1, v1, lr
+ sub v7, v7, lr
+ sub v3, v3, a3
+ add v5, v5, a3
+ smultt lr, ip, a4
+ smulbt a3, ip, a4
+ add v2, v2, lr
+ sub fp, fp, lr
+ sub v4, v4, a3
+ add v6, v6, a3
+
+ stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp}
+
+ ldr ip, [pc, #(w13-.-8)]
+ ldr a4, [a1, #(16*1)]
+ ldr lr, [pc, #(w57-.-8)]
+ smulbb v1, ip, a4
+ smultb v3, ip, a4
+ smulbb v5, lr, a4
+ smultb v7, lr, a4
+ smulbt v2, ip, a4
+ smultt v4, ip, a4
+ smulbt v6, lr, a4
+ smultt fp, lr, a4
+ rsb v4, v4, #0
+ ldr a4, [a1, #(16*3)]
+ rsb v3, v3, #0
+
+ smlatb v1, ip, a4, v1
+ smlatb v3, lr, a4, v3
+ smulbb a3, ip, a4
+ smulbb a2, lr, a4
+ sub v5, v5, a3
+ sub v7, v7, a2
+ smlatt v2, ip, a4, v2
+ smlatt v4, lr, a4, v4
+ smulbt a3, ip, a4
+ smulbt a2, lr, a4
+ sub v6, v6, a3
+ ldr a4, [a1, #(16*5)]
+ sub fp, fp, a2
+
+ smlabb v1, lr, a4, v1
+ smlabb v3, ip, a4, v3
+ smlatb v5, lr, a4, v5
+ smlatb v7, ip, a4, v7
+ smlabt v2, lr, a4, v2
+ smlabt v4, ip, a4, v4
+ smlatt v6, lr, a4, v6
+ ldr a3, [a1, #(16*7)]
+ smlatt fp, ip, a4, fp
+
+ smlatb v1, lr, a3, v1
+ smlabb v3, lr, a3, v3
+ smlatb v5, ip, a3, v5
+ smulbb a4, ip, a3
+ smlatt v2, lr, a3, v2
+ sub v7, v7, a4
+ smlabt v4, lr, a3, v4
+ smulbt a4, ip, a3
+ smlatt v6, ip, a3, v6
+ sub fp, fp, a4
+ .endm
+
+ .align
+ .func idct_col_armv5te
+idct_col_armv5te:
+ str lr, [sp, #-4]!
+
+ idct_col
+
+ ldmfd sp!, {a3, a4}
+ adds a2, a3, v1
+ mov a2, a2, lsr #20
+ orrmi a2, a2, #0xf000
+ add ip, a4, v2
+ mov ip, ip, asr #20
+ orr a2, a2, ip, lsl #16
+ str a2, [a1]
+ subs a3, a3, v1
+ mov a2, a3, lsr #20
+ orrmi a2, a2, #0xf000
+ sub a4, a4, v2
+ mov a4, a4, asr #20
+ orr a2, a2, a4, lsl #16
+ ldmfd sp!, {a3, a4}
+ str a2, [a1, #(16*7)]
+
+ subs a2, a3, v3
+ mov a2, a2, lsr #20
+ orrmi a2, a2, #0xf000
+ sub ip, a4, v4
+ mov ip, ip, asr #20
+ orr a2, a2, ip, lsl #16
+ str a2, [a1, #(16*1)]
+ adds a3, a3, v3
+ mov a2, a3, lsr #20
+ orrmi a2, a2, #0xf000
+ add a4, a4, v4
+ mov a4, a4, asr #20
+ orr a2, a2, a4, lsl #16
+ ldmfd sp!, {a3, a4}
+ str a2, [a1, #(16*6)]
+
+ adds a2, a3, v5
+ mov a2, a2, lsr #20
+ orrmi a2, a2, #0xf000
+ add ip, a4, v6
+ mov ip, ip, asr #20
+ orr a2, a2, ip, lsl #16
+ str a2, [a1, #(16*2)]
+ subs a3, a3, v5
+ mov a2, a3, lsr #20
+ orrmi a2, a2, #0xf000
+ sub a4, a4, v6
+ mov a4, a4, asr #20
+ orr a2, a2, a4, lsl #16
+ ldmfd sp!, {a3, a4}
+ str a2, [a1, #(16*5)]
+
+ adds a2, a3, v7
+ mov a2, a2, lsr #20
+ orrmi a2, a2, #0xf000
+ add ip, a4, fp
+ mov ip, ip, asr #20
+ orr a2, a2, ip, lsl #16
+ str a2, [a1, #(16*3)]
+ subs a3, a3, v7
+ mov a2, a3, lsr #20
+ orrmi a2, a2, #0xf000
+ sub a4, a4, fp
+ mov a4, a4, asr #20
+ orr a2, a2, a4, lsl #16
+ str a2, [a1, #(16*4)]
+
+ ldr pc, [sp], #4
+ .endfunc
+
+ .align
+ .func idct_col_put_armv5te
+idct_col_put_armv5te:
+ str lr, [sp, #-4]!
+
+ idct_col
+
+ ldmfd sp!, {a3, a4}
+ ldr lr, [sp, #32]
+ add a2, a3, v1
+ movs a2, a2, asr #20
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ add ip, a4, v2
+ movs ip, ip, asr #20
+ movmi ip, #0
+ cmp ip, #255
+ movgt ip, #255
+ orr a2, a2, ip, lsl #8
+ sub a3, a3, v1
+ movs a3, a3, asr #20
+ movmi a3, #0
+ cmp a3, #255
+ movgt a3, #255
+ sub a4, a4, v2
+ movs a4, a4, asr #20
+ movmi a4, #0
+ cmp a4, #255
+ ldr v1, [sp, #28]
+ movgt a4, #255
+ strh a2, [v1]
+ add a2, v1, #2
+ str a2, [sp, #28]
+ orr a2, a3, a4, lsl #8
+ rsb v2, lr, lr, lsl #3
+ ldmfd sp!, {a3, a4}
+ strh a2, [v2, v1]!
+
+ sub a2, a3, v3
+ movs a2, a2, asr #20
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ sub ip, a4, v4
+ movs ip, ip, asr #20
+ movmi ip, #0
+ cmp ip, #255
+ movgt ip, #255
+ orr a2, a2, ip, lsl #8
+ strh a2, [v1, lr]!
+ add a3, a3, v3
+ movs a2, a3, asr #20
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ add a4, a4, v4
+ movs a4, a4, asr #20
+ movmi a4, #0
+ cmp a4, #255
+ movgt a4, #255
+ orr a2, a2, a4, lsl #8
+ ldmfd sp!, {a3, a4}
+ strh a2, [v2, -lr]!
+
+ add a2, a3, v5
+ movs a2, a2, asr #20
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ add ip, a4, v6
+ movs ip, ip, asr #20
+ movmi ip, #0
+ cmp ip, #255
+ movgt ip, #255
+ orr a2, a2, ip, lsl #8
+ strh a2, [v1, lr]!
+ sub a3, a3, v5
+ movs a2, a3, asr #20
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ sub a4, a4, v6
+ movs a4, a4, asr #20
+ movmi a4, #0
+ cmp a4, #255
+ movgt a4, #255
+ orr a2, a2, a4, lsl #8
+ ldmfd sp!, {a3, a4}
+ strh a2, [v2, -lr]!
+
+ add a2, a3, v7
+ movs a2, a2, asr #20
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ add ip, a4, fp
+ movs ip, ip, asr #20
+ movmi ip, #0
+ cmp ip, #255
+ movgt ip, #255
+ orr a2, a2, ip, lsl #8
+ strh a2, [v1, lr]
+ sub a3, a3, v7
+ movs a2, a3, asr #20
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ sub a4, a4, fp
+ movs a4, a4, asr #20
+ movmi a4, #0
+ cmp a4, #255
+ movgt a4, #255
+ orr a2, a2, a4, lsl #8
+ strh a2, [v2, -lr]
+
+ ldr pc, [sp], #4
+ .endfunc
+
+ .align
+ .func idct_col_add_armv5te
+idct_col_add_armv5te:
+ str lr, [sp, #-4]!
+
+ idct_col
+
+ ldr lr, [sp, #36]
+
+ ldmfd sp!, {a3, a4}
+ ldrh ip, [lr]
+ add a2, a3, v1
+ mov a2, a2, asr #20
+ sub a3, a3, v1
+ and v1, ip, #255
+ adds a2, a2, v1
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ add v1, a4, v2
+ mov v1, v1, asr #20
+ adds v1, v1, ip, lsr #8
+ movmi v1, #0
+ cmp v1, #255
+ movgt v1, #255
+ orr a2, a2, v1, lsl #8
+ ldr v1, [sp, #32]
+ sub a4, a4, v2
+ rsb v2, v1, v1, lsl #3
+ ldrh ip, [v2, lr]!
+ strh a2, [lr]
+ mov a3, a3, asr #20
+ and a2, ip, #255
+ adds a3, a3, a2
+ movmi a3, #0
+ cmp a3, #255
+ movgt a3, #255
+ mov a4, a4, asr #20
+ adds a4, a4, ip, lsr #8
+ movmi a4, #0
+ cmp a4, #255
+ movgt a4, #255
+ add a2, lr, #2
+ str a2, [sp, #28]
+ orr a2, a3, a4, lsl #8
+ strh a2, [v2]
+
+ ldmfd sp!, {a3, a4}
+ ldrh ip, [lr, v1]!
+ sub a2, a3, v3
+ mov a2, a2, asr #20
+ add a3, a3, v3
+ and v3, ip, #255
+ adds a2, a2, v3
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ sub v3, a4, v4
+ mov v3, v3, asr #20
+ adds v3, v3, ip, lsr #8
+ movmi v3, #0
+ cmp v3, #255
+ movgt v3, #255
+ orr a2, a2, v3, lsl #8
+ add a4, a4, v4
+ ldrh ip, [v2, -v1]!
+ strh a2, [lr]
+ mov a3, a3, asr #20
+ and a2, ip, #255
+ adds a3, a3, a2
+ movmi a3, #0
+ cmp a3, #255
+ movgt a3, #255
+ mov a4, a4, asr #20
+ adds a4, a4, ip, lsr #8
+ movmi a4, #0
+ cmp a4, #255
+ movgt a4, #255
+ orr a2, a3, a4, lsl #8
+ strh a2, [v2]
+
+ ldmfd sp!, {a3, a4}
+ ldrh ip, [lr, v1]!
+ add a2, a3, v5
+ mov a2, a2, asr #20
+ sub a3, a3, v5
+ and v3, ip, #255
+ adds a2, a2, v3
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ add v3, a4, v6
+ mov v3, v3, asr #20
+ adds v3, v3, ip, lsr #8
+ movmi v3, #0
+ cmp v3, #255
+ movgt v3, #255
+ orr a2, a2, v3, lsl #8
+ sub a4, a4, v6
+ ldrh ip, [v2, -v1]!
+ strh a2, [lr]
+ mov a3, a3, asr #20
+ and a2, ip, #255
+ adds a3, a3, a2
+ movmi a3, #0
+ cmp a3, #255
+ movgt a3, #255
+ mov a4, a4, asr #20
+ adds a4, a4, ip, lsr #8
+ movmi a4, #0
+ cmp a4, #255
+ movgt a4, #255
+ orr a2, a3, a4, lsl #8
+ strh a2, [v2]
+
+ ldmfd sp!, {a3, a4}
+ ldrh ip, [lr, v1]!
+ add a2, a3, v7
+ mov a2, a2, asr #20
+ sub a3, a3, v7
+ and v3, ip, #255
+ adds a2, a2, v3
+ movmi a2, #0
+ cmp a2, #255
+ movgt a2, #255
+ add v3, a4, fp
+ mov v3, v3, asr #20
+ adds v3, v3, ip, lsr #8
+ movmi v3, #0
+ cmp v3, #255
+ movgt v3, #255
+ orr a2, a2, v3, lsl #8
+ sub a4, a4, fp
+ ldrh ip, [v2, -v1]!
+ strh a2, [lr]
+ mov a3, a3, asr #20
+ and a2, ip, #255
+ adds a3, a3, a2
+ movmi a3, #0
+ cmp a3, #255
+ movgt a3, #255
+ mov a4, a4, asr #20
+ adds a4, a4, ip, lsr #8
+ movmi a4, #0
+ cmp a4, #255
+ movgt a4, #255
+ orr a2, a3, a4, lsl #8
+ strh a2, [v2]
+
+ ldr pc, [sp], #4
+ .endfunc
+
+ .align
+ .global simple_idct_armv5te
+ .func simple_idct_armv5te
+simple_idct_armv5te:
+ stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}
+
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+
+ sub a1, a1, #(16*7)
+
+ bl idct_col_armv5te
+ add a1, a1, #4
+ bl idct_col_armv5te
+ add a1, a1, #4
+ bl idct_col_armv5te
+ add a1, a1, #4
+ bl idct_col_armv5te
+
+ ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
+ .endfunc
+
+ .align
+ .global simple_idct_add_armv5te
+ .func simple_idct_add_armv5te
+simple_idct_add_armv5te:
+ stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
+
+ mov a1, a3
+
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+
+ sub a1, a1, #(16*7)
+
+ bl idct_col_add_armv5te
+ add a1, a1, #4
+ bl idct_col_add_armv5te
+ add a1, a1, #4
+ bl idct_col_add_armv5te
+ add a1, a1, #4
+ bl idct_col_add_armv5te
+
+ add sp, sp, #8
+ ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
+ .endfunc
+
+ .align
+ .global simple_idct_put_armv5te
+ .func simple_idct_put_armv5te
+simple_idct_put_armv5te:
+ stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
+
+ mov a1, a3
+
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+ add a1, a1, #16
+ bl idct_row_armv5te
+
+ sub a1, a1, #(16*7)
+
+ bl idct_col_put_armv5te
+ add a1, a1, #4
+ bl idct_col_put_armv5te
+ add a1, a1, #4
+ bl idct_col_put_armv5te
+ add a1, a1, #4
+ bl idct_col_put_armv5te
+
+ add sp, sp, #8
+ ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
+ .endfunc
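The W1..W7 constants are the familiar simple_idct fixed-point weights, round(cos(i*pi/16)*sqrt(2)*2^14); rows are descaled by ROW_SHIFT = 11 and columns by COL_SHIFT = 20, and W4 is stored as 16383 (the code materialises it as 16384 - 1). A small check program that regenerates the table (illustration only):

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Regenerates the W1..W7 constants defined above (illustration only). */
int main(void)
{
    for (int i = 1; i <= 7; i++) {
        int w = (int)floor(cos(i * M_PI / 16.0) * sqrt(2.0) * (1 << 14) + 0.5);
        printf("W%d = %d\n", i, w);
        /* prints 22725, 21407, 19266, 16384 (stored as 16383 above), 12873, 8867, 4520 */
    }
    return 0;
}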
diff --git a/src/libffmpeg/libavcodec/asv1.c b/contrib/ffmpeg/libavcodec/asv1.c
index 3cfb76e65..ec6bbb9ba 100644
--- a/src/libffmpeg/libavcodec/asv1.c
+++ b/contrib/ffmpeg/libavcodec/asv1.c
@@ -2,18 +2,20 @@
* ASUS V1/V2 codec
* Copyright (c) 2003 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -462,6 +464,7 @@ for(i=0; i<s->avctx->extradata_size; i++){
return (get_bits_count(&a->gb)+31)/32*4;
}
+#ifdef CONFIG_ENCODERS
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
ASV1Context * const a = avctx->priv_data;
AVFrame *pict = data;
@@ -515,6 +518,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
return size*4;
}
+#endif /* CONFIG_ENCODERS */
static void common_init(AVCodecContext *avctx){
ASV1Context * const a = avctx->priv_data;
@@ -564,6 +568,7 @@ static int decode_init(AVCodecContext *avctx){
return 0;
}
+#ifdef CONFIG_ENCODERS
static int encode_init(AVCodecContext *avctx){
ASV1Context * const a = avctx->priv_data;
int i;
@@ -587,6 +592,7 @@ static int encode_init(AVCodecContext *avctx){
return 0;
}
+#endif
static int decode_end(AVCodecContext *avctx){
ASV1Context * const a = avctx->priv_data;
@@ -632,6 +638,7 @@ AVCodec asv1_encoder = {
encode_init,
encode_frame,
//encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
AVCodec asv2_encoder = {
@@ -642,6 +649,7 @@ AVCodec asv2_encoder = {
encode_init,
encode_frame,
//encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif //CONFIG_ENCODERS
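The added .pix_fmts member is a compound-literal array terminated by -1 (PIX_FMT_NONE), listing the pixel formats the encoder accepts; callers typically scan the list up to the sentinel. A self-contained sketch of such a scan, with a stand-in enum (hypothetical helper, not lavc code):

/* Hypothetical stand-in for lavc's PixelFormat enum and the sentinel scan. */
enum PixelFormat { PIX_FMT_NONE = -1, PIX_FMT_YUV420P, PIX_FMT_YUV422P };

static int pix_fmt_supported(const enum PixelFormat *list, enum PixelFormat fmt)
{
    for (; *list != PIX_FMT_NONE; list++)   /* -1 is the sentinel used in the patch */
        if (*list == fmt)
            return 1;
    return 0;
}

/* e.g. pix_fmt_supported((enum PixelFormat[]){PIX_FMT_YUV420P, -1}, PIX_FMT_YUV420P) == 1 */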
diff --git a/contrib/ffmpeg/libavcodec/audioconvert.c b/contrib/ffmpeg/libavcodec/audioconvert.c
new file mode 100644
index 000000000..e6291ac6d
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/audioconvert.c
@@ -0,0 +1,79 @@
+/*
+ * audio conversion
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/**
+ * @file audioconvert.c
+ * audio conversion
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "avcodec.h"
+
+int av_audio_convert(void *maybe_dspcontext_or_something_av_convert_specific,
+ void *out[6], int out_stride[6], enum SampleFormat out_fmt,
+ void * in[6], int in_stride[6], enum SampleFormat in_fmt, int len){
+ int ch;
+ const int isize= FFMIN( in_fmt+1, 4);
+ const int osize= FFMIN(out_fmt+1, 4);
+ const int fmt_pair= out_fmt + 5*in_fmt;
+
+ //FIXME optimize common cases
+
+ for(ch=0; ch<6; ch++){
+ const int is= in_stride[ch] * isize;
+ const int os= out_stride[ch] * osize;
+ uint8_t *pi= in[ch];
+ uint8_t *po= out[ch];
+ uint8_t *end= po + os;
+ if(!out[ch])
+ continue;
+
+#define CONV(ofmt, otype, ifmt, expr)\
+if(fmt_pair == ofmt + 5*ifmt){\
+ do{\
+ *(otype*)po = expr; pi += is; po += os;\
+ }while(po < end);\
+}
+
+//FIXME put things below under ifdefs so we don't waste space for cases no codec will need
+//FIXME rounding and clipping?
+
+ CONV(SAMPLE_FMT_U8 , uint8_t, SAMPLE_FMT_U8 , *(uint8_t*)pi)
+ else CONV(SAMPLE_FMT_S16, int16_t, SAMPLE_FMT_U8 , (*(uint8_t*)pi - 0x80)<<8)
+ else CONV(SAMPLE_FMT_S32, int32_t, SAMPLE_FMT_U8 , (*(uint8_t*)pi - 0x80)<<24)
+ else CONV(SAMPLE_FMT_FLT, float , SAMPLE_FMT_U8 , (*(uint8_t*)pi - 0x80)*(1.0 / (1<<7)))
+ else CONV(SAMPLE_FMT_U8 , uint8_t, SAMPLE_FMT_S16, (*(int16_t*)pi>>8) + 0x80)
+ else CONV(SAMPLE_FMT_S16, int16_t, SAMPLE_FMT_S16, *(int16_t*)pi)
+ else CONV(SAMPLE_FMT_S32, int32_t, SAMPLE_FMT_S16, *(int16_t*)pi<<16)
+ else CONV(SAMPLE_FMT_FLT, float , SAMPLE_FMT_S16, *(int16_t*)pi*(1.0 / (1<<15)))
+ else CONV(SAMPLE_FMT_U8 , uint8_t, SAMPLE_FMT_S32, (*(int32_t*)pi>>24) + 0x80)
+ else CONV(SAMPLE_FMT_S16, int16_t, SAMPLE_FMT_S32, *(int32_t*)pi>>16)
+ else CONV(SAMPLE_FMT_S32, int32_t, SAMPLE_FMT_S32, *(int32_t*)pi)
+ else CONV(SAMPLE_FMT_FLT, float , SAMPLE_FMT_S32, *(int32_t*)pi*(1.0 / (1<<31)))
+ else CONV(SAMPLE_FMT_U8 , uint8_t, SAMPLE_FMT_FLT, lrintf(*(float*)pi * (1<<7)) + 0x80)
+ else CONV(SAMPLE_FMT_S16, int16_t, SAMPLE_FMT_FLT, lrintf(*(float*)pi * (1<<15)))
+ else CONV(SAMPLE_FMT_S32, int32_t, SAMPLE_FMT_FLT, lrintf(*(float*)pi * (1<<31)))
+ else CONV(SAMPLE_FMT_FLT, float , SAMPLE_FMT_FLT, *(float*)pi)
+ else return -1;
+ }
+ return 0;
+}
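av_audio_convert picks one of the sixteen CONV cases from fmt_pair = out_fmt + 5*in_fmt and converts each channel sample by sample; the scaling mirrors the table, e.g. S16 to float divides by 2^15 while float to S16 multiplies by 2^15 with lrintf and, as the FIXME notes, no clipping. One such pair in isolation (illustration, not the lavc entry point):

#include <math.h>
#include <stdint.h>

/* Illustrative S16 <-> float conversions matching the CONV() table above. */
static void s16_to_flt(float *dst, const int16_t *src, int n)
{
    for (int i = 0; i < n; i++)
        dst[i] = src[i] * (1.0f / (1 << 15));          /* SAMPLE_FMT_S16 -> SAMPLE_FMT_FLT scale */
}

static void flt_to_s16(int16_t *dst, const float *src, int n)
{
    for (int i = 0; i < n; i++)
        dst[i] = (int16_t)lrintf(src[i] * (1 << 15));  /* no clipping, as in the FIXME above */
}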
diff --git a/src/libffmpeg/libavcodec/avcodec.h b/contrib/ffmpeg/libavcodec/avcodec.h
index 9be5dcf6e..da063b562 100644
--- a/src/libffmpeg/libavcodec/avcodec.h
+++ b/contrib/ffmpeg/libavcodec/avcodec.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#ifndef AVCODEC_H
#define AVCODEC_H
@@ -17,8 +37,8 @@ extern "C" {
#define AV_STRINGIFY(s) AV_TOSTRING(s)
#define AV_TOSTRING(s) #s
-#define LIBAVCODEC_VERSION_INT ((51<<16)+(11<<8)+0)
-#define LIBAVCODEC_VERSION 51.11.0
+#define LIBAVCODEC_VERSION_INT ((51<<16)+(25<<8)+0)
+#define LIBAVCODEC_VERSION 51.25.0
#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
@@ -27,13 +47,6 @@ extern "C" {
#define AV_TIME_BASE 1000000
#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}
-/* FIXME: We cannot use ffmpeg's XvMC capabilities, since that would require
- * linking the ffmpeg plugin against XvMC libraries, which is a bad thing,
- * since they are output dependend.
- * The correct fix would be to reimplement the XvMC functions libavcodec uses
- * and do the necessary talking with our XvMC output plugin there. */
-#undef HAVE_XVMC
-
enum CodecID {
CODEC_ID_NONE,
CODEC_ID_MPEG1VIDEO,
@@ -126,6 +139,16 @@ enum CodecID {
CODEC_ID_KMVC,
CODEC_ID_FLASHSV,
CODEC_ID_CAVS,
+ CODEC_ID_JPEG2000,
+ CODEC_ID_VMNC,
+ CODEC_ID_VP5,
+ CODEC_ID_VP6,
+ CODEC_ID_VP6F,
+ CODEC_ID_TARGA,
+ CODEC_ID_DSICINVIDEO,
+ CODEC_ID_TIERTEXSEQVIDEO,
+ CODEC_ID_TIFF,
+ CODEC_ID_GIF,
/* various pcm "codecs" */
CODEC_ID_PCM_S16LE= 0x10000,
@@ -183,7 +206,9 @@ enum CodecID {
CODEC_ID_MP2= 0x15000,
 CODEC_ID_MP3, /* preferred ID for MPEG Audio layer 1, 2 or 3 decoding */
CODEC_ID_AAC,
+#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
CODEC_ID_MPEG4AAC,
+#endif
CODEC_ID_AC3,
CODEC_ID_DTS,
CODEC_ID_VORBIS,
@@ -207,6 +232,10 @@ enum CodecID {
CODEC_ID_TRUESPEECH,
CODEC_ID_TTA,
CODEC_ID_SMACKAUDIO,
+ CODEC_ID_QCELP,
+ CODEC_ID_WAVPACK,
+ CODEC_ID_DSICINAUDIO,
+ CODEC_ID_IMC,
/* subtitle codecs */
CODEC_ID_DVD_SUBTITLE= 0x17000,
@@ -343,6 +372,7 @@ typedef struct RcOverride{
#define CODEC_FLAG2_BRDO 0x00000400 ///< b-frame rate-distortion optimization
#define CODEC_FLAG2_INTRA_VLC 0x00000800 ///< use MPEG-2 intra VLC table
#define CODEC_FLAG2_MEMC_ONLY 0x00001000 ///< only do ME/MC (I frames -> ref, P frame -> ME+MC)
+#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format
/* Unsupported options :
* Syntax Arithmetic coding (SAC)
@@ -723,7 +753,7 @@ typedef struct AVCodecContext {
* - encoding: set/allocated/freed by lavc.
* - decoding: set/allocated/freed by user.
*/
- void *extradata;
+ uint8_t *extradata;
int extradata_size;
/**
@@ -1200,6 +1230,7 @@ typedef struct AVCodecContext {
#define FF_IDCT_IPP 13
#define FF_IDCT_XVIDMMX 14
#define FF_IDCT_CAVS 15
+#define FF_IDCT_SIMPLEARMV5TE 16
/**
* slice count.
@@ -1863,7 +1894,7 @@ typedef struct AVCodecContext {
* - encoding: set by user.
* - decoding: unused
*/
- int crf;
+ float crf;
/**
* constant quantization parameter rate control method
@@ -2029,6 +2060,13 @@ typedef struct AVCodecContext {
* - decoding: unused.
*/
int max_partition_order;
+
+ /**
+ * GOP timecode frame start number, in non drop frame format
+ * - encoding: set by user.
+ * - decoding: unused.
+ */
+ int64_t timecode_frame_start;
} AVCodecContext;
/**
@@ -2067,6 +2105,7 @@ typedef struct AVPicture {
* AVPaletteControl
* This structure defines a method for communicating palette changes
 * between a demuxer and a decoder.
+ * this is totally broken, palette changes should be sent as AVPackets
*/
#define AVPALETTE_SIZE 1024
#define AVPALETTE_COUNT 256
@@ -2082,7 +2121,7 @@ typedef struct AVPaletteControl {
* data is probably 6 bits in size and needs to be scaled */
unsigned int palette[AVPALETTE_COUNT];
-} AVPaletteControl;
+} AVPaletteControl attribute_deprecated;
typedef struct AVSubtitleRect {
uint16_t x;
@@ -2109,6 +2148,7 @@ extern AVCodec mp3lame_encoder;
extern AVCodec oggvorbis_encoder;
extern AVCodec faac_encoder;
extern AVCodec flac_encoder;
+extern AVCodec gif_encoder;
extern AVCodec xvid_encoder;
extern AVCodec mpeg1video_encoder;
extern AVCodec mpeg2video_encoder;
@@ -2142,6 +2182,7 @@ extern AVCodec asv2_encoder;
extern AVCodec vcr1_encoder;
extern AVCodec ffv1_encoder;
extern AVCodec snow_encoder;
+extern AVCodec vorbis_encoder;
extern AVCodec mdec_encoder;
extern AVCodec zlib_encoder;
extern AVCodec sonic_encoder;
@@ -2149,6 +2190,7 @@ extern AVCodec sonic_ls_encoder;
extern AVCodec svq1_encoder;
extern AVCodec x264_encoder;
+extern AVCodec gif_decoder;
extern AVCodec h263_decoder;
extern AVCodec h261_decoder;
extern AVCodec mpeg4_decoder;
@@ -2196,6 +2238,9 @@ extern AVCodec h264_decoder;
extern AVCodec indeo3_decoder;
extern AVCodec vp3_decoder;
extern AVCodec theora_decoder;
+extern AVCodec vp5_decoder;
+extern AVCodec vp6_decoder;
+extern AVCodec vp6f_decoder;
extern AVCodec amr_nb_decoder;
extern AVCodec amr_nb_encoder;
extern AVCodec amr_wb_encoder;
@@ -2264,6 +2309,14 @@ extern AVCodec smackaud_decoder;
extern AVCodec kmvc_decoder;
extern AVCodec flashsv_decoder;
extern AVCodec cavs_decoder;
+extern AVCodec vmnc_decoder;
+extern AVCodec wavpack_decoder;
+extern AVCodec targa_decoder;
+extern AVCodec dsicinvideo_decoder;
+extern AVCodec dsicinaudio_decoder;
+extern AVCodec tiertexseqvideo_decoder;
+extern AVCodec tiff_decoder;
+extern AVCodec imc_decoder;
/* pcm codecs */
#define PCM_CODEC(id, name) \
@@ -2456,6 +2509,17 @@ int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, v
*/
int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
+/**
+ * Decode an audio frame.
+ *
+ * @param avctx the codec context.
+ * @param samples output buffer, 16 byte aligned
+ * @param frame_size_ptr the output buffer size in bytes, zero if no frame could be decompressed
+ * @param buf input buffer, 16 byte aligned
+ * @param buf_size the input buffer size
+ * @return 0 if successful, -1 if not.
+ */
+
int avcodec_decode_audio(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
uint8_t *buf, int buf_size);
@@ -2598,12 +2662,12 @@ void av_bitstream_filter_close(AVBitStreamFilterContext *bsf);
extern AVBitStreamFilter dump_extradata_bsf;
extern AVBitStreamFilter remove_extradata_bsf;
extern AVBitStreamFilter noise_bsf;
+extern AVBitStreamFilter mp3_header_compress_bsf;
+extern AVBitStreamFilter mp3_header_decompress_bsf;
+extern AVBitStreamFilter mjpega_dump_header_bsf;
/* memory */
-void *av_mallocz(unsigned int size);
-char *av_strdup(const char *s);
-void av_freep(void *ptr);
void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size);
/* for static data only */
 /* call av_free_static to release all statically allocated tables */
@@ -2636,13 +2700,6 @@ int img_pad(AVPicture *dst, const AVPicture *src, int height, int width, int pix
extern unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
-/* unused static macro */
-#if defined(__GNUC__) && !defined(DEBUG)
-/* since we do not compile the encoder part of ffmpeg, some static
- * functions will be unused; this is ok, the compiler will take care */
-# define static static __attribute__((__unused__))
-#endif
-
#ifdef __cplusplus
}
#endif
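The version bump packs major, minor and micro into one integer, (major<<16)+(minor<<8)+micro, so version checks such as the CODEC_ID_MPEG4AAC guard above reduce to a plain integer comparison. Decoding the packed value (illustration only):

#include <stdio.h>

#define LIBAVCODEC_VERSION_INT ((51<<16)+(25<<8)+0)   /* value after this patch */

int main(void)
{
    int v = LIBAVCODEC_VERSION_INT;
    printf("lavc %d.%d.%d\n", v >> 16, (v >> 8) & 0xff, v & 0xff);  /* prints "lavc 51.25.0" */
    return 0;
}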
diff --git a/src/libffmpeg/libavcodec/avs.c b/contrib/ffmpeg/libavcodec/avs.c
index 557e9becb..953aea1be 100644
--- a/src/libffmpeg/libavcodec/avs.c
+++ b/contrib/ffmpeg/libavcodec/avs.c
@@ -2,18 +2,20 @@
* AVS video decoder.
* Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/contrib/ffmpeg/libavcodec/beosthread.c b/contrib/ffmpeg/libavcodec/beosthread.c
new file mode 100644
index 000000000..3d059912b
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/beosthread.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2004 François Revol <revol@free.fr>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+//#define DEBUG
+
+#include "avcodec.h"
+#include "common.h"
+
+#include <OS.h>
+
+typedef struct ThreadContext{
+ AVCodecContext *avctx;
+ thread_id thread;
+ sem_id work_sem;
+ sem_id done_sem;
+ int (*func)(AVCodecContext *c, void *arg);
+ void *arg;
+ int ret;
+}ThreadContext;
+
+// it's odd Be never patented that :D
+struct benaphore {
+ vint32 atom;
+ sem_id sem;
+};
+static inline int lock_ben(struct benaphore *ben)
+{
+ if (atomic_add(&ben->atom, 1) > 0)
+ return acquire_sem(ben->sem);
+ return B_OK;
+}
+static inline int unlock_ben(struct benaphore *ben)
+{
+ if (atomic_add(&ben->atom, -1) > 1)
+ return release_sem(ben->sem);
+ return B_OK;
+}
+
+static struct benaphore av_thread_lib_ben;
+
+static int32 ff_thread_func(void *v){
+ ThreadContext *c= v;
+
+ for(;;){
+//printf("thread_func %X enter wait\n", (int)v); fflush(stdout);
+ acquire_sem(c->work_sem);
+//printf("thread_func %X after wait (func=%X)\n", (int)v, (int)c->func); fflush(stdout);
+ if(c->func)
+ c->ret= c->func(c->avctx, c->arg);
+ else
+ return 0;
+//printf("thread_func %X signal complete\n", (int)v); fflush(stdout);
+ release_sem(c->done_sem);
+ }
+
+ return B_OK;
+}
+
+/**
+ * free what has been allocated by avcodec_thread_init().
+ * must be called after decoding has finished, especially don't call it while avcodec_thread_execute() is running
+ */
+void avcodec_thread_free(AVCodecContext *s){
+ ThreadContext *c= s->thread_opaque;
+ int i;
+ int32 ret;
+
+ for(i=0; i<s->thread_count; i++){
+
+ c[i].func= NULL;
+ release_sem(c[i].work_sem);
+ wait_for_thread(c[i].thread, &ret);
+ if(c[i].work_sem > B_OK) delete_sem(c[i].work_sem);
+ if(c[i].done_sem > B_OK) delete_sem(c[i].done_sem);
+ }
+
+ av_freep(&s->thread_opaque);
+}
+
+int avcodec_thread_execute(AVCodecContext *s, int (*func)(AVCodecContext *c2, void *arg2),void **arg, int *ret, int count){
+ ThreadContext *c= s->thread_opaque;
+ int i;
+
+ assert(s == c->avctx);
+ assert(count <= s->thread_count);
+
+ /* note, we can be certain that this is not called with the same AVCodecContext by different threads at the same time */
+
+ for(i=0; i<count; i++){
+ c[i].arg= arg[i];
+ c[i].func= func;
+ c[i].ret= 12345;
+
+ release_sem(c[i].work_sem);
+ }
+ for(i=0; i<count; i++){
+ acquire_sem(c[i].done_sem);
+
+ c[i].func= NULL;
+ if(ret) ret[i]= c[i].ret;
+ }
+ return 0;
+}
+
+int avcodec_thread_init(AVCodecContext *s, int thread_count){
+ int i;
+ ThreadContext *c;
+
+ s->thread_count= thread_count;
+
+ assert(!s->thread_opaque);
+ c= av_mallocz(sizeof(ThreadContext)*thread_count);
+ s->thread_opaque= c;
+
+ for(i=0; i<thread_count; i++){
+//printf("init semaphors %d\n", i); fflush(stdout);
+ c[i].avctx= s;
+
+ if((c[i].work_sem = create_sem(0, "ff work sem")) < B_OK)
+ goto fail;
+ if((c[i].done_sem = create_sem(0, "ff done sem")) < B_OK)
+ goto fail;
+
+//printf("create thread %d\n", i); fflush(stdout);
+ c[i].thread = spawn_thread(ff_thread_func, "libavcodec thread", B_LOW_PRIORITY, &c[i] );
+ if( c[i].thread < B_OK ) goto fail;
+ resume_thread(c[i].thread );
+ }
+//printf("init done\n"); fflush(stdout);
+
+ s->execute= avcodec_thread_execute;
+
+ return 0;
+fail:
+ avcodec_thread_free(s);
+ return -1;
+}
+
+/* provide a means to serialize calls to avcodec_*() for thread safety. */
+
+int avcodec_thread_lock_lib(void)
+{
+ return lock_ben(&av_thread_lib_ben);
+}
+
+int avcodec_thread_unlock_lib(void)
+{
+ return unlock_ben(&av_thread_lib_ben);
+}
+
+/* our versions of _init and _fini (which are called by those actually from crt.o) */
+
+void initialize_after(void)
+{
+ av_thread_lib_ben.atom = 0;
+ av_thread_lib_ben.sem = create_sem(0, "libavcodec benaphore");
+}
+
+void uninitialize_before(void)
+{
+ delete_sem(av_thread_lib_ben.sem);
+}
+
+
+
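The benaphore pairs an atomic counter with a semaphore so the kernel object is only touched under contention: lock_ben sleeps on the semaphore only when another holder has already incremented the counter, and unlock_ben posts it only when a waiter exists. A portable sketch of the same pattern using C11 atomics and POSIX semaphores (an assumption; the file above uses BeOS sem_id and atomic_add):

#include <semaphore.h>
#include <stdatomic.h>

/* Portable benaphore sketch; sem must be initialised with sem_init(&b->sem, 0, 0). */
struct pbenaphore {
    atomic_int atom;
    sem_t      sem;
};

static void ben_lock(struct pbenaphore *b)
{
    if (atomic_fetch_add(&b->atom, 1) > 0)   /* contended: wait for the current holder */
        sem_wait(&b->sem);
}

static void ben_unlock(struct pbenaphore *b)
{
    if (atomic_fetch_add(&b->atom, -1) > 1)  /* a waiter exists: wake one */
        sem_post(&b->sem);
}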
diff --git a/contrib/ffmpeg/libavcodec/bfin/dsputil_bfin.c b/contrib/ffmpeg/libavcodec/bfin/dsputil_bfin.c
new file mode 100644
index 000000000..196ef6fea
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/bfin/dsputil_bfin.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2006 Michael Benjamin
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../avcodec.h"
+#include "../dsputil.h"
+
+static int sad8x8_bfin( void *c, uint8_t *blk1, uint8_t *blk2, int line_size, int h )
+{
+ int sum;
+ __asm__ __volatile__ (
+ "P0 = %1;" // blk1
+ "P1 = %2;" // blk2
+ "P2 = %3;\n" // h
+ "I0 = P0;"
+ "I1 = P1;\n"
+ "A0 = 0;"
+ "A1 = 0;\n"
+ "M0 = P2;\n"
+ "P3 = 32;\n"
+ "LSETUP (sad8x8LoopBegin, sad8x8LoopEnd) LC0=P3;\n"
+ "sad8x8LoopBegin:\n"
+ " DISALGNEXCPT || R0 = [I0] || R2 = [I1];\n"
+ " DISALGNEXCPT || R1 = [I0++] || R3 = [I1++];\n"
+ "sad8x8LoopEnd:\n"
+ " SAA ( R1:0 , R3:2 );\n"
+ "R3 = A1.L + A1.H, R2 = A0.L + A0.H;\n"
+ "%0 = R2 + R3 (S);\n"
+ : "=&d" (sum)
+ : "m"(blk1), "m"(blk2), "m"(h)
+ : "P0","P1","P2","I0","I1","A0","A1","R0","R1","R2","R3");
+ return sum;
+}
+
+void dsputil_init_bfin( DSPContext* c, AVCodecContext *avctx )
+{
+ c->pix_abs[1][0] = sad8x8_bfin;
+ c->sad[1] = sad8x8_bfin;
+}
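The SAA instruction accumulates absolute byte differences four at a time; what the pix_abs[1][0]/sad[1] hook is expected to return is the plain 8x8 sum of absolute differences. A scalar reference of that definition (illustration only; the asm above streams the blocks through hardware loop registers):

#include <stdint.h>
#include <stdlib.h>

/* Scalar reference for the 8x8 SAD the hook above provides (illustration only). */
static int sad8x8_c(const uint8_t *blk1, const uint8_t *blk2, int line_size, int h)
{
    int sum = 0;
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)
            sum += abs(blk1[x] - blk2[x]);
        blk1 += line_size;
        blk2 += line_size;
    }
    return sum;
}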
diff --git a/src/libffmpeg/libavcodec/bitstream.c b/contrib/ffmpeg/libavcodec/bitstream.c
index 49c6ece1b..22d256df5 100755..100644
--- a/src/libffmpeg/libavcodec/bitstream.c
+++ b/contrib/ffmpeg/libavcodec/bitstream.c
@@ -3,18 +3,20 @@
* Copyright (c) 2000, 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* alternative bitstream reader & writer by Michael Niedermayer <michaelni@gmx.at>
@@ -47,47 +49,6 @@ void ff_put_string(PutBitContext * pbc, char *s, int put_zero)
put_bits(pbc, 8, 0);
}
-/* bit input functions */
-
-/**
- * reads 0-32 bits.
- */
-unsigned int get_bits_long(GetBitContext *s, int n){
- if(n<=17) return get_bits(s, n);
- else{
- int ret= get_bits(s, 16) << (n-16);
- return ret | get_bits(s, n-16);
- }
-}
-
-/**
- * shows 0-32 bits.
- */
-unsigned int show_bits_long(GetBitContext *s, int n){
- if(n<=17) return show_bits(s, n);
- else{
- GetBitContext gb= *s;
- int ret= get_bits_long(s, n);
- *s= gb;
- return ret;
- }
-}
-
-void align_get_bits(GetBitContext *s)
-{
- int n= (-get_bits_count(s)) & 7;
- if(n) skip_bits(s, n);
-}
-
-int check_marker(GetBitContext *s, const char *msg)
-{
- int bit= get_bits1(s);
- if(!bit)
- av_log(NULL, AV_LOG_INFO, "Marker bit missing %s\n", msg);
-
- return bit;
-}
-
/* VLC decoding */
//#define DEBUG_VLC
diff --git a/src/libffmpeg/libavcodec/bitstream.h b/contrib/ffmpeg/libavcodec/bitstream.h
index 10db64d33..af25b6dcf 100644
--- a/src/libffmpeg/libavcodec/bitstream.h
+++ b/contrib/ffmpeg/libavcodec/bitstream.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file bitstream.h
* bitstream api header.
@@ -6,17 +26,28 @@
#ifndef BITSTREAM_H
#define BITSTREAM_H
+#include "log.h"
+
+#if defined(ALT_BITSTREAM_READER_LE) && !defined(ALT_BITSTREAM_READER)
+#define ALT_BITSTREAM_READER
+#endif
+
//#define ALT_BITSTREAM_WRITER
//#define ALIGNED_BITSTREAM_WRITER
-
+#if !defined(LIBMPEG2_BITSTREAM_READER) && !defined(A32_BITSTREAM_READER) && !defined(ALT_BITSTREAM_READER)
+# ifdef ARCH_ARMV4L
+# define A32_BITSTREAM_READER
+# else
#define ALT_BITSTREAM_READER
//#define LIBMPEG2_BITSTREAM_READER
//#define A32_BITSTREAM_READER
+# endif
+#endif
#define LIBMPEG2_BITSTREAM_READER_HACK //add BERO
extern const uint8_t ff_reverse[256];
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
// avoid +32 for shift optimization (gcc should do that ...)
static inline int32_t NEG_SSR32( int32_t a, int8_t s){
asm ("sarl %1, %0\n\t"
@@ -140,7 +171,7 @@ typedef struct RL_VLC_ELEM {
#endif
 /* used to avoid misaligned exceptions on some archs (alpha, ...) */
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
# define unaligned16(a) (*(const uint16_t*)(a))
# define unaligned32(a) (*(const uint32_t*)(a))
# define unaligned64(a) (*(const uint64_t*)(a))
@@ -169,7 +200,7 @@ unaligned(16)
unaligned(32)
unaligned(64)
#undef unaligned
-#endif //!ARCH_X86
+#endif /* defined(ARCH_X86) */
#ifndef ALT_BITSTREAM_WRITER
static inline void put_bits(PutBitContext *s, int n, unsigned int value)
@@ -216,7 +247,7 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
static inline void put_bits(PutBitContext *s, int n, unsigned int value)
{
# ifdef ALIGNED_BITSTREAM_WRITER
-# if defined(ARCH_X86) || defined(ARCH_X86_64)
+# if defined(ARCH_X86)
asm volatile(
"movl %0, %%ecx \n\t"
"xorl %%eax, %%eax \n\t"
@@ -247,7 +278,7 @@ static inline void put_bits(PutBitContext *s, int n, unsigned int value)
s->index= index;
# endif
# else //ALIGNED_BITSTREAM_WRITER
-# if defined(ARCH_X86) || defined(ARCH_X86_64)
+# if defined(ARCH_X86)
asm volatile(
"movl $7, %%ecx \n\t"
"andl %0, %%ecx \n\t"
@@ -429,13 +460,16 @@ static inline int unaligned32_le(const void *v)
# ifdef ALT_BITSTREAM_READER_LE
# define SHOW_UBITS(name, gb, num)\
((name##_cache) & (NEG_USR32(0xffffffff,num)))
+
+# define SHOW_SBITS(name, gb, num)\
+ NEG_SSR32((name##_cache)<<(32-(num)), num)
# else
# define SHOW_UBITS(name, gb, num)\
NEG_USR32(name##_cache, num)
-# endif
# define SHOW_SBITS(name, gb, num)\
NEG_SSR32(name##_cache, num)
+# endif
# define GET_CACHE(name, gb)\
((uint32_t)name##_cache)
@@ -443,6 +477,11 @@ static inline int unaligned32_le(const void *v)
static inline int get_bits_count(GetBitContext *s){
return s->index;
}
+
+static inline void skip_bits_long(GetBitContext *s, int n){
+ s->index += n;
+}
+
#elif defined LIBMPEG2_BITSTREAM_READER
//libmpeg2 like reader
@@ -506,6 +545,16 @@ static inline int get_bits_count(GetBitContext *s){
return (s->buffer_ptr - s->buffer)*8 - 16 + s->bit_count;
}
+static inline void skip_bits_long(GetBitContext *s, int n){
+ OPEN_READER(re, s)
+ re_bit_count += n;
+ re_buffer_ptr += 2*(re_bit_count>>4);
+ re_bit_count &= 15;
+ re_cache = ((re_buffer_ptr[-2]<<8) + re_buffer_ptr[-1]) << (16+re_bit_count);
+ UPDATE_CACHE(re, s)
+ CLOSE_READER(re, s)
+}
+
#elif defined A32_BITSTREAM_READER
# define MIN_CACHE_BITS 32
@@ -531,13 +580,13 @@ static inline int get_bits_count(GetBitContext *s){
name##_bit_count-= 32;\
}\
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
# define SKIP_CACHE(name, gb, num)\
asm(\
"shldl %2, %1, %0 \n\t"\
"shll %2, %1 \n\t"\
: "+r" (name##_cache0), "+r" (name##_cache1)\
- : "Ic" ((uint8_t)num)\
+ : "Ic" ((uint8_t)(num))\
);
#else
# define SKIP_CACHE(name, gb, num)\
@@ -571,6 +620,17 @@ static inline int get_bits_count(GetBitContext *s){
return ((uint8_t*)s->buffer_ptr - s->buffer)*8 - 32 + s->bit_count;
}
+static inline void skip_bits_long(GetBitContext *s, int n){
+ OPEN_READER(re, s)
+ re_bit_count += n;
+ re_buffer_ptr += re_bit_count>>5;
+ re_bit_count &= 31;
+ re_cache0 = be2me_32( re_buffer_ptr[-1] ) << re_bit_count;
+ re_cache1 = 0;
+ UPDATE_CACHE(re, s)
+ CLOSE_READER(re, s)
+}
+
#endif
/**
@@ -615,8 +675,6 @@ static inline unsigned int get_bits(GetBitContext *s, int n){
return tmp;
}
-unsigned int get_bits_long(GetBitContext *s, int n);
-
/**
* shows 0-17 bits.
* Note, the alt bitstream reader can read up to 25 bits, but the libmpeg2 reader can't
@@ -630,8 +688,6 @@ static inline unsigned int show_bits(GetBitContext *s, int n){
return tmp;
}
-unsigned int show_bits_long(GetBitContext *s, int n);
-
static inline void skip_bits(GetBitContext *s, int n){
//Note gcc seems to optimize this to s->index+=n for the ALT_READER :))
OPEN_READER(re, s)
@@ -669,6 +725,44 @@ static inline void skip_bits1(GetBitContext *s){
}
/**
+ * reads 0-32 bits.
+ */
+static inline unsigned int get_bits_long(GetBitContext *s, int n){
+ if(n<=17) return get_bits(s, n);
+ else{
+#ifdef ALT_BITSTREAM_READER_LE
+ int ret= get_bits(s, 16);
+ return ret | (get_bits(s, n-16) << 16);
+#else
+ int ret= get_bits(s, 16) << (n-16);
+ return ret | get_bits(s, n-16);
+#endif
+ }
+}
+
+/**
+ * shows 0-32 bits.
+ */
+static inline unsigned int show_bits_long(GetBitContext *s, int n){
+ if(n<=17) return show_bits(s, n);
+ else{
+ GetBitContext gb= *s;
+ int ret= get_bits_long(s, n);
+ *s= gb;
+ return ret;
+ }
+}
+
+static inline int check_marker(GetBitContext *s, const char *msg)
+{
+ int bit= get_bits1(s);
+ if(!bit)
+ av_log(NULL, AV_LOG_INFO, "Marker bit missing %s\n", msg);
+
+ return bit;
+}
+
+/**
* init GetBitContext.
* @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger then the actual read bits
* because some optimized bitstream readers read 32 or 64 bit at once and could read over the end
@@ -689,38 +783,22 @@ static inline void init_get_bits(GetBitContext *s,
#ifdef ALT_BITSTREAM_READER
s->index=0;
#elif defined LIBMPEG2_BITSTREAM_READER
-#ifdef LIBMPEG2_BITSTREAM_READER_HACK
- if ((int)buffer&1) {
- /* word alignment */
- s->cache = (*buffer++)<<24;
- s->buffer_ptr = buffer;
- s->bit_count = 16-8;
- } else
-#endif
- {
- s->buffer_ptr = buffer;
- s->bit_count = 16;
- s->cache = 0;
- }
+ s->buffer_ptr = (uint8_t*)((intptr_t)buffer&(~1));
+ s->bit_count = 16 + 8*((intptr_t)buffer&1);
+ skip_bits_long(s, 0);
#elif defined A32_BITSTREAM_READER
- s->buffer_ptr = (uint32_t*)buffer;
- s->bit_count = 32;
- s->cache0 = 0;
- s->cache1 = 0;
-#endif
- {
- OPEN_READER(re, s)
- UPDATE_CACHE(re, s)
- UPDATE_CACHE(re, s)
- CLOSE_READER(re, s)
- }
-#ifdef A32_BITSTREAM_READER
- s->cache1 = 0;
+ s->buffer_ptr = (uint32_t*)((intptr_t)buffer&(~3));
+ s->bit_count = 32 + 8*((intptr_t)buffer&3);
+ skip_bits_long(s, 0);
#endif
}
-int check_marker(GetBitContext *s, const char *msg);
-void align_get_bits(GetBitContext *s);
+static inline void align_get_bits(GetBitContext *s)
+{
+ int n= (-get_bits_count(s)) & 7;
+ if(n) skip_bits(s, n);
+}
+
int init_vlc(VLC *vlc, int nb_bits, int nb_codes,
const void *bits, int bits_wrap, int bits_size,
const void *codes, int codes_wrap, int codes_size,
@@ -816,7 +894,6 @@ static always_inline int get_vlc2(GetBitContext *s, VLC_TYPE (*table)[2],
//#define TRACE
#ifdef TRACE
-#include "avcodec.h"
static inline void print_bin(int bits, int n){
int i;
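get_bits_long and show_bits_long become inline here and split a 0-32 bit read into two pieces of at most 16 bits, recombined in the opposite order for the little-endian reader. A self-contained MSB-first sketch of the splitting idea (not the real GetBitContext API):

#include <stdint.h>

/* Minimal MSB-first bit reader sketch (illustration, not the lavc reader). */
typedef struct { const uint8_t *buf; int index; } MiniGB;

static unsigned mini_get_bits(MiniGB *s, int n)        /* n <= 24 here, for simplicity */
{
    unsigned v = 0;
    for (int i = 0; i < n; i++, s->index++)
        v = (v << 1) | ((s->buf[s->index >> 3] >> (7 - (s->index & 7))) & 1);
    return v;
}

static unsigned mini_get_bits_long(MiniGB *s, int n)   /* 0..32 bits, like the patch */
{
    if (n <= 17)
        return mini_get_bits(s, n);
    unsigned hi = mini_get_bits(s, 16) << (n - 16);     /* big-endian branch above */
    return hi | mini_get_bits(s, n - 16);
}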
diff --git a/contrib/ffmpeg/libavcodec/bitstream_filter.c b/contrib/ffmpeg/libavcodec/bitstream_filter.c
new file mode 100644
index 000000000..b52acf60a
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/bitstream_filter.c
@@ -0,0 +1,284 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "mpegaudio.h"
+
+AVBitStreamFilter *first_bitstream_filter= NULL;
+
+void av_register_bitstream_filter(AVBitStreamFilter *bsf){
+ bsf->next = first_bitstream_filter;
+ first_bitstream_filter= bsf;
+}
+
+AVBitStreamFilterContext *av_bitstream_filter_init(const char *name){
+ AVBitStreamFilter *bsf= first_bitstream_filter;
+
+ while(bsf){
+ if(!strcmp(name, bsf->name)){
+ AVBitStreamFilterContext *bsfc= av_mallocz(sizeof(AVBitStreamFilterContext));
+ bsfc->filter= bsf;
+ bsfc->priv_data= av_mallocz(bsf->priv_data_size);
+ return bsfc;
+ }
+ bsf= bsf->next;
+ }
+ return NULL;
+}
+
+void av_bitstream_filter_close(AVBitStreamFilterContext *bsfc){
+ av_freep(&bsfc->priv_data);
+ av_parser_close(bsfc->parser);
+ av_free(bsfc);
+}
+
+int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe){
+ *poutbuf= (uint8_t *) buf;
+ *poutbuf_size= buf_size;
+ return bsfc->filter->filter(bsfc, avctx, args, poutbuf, poutbuf_size, buf, buf_size, keyframe);
+}
+
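+/* Minimal usage sketch, assuming the caller has already registered the filter
+ * with av_register_bitstream_filter(&dump_extradata_bsf) and owns an opened
+ * AVCodecContext plus one coded packet (buf, buf_size, keyframe); the
+ * example_* helper name is illustrative only. A positive return value from
+ * av_bitstream_filter_filter() means *out is a newly allocated buffer that
+ * the caller must free. */
+static void example_apply_dump_extra(AVCodecContext *avctx, const uint8_t *buf,
+ int buf_size, int keyframe){
+ AVBitStreamFilterContext *bsfc= av_bitstream_filter_init("dump_extra");
+ uint8_t *out; int out_size;
+
+ if(!bsfc)
+ return;
+ if(av_bitstream_filter_filter(bsfc, avctx, NULL, &out, &out_size,
+ buf, buf_size, keyframe) > 0)
+ av_free(out); /* new buffer with extradata prepended; normally it would be consumed first */
+ av_bitstream_filter_close(bsfc);
+}
+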
+static int dump_extradata(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe){
+ int cmd= args ? *args : 0;
+ /* cast to avoid warning about discarding qualifiers */
+ if(avctx->extradata){
+ if( (keyframe && (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER) && cmd=='a')
+ ||(keyframe && (cmd=='k' || !cmd))
+ ||(cmd=='e')
+ /*||(? && (s->flags & PARSER_FLAG_DUMP_EXTRADATA_AT_BEGIN)*/){
+ int size= buf_size + avctx->extradata_size;
+ *poutbuf_size= size;
+ *poutbuf= av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ memcpy(*poutbuf, avctx->extradata, avctx->extradata_size);
+ memcpy((*poutbuf) + avctx->extradata_size, buf, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int remove_extradata(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe){
+ int cmd= args ? *args : 0;
+ AVCodecParserContext *s;
+
+ if(!bsfc->parser){
+ bsfc->parser= av_parser_init(avctx->codec_id);
+ }
+ s= bsfc->parser;
+
+ if(s && s->parser->split){
+ if( (((avctx->flags & CODEC_FLAG_GLOBAL_HEADER) || (avctx->flags2 & CODEC_FLAG2_LOCAL_HEADER)) && cmd=='a')
+ ||(!keyframe && cmd=='k')
+ ||(cmd=='e' || !cmd)
+ ){
+ int i= s->parser->split(avctx, buf, buf_size);
+ buf += i;
+ buf_size -= i;
+ }
+ }
+ *poutbuf= (uint8_t *) buf;
+ *poutbuf_size= buf_size;
+
+ return 0;
+}
+
+static int noise(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe){
+ int amount= args ? atoi(args) : 10000;
+ unsigned int *state= bsfc->priv_data;
+ int i;
+
+ *poutbuf= av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ memcpy(*poutbuf, buf, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ for(i=0; i<buf_size; i++){
+ (*state) += (*poutbuf)[i] + 1;
+ if(*state % amount == 0)
+ (*poutbuf)[i] = *state;
+ }
+ return 1;
+}
+
+#define MP3_MASK 0xFFFE0CCF
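+/* the bits cleared by MP3_MASK (0x0001F330) are the CRC/protection flag, the
+ bitrate index, the padding bit, the private bit and the mode extension:
+ exactly the header fields mp3_header_compress strips and
+ mp3_header_decompress later reconstructs (or leaves zeroed) */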
+
+static int mp3_header_compress(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe){
+ uint32_t header, extraheader;
+ int mode_extension, header_size;
+
+ if(avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL){
+ av_log(avctx, AV_LOG_ERROR, "not standards compliant\n");
+ return -1;
+ }
+
+ header = BE_32(buf);
+ mode_extension= (header>>4)&3;
+
+ if(ff_mpa_check_header(header) < 0 || (header&0x60000) != 0x20000){
+output_unchanged:
+ *poutbuf= (uint8_t *) buf;
+ *poutbuf_size= buf_size;
+
+ av_log(avctx, AV_LOG_INFO, "cannot compress %08X\n", header);
+ return 0;
+ }
+
+ if(avctx->extradata_size == 0){
+ avctx->extradata_size=15;
+ avctx->extradata= av_malloc(avctx->extradata_size);
+ strcpy(avctx->extradata, "FFCMP3 0.0");
+ memcpy(avctx->extradata+11, buf, 4);
+ }
+ if(avctx->extradata_size != 15){
+ av_log(avctx, AV_LOG_ERROR, "Extradata invalid\n");
+ return -1;
+ }
+ extraheader = BE_32(avctx->extradata+11);
+ if((extraheader&MP3_MASK) != (header&MP3_MASK))
+ goto output_unchanged;
+
+ header_size= (header&0x10000) ? 4 : 6;
+
+ *poutbuf_size= buf_size - header_size;
+ *poutbuf= av_malloc(buf_size - header_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ memcpy(*poutbuf, buf + header_size, buf_size - header_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if(avctx->channels==2){
+ if((header & (3<<19)) != 3<<19){
+ (*poutbuf)[1] &= 0x3F;
+ (*poutbuf)[1] |= mode_extension<<6;
+ FFSWAP(int, (*poutbuf)[1], (*poutbuf)[2]);
+ }else{
+ (*poutbuf)[1] &= 0x8F;
+ (*poutbuf)[1] |= mode_extension<<4;
+ }
+ }
+
+ return 1;
+}
+
+static int mp3_header_decompress(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe){
+ uint32_t header;
+ int sample_rate= avctx->sample_rate;
+ int sample_rate_index=0;
+ int lsf, mpeg25, bitrate_index, frame_size;
+
+ header = BE_32(buf);
+ if(ff_mpa_check_header(header) >= 0){
+ *poutbuf= (uint8_t *) buf;
+ *poutbuf_size= buf_size;
+
+ return 0;
+ }
+
+ if(avctx->extradata_size != 15 || strcmp(avctx->extradata, "FFCMP3 0.0")){
+ av_log(avctx, AV_LOG_ERROR, "Extradata invalid %d\n", avctx->extradata_size);
+ return -1;
+ }
+
+ header= BE_32(avctx->extradata+11) & MP3_MASK;
+
+ lsf = sample_rate < (24000+32000)/2;
+ mpeg25 = sample_rate < (12000+16000)/2;
+ sample_rate_index= (header>>10)&3;
+ sample_rate= mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25); //in case sample rate is a little off
+
+ for(bitrate_index=2; bitrate_index<30; bitrate_index++){
+ frame_size = mpa_bitrate_tab[lsf][2][bitrate_index>>1];
+ frame_size = (frame_size * 144000) / (sample_rate << lsf) + (bitrate_index&1);
+ if(frame_size == buf_size + 4)
+ break;
+ if(frame_size == buf_size + 6)
+ break;
+ }
+ if(bitrate_index == 30){
+ av_log(avctx, AV_LOG_ERROR, "couldn't find bitrate_index\n");
+ return -1;
+ }
+
+ header |= (bitrate_index&1)<<9;
+ header |= (bitrate_index>>1)<<12;
+ header |= (frame_size == buf_size + 4)<<16; //FIXME actually set a correct crc instead of 0
+
+ *poutbuf_size= frame_size;
+ *poutbuf= av_malloc(frame_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ memcpy(*poutbuf + frame_size - buf_size, buf, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if(avctx->channels==2){
+ uint8_t *p= *poutbuf + frame_size - buf_size;
+ if(lsf){
+ FFSWAP(int, p[1], p[2]);
+ header |= (p[1] & 0xC0)>>2;
+ p[1] &= 0x3F;
+ }else{
+ header |= p[1] & 0x30;
+ p[1] &= 0xCF;
+ }
+ }
+
+ (*poutbuf)[0]= header>>24;
+ (*poutbuf)[1]= header>>16;
+ (*poutbuf)[2]= header>> 8;
+ (*poutbuf)[3]= header ;
+
+ return 1;
+}
+
+AVBitStreamFilter dump_extradata_bsf={
+ "dump_extra",
+ 0,
+ dump_extradata,
+};
+
+AVBitStreamFilter remove_extradata_bsf={
+ "remove_extra",
+ 0,
+ remove_extradata,
+};
+
+AVBitStreamFilter noise_bsf={
+ "noise",
+ sizeof(int),
+ noise,
+};
+
+AVBitStreamFilter mp3_header_compress_bsf={
+ "mp3comp",
+ 0,
+ mp3_header_compress,
+};
+
+AVBitStreamFilter mp3_header_decompress_bsf={
+ "mp3decomp",
+ 0,
+ mp3_header_decompress,
+};
diff --git a/contrib/ffmpeg/libavcodec/bmp.c b/contrib/ffmpeg/libavcodec/bmp.c
new file mode 100644
index 000000000..2a4d83393
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/bmp.c
@@ -0,0 +1,254 @@
+/*
+ * BMP image format
+ * Copyright (c) 2005 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "bitstream.h"
+#include "bswap.h"
+
+typedef struct BMPContext {
+ AVFrame picture;
+} BMPContext;
+
+#define BMP_RGB 0
+#define BMP_RLE8 1
+#define BMP_RLE4 2
+#define BMP_BITFIELDS 3
+
+#define read16(bits) bswap_16(get_bits(bits, 16))
+#define read32(bits) bswap_32(get_bits_long(bits, 32))
+
+static int bmp_decode_init(AVCodecContext *avctx){
+ BMPContext *s = avctx->priv_data;
+
+ avcodec_get_frame_defaults((AVFrame*)&s->picture);
+ avctx->coded_frame = (AVFrame*)&s->picture;
+
+ return 0;
+}
+
+static int bmp_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ BMPContext *s = avctx->priv_data;
+ AVFrame *picture = data;
+ AVFrame *p = &s->picture;
+ GetBitContext bits;
+ unsigned int fsize, hsize;
+ int width, height;
+ unsigned int depth;
+ unsigned int comp;
+ unsigned int ihsize;
+ int i, j, n, linesize;
+ uint32_t rgb[3];
+ uint8_t *ptr;
+ int dsize;
+
+ if(buf_size < 14){
+ av_log(avctx, AV_LOG_ERROR, "buf size too small (%d)\n", buf_size);
+ return -1;
+ }
+
+ init_get_bits(&bits, buf, buf_size);
+
+ if(get_bits(&bits, 16) != 0x424d){ /* 'BM' */
+ av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
+ return -1;
+ }
+
+ fsize = read32(&bits);
+ if(buf_size < fsize){
+ av_log(avctx, AV_LOG_ERROR, "not enough data (%d < %d)\n",
+ buf_size, fsize);
+ return -1;
+ }
+
+ skip_bits(&bits, 16); /* reserved1 */
+ skip_bits(&bits, 16); /* reserved2 */
+
+ hsize = read32(&bits); /* header size */
+ if(fsize <= hsize){
+ av_log(avctx, AV_LOG_ERROR, "not enough data (%d < %d)\n",
+ fsize, hsize);
+ return -1;
+ }
+
+ ihsize = read32(&bits); /* more header size */
+ if(ihsize + 14 > hsize){
+ av_log(avctx, AV_LOG_ERROR, "invalid header size %d\n", hsize);
+ return -1;
+ }
+
+ width = read32(&bits);
+ height = read32(&bits);
+
+ if(read16(&bits) != 1){ /* planes */
+ av_log(avctx, AV_LOG_ERROR, "invalid BMP header\n");
+ return -1;
+ }
+
+ depth = read16(&bits);
+
+ if(ihsize > 16)
+ comp = read32(&bits);
+ else
+ comp = BMP_RGB;
+
+ if(comp != BMP_RGB && comp != BMP_BITFIELDS){
+ av_log(avctx, AV_LOG_ERROR, "BMP coding %d not supported\n", comp);
+ return -1;
+ }
+
+ if(comp == BMP_BITFIELDS){
+ skip_bits(&bits, 20 * 8);
+ rgb[0] = read32(&bits);
+ rgb[1] = read32(&bits);
+ rgb[2] = read32(&bits);
+ }
+
+ avctx->codec_id = CODEC_ID_BMP;
+ avctx->width = width;
+ avctx->height = height > 0? height: -height;
+
+ avctx->pix_fmt = PIX_FMT_NONE;
+
+ switch(depth){
+ case 32:
+ if(comp == BMP_BITFIELDS){
+ rgb[0] = (rgb[0] >> 15) & 3;
+ rgb[1] = (rgb[1] >> 15) & 3;
+ rgb[2] = (rgb[2] >> 15) & 3;
+
+ if(rgb[0] + rgb[1] + rgb[2] != 3 ||
+ rgb[0] == rgb[1] || rgb[0] == rgb[2] || rgb[1] == rgb[2]){
+ break;
+ }
+ } else {
+ rgb[0] = 2;
+ rgb[1] = 1;
+ rgb[2] = 0;
+ }
+
+ avctx->pix_fmt = PIX_FMT_BGR24;
+ break;
+ case 24:
+ avctx->pix_fmt = PIX_FMT_BGR24;
+ break;
+ case 16:
+ if(comp == BMP_RGB)
+ avctx->pix_fmt = PIX_FMT_RGB555;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "depth %d not supported\n", depth);
+ return -1;
+ }
+
+ if(avctx->pix_fmt == PIX_FMT_NONE){
+ av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
+ return -1;
+ }
+
+ p->reference = 0;
+ if(avctx->get_buffer(avctx, p) < 0){
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+ p->pict_type = FF_I_TYPE;
+ p->key_frame = 1;
+
+ buf += hsize;
+ dsize = buf_size - hsize;
+
+ /* Line size in file multiple of 4 */
+ n = (avctx->width * (depth / 8) + 3) & ~3;
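+ /* e.g. a 17 pixel wide 24 bit/pixel image needs 17*3 = 51 bytes of pixel
+ data per row, padded here to n = 52 so every stored row is 4-byte aligned */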
+
+ if(n * avctx->height > dsize){
+ av_log(avctx, AV_LOG_ERROR, "not enough data (%d < %d)\n",
+ dsize, n * avctx->height);
+ return -1;
+ }
+
+ if(height > 0){
+ ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
+ linesize = -p->linesize[0];
+ } else {
+ ptr = p->data[0];
+ linesize = p->linesize[0];
+ }
+
+ switch(depth){
+ case 24:
+ for(i = 0; i < avctx->height; i++){
+ memcpy(ptr, buf, n);
+ buf += n;
+ ptr += linesize;
+ }
+ break;
+ case 16:
+ for(i = 0; i < avctx->height; i++){
+ uint16_t *src = (uint16_t *) buf;
+ uint16_t *dst = (uint16_t *) ptr;
+
+ for(j = 0; j < avctx->width; j++)
+ *dst++ = le2me_16(*src++);
+
+ buf += n;
+ ptr += linesize;
+ }
+ break;
+ case 32:
+ for(i = 0; i < avctx->height; i++){
+ uint8_t *src = buf;
+ uint8_t *dst = ptr;
+
+ for(j = 0; j < avctx->width; j++){
+ dst[0] = src[rgb[2]];
+ dst[1] = src[rgb[1]];
+ dst[2] = src[rgb[0]];
+ dst += 3;
+ src += 4;
+ }
+
+ buf += n;
+ ptr += linesize;
+ }
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "BMP decoder is broken\n");
+ return -1;
+ }
+
+ *picture = s->picture;
+ *data_size = sizeof(AVPicture);
+
+ return buf_size;
+}
+
+AVCodec bmp_decoder = {
+ "bmp",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_BMP,
+ sizeof(BMPContext),
+ bmp_decode_init,
+ NULL,
+ NULL,
+ bmp_decode_frame
+};
diff --git a/contrib/ffmpeg/libavcodec/bytestream.h b/contrib/ffmpeg/libavcodec/bytestream.h
new file mode 100644
index 000000000..25c457fe4
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/bytestream.h
@@ -0,0 +1,89 @@
+/*
+ * Bytestream functions
+ * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FFMPEG_BYTESTREAM_H
+#define FFMPEG_BYTESTREAM_H
+
+static always_inline unsigned int bytestream_get_le32(uint8_t **b)
+{
+ (*b) += 4;
+ return LE_32(*b - 4);
+}
+
+static always_inline unsigned int bytestream_get_le16(uint8_t **b)
+{
+ (*b) += 2;
+ return LE_16(*b - 2);
+}
+
+static always_inline unsigned int bytestream_get_byte(uint8_t **b)
+{
+ (*b)++;
+ return (*b)[-1];
+}
+
+static always_inline unsigned int bytestream_get_buffer(uint8_t **b, uint8_t *dst, unsigned int size)
+{
+ memcpy(dst, *b, size);
+ (*b) += size;
+ return size;
+}
+
+static always_inline void bytestream_put_be32(uint8_t **b, const unsigned int value)
+{
+ *(*b)++ = value >> 24;
+ *(*b)++ = value >> 16;
+ *(*b)++ = value >> 8;
+ *(*b)++ = value;
+}
+
+static always_inline void bytestream_put_be16(uint8_t **b, const unsigned int value)
+{
+ *(*b)++ = value >> 8;
+ *(*b)++ = value;
+}
+
+static always_inline void bytestream_put_le32(uint8_t **b, const unsigned int value)
+{
+ *(*b)++ = value;
+ *(*b)++ = value >> 8;
+ *(*b)++ = value >> 16;
+ *(*b)++ = value >> 24;
+}
+
+static always_inline void bytestream_put_le16(uint8_t **b, const unsigned int value)
+{
+ *(*b)++ = value;
+ *(*b)++ = value >> 8;
+}
+
+static always_inline void bytestream_put_byte(uint8_t **b, const unsigned int value)
+{
+ *(*b)++ = value;
+}
+
+static always_inline void bytestream_put_buffer(uint8_t **b, const uint8_t *src, unsigned int size)
+{
+ memcpy(*b, src, size);
+ (*b) += size;
+}
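+/* Minimal usage sketch, assuming the caller provides a scratch buffer of at
+ * least 6 bytes; the bytestream_example_* name is illustrative only. Every
+ * put_* helper advances the write pointer as it stores and every get_* helper
+ * advances the read pointer as it loads, so writing and re-reading
+ * little-endian values round-trips exactly. */
+static always_inline int bytestream_example_roundtrip(uint8_t *scratch)
+{
+ uint8_t *wr= scratch;
+ uint8_t *rd= scratch;
+ bytestream_put_le32(&wr, 0x11223344); /* scratch[0..3] = 44 33 22 11 */
+ bytestream_put_le16(&wr, 0x5566); /* scratch[4..5] = 66 55 */
+ return bytestream_get_le32(&rd) == 0x11223344
+ && bytestream_get_le16(&rd) == 0x5566; /* always 1 */
+}
+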
+
+#endif /* FFMPEG_BYTESTREAM_H */
diff --git a/src/libffmpeg/libavcodec/cabac.c b/contrib/ffmpeg/libavcodec/cabac.c
index 88790a960..c6da6292a 100644
--- a/src/libffmpeg/libavcodec/cabac.c
+++ b/contrib/ffmpeg/libavcodec/cabac.c
@@ -2,18 +2,20 @@
* H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -29,7 +31,7 @@
#include "bitstream.h"
#include "cabac.h"
-const uint8_t ff_h264_lps_range[64][4]= {
+static const uint8_t lps_range[64][4]= {
{128,176,208,240}, {128,167,197,227}, {128,158,187,216}, {123,150,178,205},
{116,142,169,195}, {111,135,160,185}, {105,128,152,175}, {100,122,144,166},
{ 95,116,137,158}, { 90,110,130,150}, { 85,104,123,142}, { 81, 99,117,135},
@@ -48,7 +50,12 @@ const uint8_t ff_h264_lps_range[64][4]= {
{ 6, 8, 9, 11}, { 6, 7, 9, 10}, { 6, 7, 8, 9}, { 2, 2, 2, 2},
};
-const uint8_t ff_h264_mps_state[64]= {
+uint8_t ff_h264_mlps_state[4*64];
+uint8_t ff_h264_lps_range[4*2*64];
+uint8_t ff_h264_lps_state[2*64];
+uint8_t ff_h264_mps_state[2*64];
+
+static const uint8_t mps_state[64]= {
1, 2, 3, 4, 5, 6, 7, 8,
9,10,11,12,13,14,15,16,
17,18,19,20,21,22,23,24,
@@ -59,7 +66,7 @@ const uint8_t ff_h264_mps_state[64]= {
57,58,59,60,61,62,62,63,
};
-const uint8_t ff_h264_lps_state[64]= {
+static const uint8_t lps_state[64]= {
0, 0, 1, 2, 2, 4, 4, 5,
6, 7, 8, 9, 9,11,11,12,
13,13,15,15,16,16,18,18,
@@ -69,25 +76,40 @@ const uint8_t ff_h264_lps_state[64]= {
33,33,34,34,35,35,35,36,
36,36,37,37,37,38,38,63,
};
-
-const uint8_t ff_h264_norm_shift[256]= {
- 8,7,6,6,5,5,5,5,4,4,4,4,4,4,4,4,
- 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
- 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+#if 0
+const uint8_t ff_h264_norm_shift_old[128]= {
+ 7,6,5,5,4,4,4,4,3,3,3,3,3,3,3,3,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
- 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
- 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
+#endif
+const uint8_t ff_h264_norm_shift[512]= {
+ 9,8,7,7,6,6,6,6,5,5,5,5,5,5,5,5,
+ 4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+};
/**
*
@@ -122,28 +144,37 @@ void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size){
c->low = (*c->bytestream++)<<10;
#endif
c->low+= ((*c->bytestream++)<<2) + 2;
- c->range= 0x1FE<<(CABAC_BITS + 1);
+ c->range= 0x1FE;
}
-void ff_init_cabac_states(CABACContext *c, uint8_t const (*lps_range)[4],
- uint8_t const *mps_state, uint8_t const *lps_state, int state_count){
+void ff_init_cabac_states(CABACContext *c){
int i, j;
- for(i=0; i<state_count; i++){
+ for(i=0; i<64; i++){
for(j=0; j<4; j++){ //FIXME check if this is worth the 1 shift we save
- c->lps_range[2*i+0][j+4]=
- c->lps_range[2*i+1][j+4]= lps_range[i][j];
+ ff_h264_lps_range[j*2*64+2*i+0]=
+ ff_h264_lps_range[j*2*64+2*i+1]= lps_range[i][j];
}
- c->mps_state[2*i+0]= 2*mps_state[i];
- c->mps_state[2*i+1]= 2*mps_state[i]+1;
+ ff_h264_mlps_state[128+2*i+0]=
+ ff_h264_mps_state[2*i+0]= 2*mps_state[i]+0;
+ ff_h264_mlps_state[128+2*i+1]=
+ ff_h264_mps_state[2*i+1]= 2*mps_state[i]+1;
if( i ){
- c->lps_state[2*i+0]= 2*lps_state[i];
- c->lps_state[2*i+1]= 2*lps_state[i]+1;
+#ifdef BRANCHLESS_CABAC_DECODER
+ ff_h264_mlps_state[128-2*i-1]= 2*lps_state[i]+0;
+ ff_h264_mlps_state[128-2*i-2]= 2*lps_state[i]+1;
}else{
- c->lps_state[2*i+0]= 1;
- c->lps_state[2*i+1]= 0;
+ ff_h264_mlps_state[128-2*i-1]= 1;
+ ff_h264_mlps_state[128-2*i-2]= 0;
+#else
+ ff_h264_lps_state[2*i+0]= 2*lps_state[i]+0;
+ ff_h264_lps_state[2*i+1]= 2*lps_state[i]+1;
+ }else{
+ ff_h264_lps_state[2*i+0]= 1;
+ ff_h264_lps_state[2*i+1]= 0;
+#endif
}
}
}
diff --git a/contrib/ffmpeg/libavcodec/cabac.h b/contrib/ffmpeg/libavcodec/cabac.h
new file mode 100644
index 000000000..43fe78e3b
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/cabac.h
@@ -0,0 +1,859 @@
+/*
+ * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/**
+ * @file cabac.h
+ * Context Adaptive Binary Arithmetic Coder.
+ */
+
+
+//#undef NDEBUG
+#include <assert.h>
+#ifdef ARCH_X86
+#include "x86_cpu.h"
+#endif
+
+#define CABAC_BITS 16
+#define CABAC_MASK ((1<<CABAC_BITS)-1)
+#define BRANCHLESS_CABAC_DECODER 1
+//#define ARCH_X86_DISABLED 1
+
+typedef struct CABACContext{
+ int low;
+ int range;
+ int outstanding_count;
+#ifdef STRICT_LIMITS
+ int symCount;
+#endif
+ const uint8_t *bytestream_start;
+ const uint8_t *bytestream;
+ const uint8_t *bytestream_end;
+ PutBitContext pb;
+}CABACContext;
+
+extern uint8_t ff_h264_mlps_state[4*64];
+extern uint8_t ff_h264_lps_range[4*2*64]; ///< rangeTabLPS
+extern uint8_t ff_h264_mps_state[2*64]; ///< transIdxMPS
+extern uint8_t ff_h264_lps_state[2*64]; ///< transIdxLPS
+extern const uint8_t ff_h264_norm_shift[512];
+
+
+void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size);
+void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size);
+void ff_init_cabac_states(CABACContext *c);
+
+
+static inline void put_cabac_bit(CABACContext *c, int b){
+ put_bits(&c->pb, 1, b);
+ for(;c->outstanding_count; c->outstanding_count--){
+ put_bits(&c->pb, 1, 1-b);
+ }
+}
+
+static inline void renorm_cabac_encoder(CABACContext *c){
+ while(c->range < 0x100){
+ //FIXME optimize
+ if(c->low<0x100){
+ put_cabac_bit(c, 0);
+ }else if(c->low<0x200){
+ c->outstanding_count++;
+ c->low -= 0x100;
+ }else{
+ put_cabac_bit(c, 1);
+ c->low -= 0x200;
+ }
+
+ c->range+= c->range;
+ c->low += c->low;
+ }
+}
+
+static void put_cabac(CABACContext *c, uint8_t * const state, int bit){
+ int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + *state];
+
+ if(bit == ((*state)&1)){
+ c->range -= RangeLPS;
+ *state= ff_h264_mps_state[*state];
+ }else{
+ c->low += c->range - RangeLPS;
+ c->range = RangeLPS;
+ *state= ff_h264_lps_state[*state];
+ }
+
+ renorm_cabac_encoder(c);
+
+#ifdef STRICT_LIMITS
+ c->symCount++;
+#endif
+}
+
+static void put_cabac_static(CABACContext *c, int RangeLPS, int bit){
+ assert(c->range > RangeLPS);
+
+ if(!bit){
+ c->range -= RangeLPS;
+ }else{
+ c->low += c->range - RangeLPS;
+ c->range = RangeLPS;
+ }
+
+ renorm_cabac_encoder(c);
+
+#ifdef STRICT_LIMITS
+ c->symCount++;
+#endif
+}
+
+/**
+ * @param bit 0 -> write zero bit, !=0 write one bit
+ */
+static void put_cabac_bypass(CABACContext *c, int bit){
+ c->low += c->low;
+
+ if(bit){
+ c->low += c->range;
+ }
+//FIXME optimize
+ if(c->low<0x200){
+ put_cabac_bit(c, 0);
+ }else if(c->low<0x400){
+ c->outstanding_count++;
+ c->low -= 0x200;
+ }else{
+ put_cabac_bit(c, 1);
+ c->low -= 0x400;
+ }
+
+#ifdef STRICT_LIMITS
+ c->symCount++;
+#endif
+}
+
+/**
+ *
+ * @return the number of bytes written
+ */
+static int put_cabac_terminate(CABACContext *c, int bit){
+ c->range -= 2;
+
+ if(!bit){
+ renorm_cabac_encoder(c);
+ }else{
+ c->low += c->range;
+ c->range= 2;
+
+ renorm_cabac_encoder(c);
+
+ assert(c->low <= 0x1FF);
+ put_cabac_bit(c, c->low>>9);
+ put_bits(&c->pb, 2, ((c->low>>7)&3)|1);
+
+ flush_put_bits(&c->pb); //FIXME FIXME FIXME XXX wrong
+ }
+
+#ifdef STRICT_LIMITS
+ c->symCount++;
+#endif
+
+ return (put_bits_count(&c->pb)+7)>>3;
+}
+
+/**
+ * put (truncated) unary binarization.
+ */
+static void put_cabac_u(CABACContext *c, uint8_t * state, int v, int max, int max_index, int truncated){
+ int i;
+
+ assert(v <= max);
+
+#if 1
+ for(i=0; i<v; i++){
+ put_cabac(c, state, 1);
+ if(i < max_index) state++;
+ }
+ if(truncated==0 || v<max)
+ put_cabac(c, state, 0);
+#else
+ if(v <= max_index){
+ for(i=0; i<v; i++){
+ put_cabac(c, state+i, 1);
+ }
+ if(truncated==0 || v<max)
+ put_cabac(c, state+i, 0);
+ }else{
+ for(i=0; i<=max_index; i++){
+ put_cabac(c, state+i, 1);
+ }
+ for(; i<v; i++){
+ put_cabac(c, state+max_index, 1);
+ }
+ if(truncated==0 || v<max)
+ put_cabac(c, state+max_index, 0);
+ }
+#endif
+}
+
+/**
+ * put unary exp golomb k-th order binarization.
+ */
+static void put_cabac_ueg(CABACContext *c, uint8_t * state, int v, int max, int is_signed, int k, int max_index){
+ int i;
+
+ if(v==0)
+ put_cabac(c, state, 0);
+ else{
+ const int sign= v < 0;
+
+ if(is_signed) v= FFABS(v);
+
+ if(v<max){
+ for(i=0; i<v; i++){
+ put_cabac(c, state, 1);
+ if(i < max_index) state++;
+ }
+
+ put_cabac(c, state, 0);
+ }else{
+ int m= 1<<k;
+
+ for(i=0; i<max; i++){
+ put_cabac(c, state, 1);
+ if(i < max_index) state++;
+ }
+
+ v -= max;
+ while(v >= m){ //FIXME optimize
+ put_cabac_bypass(c, 1);
+ v-= m;
+ m+= m;
+ }
+ put_cabac_bypass(c, 0);
+ while(m>>=1){
+ put_cabac_bypass(c, v&m);
+ }
+ }
+
+ if(is_signed)
+ put_cabac_bypass(c, sign);
+ }
+}
+
+static void refill(CABACContext *c){
+#if CABAC_BITS == 16
+ c->low+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
+#else
+ c->low+= c->bytestream[0]<<1;
+#endif
+ c->low -= CABAC_MASK;
+ c->bytestream+= CABAC_BITS/8;
+}
+
+static void refill2(CABACContext *c){
+ int i, x;
+
+ x= c->low ^ (c->low-1);
+ i= 7 - ff_h264_norm_shift[x>>(CABAC_BITS-1)];
+
+ x= -CABAC_MASK;
+
+#if CABAC_BITS == 16
+ x+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
+#else
+ x+= c->bytestream[0]<<1;
+#endif
+
+ c->low += x<<i;
+ c->bytestream+= CABAC_BITS/8;
+}
+
+static inline void renorm_cabac_decoder(CABACContext *c){
+ while(c->range < 0x100){
+ c->range+= c->range;
+ c->low+= c->low;
+ if(!(c->low & CABAC_MASK))
+ refill(c);
+ }
+}
+
+static inline void renorm_cabac_decoder_once(CABACContext *c){
+#ifdef ARCH_X86_DISABLED
+ int temp;
+#if 0
+ //P3:683 athlon:475
+ asm(
+ "lea -0x100(%0), %2 \n\t"
+ "shr $31, %2 \n\t" //FIXME 31->63 for x86-64
+ "shl %%cl, %0 \n\t"
+ "shl %%cl, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+c"(temp)
+ );
+#elif 0
+ //P3:680 athlon:474
+ asm(
+ "cmp $0x100, %0 \n\t"
+ "setb %%cl \n\t" //FIXME 31->63 for x86-64
+ "shl %%cl, %0 \n\t"
+ "shl %%cl, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+c"(temp)
+ );
+#elif 1
+ int temp2;
+ //P3:665 athlon:517
+ asm(
+ "lea -0x100(%0), %%eax \n\t"
+ "cdq \n\t"
+ "mov %0, %%eax \n\t"
+ "and %%edx, %0 \n\t"
+ "and %1, %%edx \n\t"
+ "add %%eax, %0 \n\t"
+ "add %%edx, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
+ );
+#elif 0
+ int temp2;
+ //P3:673 athlon:509
+ asm(
+ "cmp $0x100, %0 \n\t"
+ "sbb %%edx, %%edx \n\t"
+ "mov %0, %%eax \n\t"
+ "and %%edx, %0 \n\t"
+ "and %1, %%edx \n\t"
+ "add %%eax, %0 \n\t"
+ "add %%edx, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
+ );
+#else
+ int temp2;
+ //P3:677 athlon:511
+ asm(
+ "cmp $0x100, %0 \n\t"
+ "lea (%0, %0), %%eax \n\t"
+ "lea (%1, %1), %%edx \n\t"
+ "cmovb %%eax, %0 \n\t"
+ "cmovb %%edx, %1 \n\t"
+ : "+r"(c->range), "+r"(c->low), "+a"(temp), "+d"(temp2)
+ );
+#endif
+#else
+ //P3:675 athlon:476
+ int shift= (uint32_t)(c->range - 0x100)>>31;
+ c->range<<= shift;
+ c->low <<= shift;
+#endif
+ if(!(c->low & CABAC_MASK))
+ refill(c);
+}
+
+static int always_inline get_cabac_inline(CABACContext *c, uint8_t * const state){
+ //FIXME gcc generates duplicate load/stores for c->low and c->range
+#define LOW "0"
+#define RANGE "4"
+#ifdef ARCH_X86_64
+#define BYTESTART "16"
+#define BYTE "24"
+#define BYTEEND "32"
+#else
+#define BYTESTART "12"
+#define BYTE "16"
+#define BYTEEND "20"
+#endif
+#if defined(ARCH_X86) && !(defined(PIC) && defined(__GNUC__))
+ int bit;
+
+#ifndef BRANCHLESS_CABAC_DECODER
+ asm volatile(
+ "movzbl (%1), %0 \n\t"
+ "movl "RANGE "(%2), %%ebx \n\t"
+ "movl "RANGE "(%2), %%edx \n\t"
+ "andl $0xC0, %%ebx \n\t"
+ "movzbl "MANGLE(ff_h264_lps_range)"(%0, %%ebx, 2), %%esi\n\t"
+ "movl "LOW "(%2), %%ebx \n\t"
+//eax:state ebx:low, edx:range, esi:RangeLPS
+ "subl %%esi, %%edx \n\t"
+ "movl %%edx, %%ecx \n\t"
+ "shll $17, %%ecx \n\t"
+ "cmpl %%ecx, %%ebx \n\t"
+ " ja 1f \n\t"
+
+#if 1
+ //athlon:4067 P3:4110
+ "lea -0x100(%%edx), %%ecx \n\t"
+ "shr $31, %%ecx \n\t"
+ "shl %%cl, %%edx \n\t"
+ "shl %%cl, %%ebx \n\t"
+#else
+ //athlon:4057 P3:4130
+ "cmp $0x100, %%edx \n\t" //FIXME avoidable
+ "setb %%cl \n\t"
+ "shl %%cl, %%edx \n\t"
+ "shl %%cl, %%ebx \n\t"
+#endif
+ "movzbl "MANGLE(ff_h264_mps_state)"(%0), %%ecx \n\t"
+ "movb %%cl, (%1) \n\t"
+//eax:state ebx:low, edx:range, esi:RangeLPS
+ "test %%bx, %%bx \n\t"
+ " jnz 2f \n\t"
+ "mov "BYTE "(%2), %%"REG_S" \n\t"
+ "subl $0xFFFF, %%ebx \n\t"
+ "movzwl (%%"REG_S"), %%ecx \n\t"
+ "bswap %%ecx \n\t"
+ "shrl $15, %%ecx \n\t"
+ "add $2, %%"REG_S" \n\t"
+ "addl %%ecx, %%ebx \n\t"
+ "mov %%"REG_S", "BYTE "(%2) \n\t"
+ "jmp 2f \n\t"
+ "1: \n\t"
+//eax:state ebx:low, edx:range, esi:RangeLPS
+ "subl %%ecx, %%ebx \n\t"
+ "movl %%esi, %%edx \n\t"
+ "movzbl " MANGLE(ff_h264_norm_shift) "(%%esi), %%ecx \n\t"
+ "shll %%cl, %%ebx \n\t"
+ "shll %%cl, %%edx \n\t"
+ "movzbl "MANGLE(ff_h264_lps_state)"(%0), %%ecx \n\t"
+ "movb %%cl, (%1) \n\t"
+ "add $1, %0 \n\t"
+ "test %%bx, %%bx \n\t"
+ " jnz 2f \n\t"
+
+ "mov "BYTE "(%2), %%"REG_c" \n\t"
+ "movzwl (%%"REG_c"), %%esi \n\t"
+ "bswap %%esi \n\t"
+ "shrl $15, %%esi \n\t"
+ "subl $0xFFFF, %%esi \n\t"
+ "add $2, %%"REG_c" \n\t"
+ "mov %%"REG_c", "BYTE "(%2) \n\t"
+
+ "leal -1(%%ebx), %%ecx \n\t"
+ "xorl %%ebx, %%ecx \n\t"
+ "shrl $15, %%ecx \n\t"
+ "movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t"
+ "neg %%ecx \n\t"
+ "add $7, %%ecx \n\t"
+
+ "shll %%cl , %%esi \n\t"
+ "addl %%esi, %%ebx \n\t"
+ "2: \n\t"
+ "movl %%edx, "RANGE "(%2) \n\t"
+ "movl %%ebx, "LOW "(%2) \n\t"
+ :"=&a"(bit) //FIXME this is fragile: gcc either runs out of registers or miscompiles it (for example if "+a"(bit) or "+m"(*state) is used)
+ :"r"(state), "r"(c)
+ : "%"REG_c, "%ebx", "%edx", "%"REG_S, "memory"
+ );
+ bit&=1;
+#else /* BRANCHLESS_CABAC_DECODER */
+
+
+#if defined CMOV_IS_FAST
+#define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
+ "mov "tmp" , %%ecx \n\t"\
+ "shl $17 , "tmp" \n\t"\
+ "cmp "low" , "tmp" \n\t"\
+ "cmova %%ecx , "range" \n\t"\
+ "sbb %%ecx , %%ecx \n\t"\
+ "and %%ecx , "tmp" \n\t"\
+ "sub "tmp" , "low" \n\t"\
+ "xor %%ecx , "ret" \n\t"
+#else /* CMOV_IS_FAST */
+#define BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
+ "mov "tmp" , %%ecx \n\t"\
+ "shl $17 , "tmp" \n\t"\
+ "sub "low" , "tmp" \n\t"\
+ "sar $31 , "tmp" \n\t" /*lps_mask*/\
+ "sub %%ecx , "range" \n\t" /*RangeLPS - range*/\
+ "and "tmp" , "range" \n\t" /*(RangeLPS - range)&lps_mask*/\
+ "add %%ecx , "range" \n\t" /*new range*/\
+ "shl $17 , %%ecx \n\t"\
+ "and "tmp" , %%ecx \n\t"\
+ "sub %%ecx , "low" \n\t"\
+ "xor "tmp" , "ret" \n\t"
+#endif /* CMOV_IS_FAST */
+
+
+#define BRANCHLESS_GET_CABAC(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
+ "movzbl "statep" , "ret" \n\t"\
+ "mov "range" , "tmp" \n\t"\
+ "and $0xC0 , "range" \n\t"\
+ "movzbl "MANGLE(ff_h264_lps_range)"("ret", "range", 2), "range" \n\t"\
+ "sub "range" , "tmp" \n\t"\
+ BRANCHLESS_GET_CABAC_UPDATE(ret, cabac, statep, low, lowword, range, tmp, tmpbyte)\
+ "movzbl " MANGLE(ff_h264_norm_shift) "("range"), %%ecx \n\t"\
+ "shl %%cl , "range" \n\t"\
+ "movzbl "MANGLE(ff_h264_mlps_state)"+128("ret"), "tmp" \n\t"\
+ "mov "tmpbyte" , "statep" \n\t"\
+ "shl %%cl , "low" \n\t"\
+ "test "lowword" , "lowword" \n\t"\
+ " jnz 1f \n\t"\
+ "mov "BYTE"("cabac"), %%"REG_c" \n\t"\
+ "movzwl (%%"REG_c") , "tmp" \n\t"\
+ "bswap "tmp" \n\t"\
+ "shr $15 , "tmp" \n\t"\
+ "sub $0xFFFF , "tmp" \n\t"\
+ "add $2 , %%"REG_c" \n\t"\
+ "mov %%"REG_c" , "BYTE "("cabac") \n\t"\
+ "lea -1("low") , %%ecx \n\t"\
+ "xor "low" , %%ecx \n\t"\
+ "shr $15 , %%ecx \n\t"\
+ "movzbl " MANGLE(ff_h264_norm_shift) "(%%ecx), %%ecx \n\t"\
+ "neg %%ecx \n\t"\
+ "add $7 , %%ecx \n\t"\
+ "shl %%cl , "tmp" \n\t"\
+ "add "tmp" , "low" \n\t"\
+ "1: \n\t"
+
+ asm volatile(
+ "movl "RANGE "(%2), %%esi \n\t"
+ "movl "LOW "(%2), %%ebx \n\t"
+ BRANCHLESS_GET_CABAC("%0", "%2", "(%1)", "%%ebx", "%%bx", "%%esi", "%%edx", "%%dl")
+ "movl %%esi, "RANGE "(%2) \n\t"
+ "movl %%ebx, "LOW "(%2) \n\t"
+
+ :"=&a"(bit)
+ :"r"(state), "r"(c)
+ : "%"REG_c, "%ebx", "%edx", "%esi", "memory"
+ );
+ bit&=1;
+#endif /* BRANCHLESS_CABAC_DECODER */
+#else /* defined(ARCH_X86) && !(defined(PIC) && defined(__GNUC__)) */
+ int s = *state;
+ int RangeLPS= ff_h264_lps_range[2*(c->range&0xC0) + s];
+ int bit, lps_mask attribute_unused;
+
+ c->range -= RangeLPS;
+#ifndef BRANCHLESS_CABAC_DECODER
+ if(c->low < (c->range<<17)){
+ bit= s&1;
+ *state= ff_h264_mps_state[s];
+ renorm_cabac_decoder_once(c);
+ }else{
+ bit= ff_h264_norm_shift[RangeLPS];
+ c->low -= (c->range<<17);
+ *state= ff_h264_lps_state[s];
+ c->range = RangeLPS<<bit;
+ c->low <<= bit;
+ bit= (s&1)^1;
+
+ if(!(c->low & 0xFFFF)){
+ refill2(c);
+ }
+ }
+#else /* BRANCHLESS_CABAC_DECODER */
+ lps_mask= ((c->range<<17) - c->low)>>31;
+
+ c->low -= (c->range<<17) & lps_mask;
+ c->range += (RangeLPS - c->range) & lps_mask;
+
+ s^=lps_mask;
+ *state= (ff_h264_mlps_state+128)[s];
+ bit= s&1;
+
+ lps_mask= ff_h264_norm_shift[c->range];
+ c->range<<= lps_mask;
+ c->low <<= lps_mask;
+ if(!(c->low & CABAC_MASK))
+ refill2(c);
+#endif /* BRANCHLESS_CABAC_DECODER */
+#endif /* defined(ARCH_X86) && !(defined(PIC) && defined(__GNUC__)) */
+ return bit;
+}
+
+static int __attribute((noinline)) get_cabac_noinline(CABACContext *c, uint8_t * const state){
+ return get_cabac_inline(c,state);
+}
+
+static int get_cabac(CABACContext *c, uint8_t * const state){
+ return get_cabac_inline(c,state);
+}
+
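+/* Minimal decode-side sketch, assuming buf/buf_size hold CABAC-coded data and
+ * that *state is a context state byte the caller initialized from its codec's
+ * init tables; the example_* helper name is illustrative only. The global
+ * range/transition tables are filled (idempotently), the decoder is primed
+ * from the buffer, then one context-coded bin is read. */
+static inline int example_decode_first_bin(const uint8_t *buf, int buf_size,
+ uint8_t *state){
+ CABACContext c;
+ ff_init_cabac_states(&c); /* fills ff_h264_lps_range, ff_h264_mlps_state, ... */
+ ff_init_cabac_decoder(&c, buf, buf_size);
+ return get_cabac(&c, state); /* first bin, 0 or 1 */
+}
+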
+static int get_cabac_bypass(CABACContext *c){
+#if 0 //not faster
+ int bit;
+ asm volatile(
+ "movl "RANGE "(%1), %%ebx \n\t"
+ "movl "LOW "(%1), %%eax \n\t"
+ "shl $17, %%ebx \n\t"
+ "add %%eax, %%eax \n\t"
+ "sub %%ebx, %%eax \n\t"
+ "cdq \n\t"
+ "and %%edx, %%ebx \n\t"
+ "add %%ebx, %%eax \n\t"
+ "test %%ax, %%ax \n\t"
+ " jnz 1f \n\t"
+ "movl "BYTE "(%1), %%"REG_b" \n\t"
+ "subl $0xFFFF, %%eax \n\t"
+ "movzwl (%%"REG_b"), %%ecx \n\t"
+ "bswap %%ecx \n\t"
+ "shrl $15, %%ecx \n\t"
+ "addl $2, %%"REG_b" \n\t"
+ "addl %%ecx, %%eax \n\t"
+ "movl %%"REG_b", "BYTE "(%1) \n\t"
+ "1: \n\t"
+ "movl %%eax, "LOW "(%1) \n\t"
+
+ :"=&d"(bit)
+ :"r"(c)
+ : "%eax", "%"REG_b, "%ecx", "memory"
+ );
+ return bit+1;
+#else
+ int range;
+ c->low += c->low;
+
+ if(!(c->low & CABAC_MASK))
+ refill(c);
+
+ range= c->range<<17;
+ if(c->low < range){
+ return 0;
+ }else{
+ c->low -= range;
+ return 1;
+ }
+#endif
+}
+
+
+static always_inline int get_cabac_bypass_sign(CABACContext *c, int val){
+#if defined(ARCH_X86) && !(defined(PIC) && defined(__GNUC__))
+ asm volatile(
+ "movl "RANGE "(%1), %%ebx \n\t"
+ "movl "LOW "(%1), %%eax \n\t"
+ "shl $17, %%ebx \n\t"
+ "add %%eax, %%eax \n\t"
+ "sub %%ebx, %%eax \n\t"
+ "cdq \n\t"
+ "and %%edx, %%ebx \n\t"
+ "add %%ebx, %%eax \n\t"
+ "xor %%edx, %%ecx \n\t"
+ "sub %%edx, %%ecx \n\t"
+ "test %%ax, %%ax \n\t"
+ " jnz 1f \n\t"
+ "mov "BYTE "(%1), %%"REG_b" \n\t"
+ "subl $0xFFFF, %%eax \n\t"
+ "movzwl (%%"REG_b"), %%edx \n\t"
+ "bswap %%edx \n\t"
+ "shrl $15, %%edx \n\t"
+ "add $2, %%"REG_b" \n\t"
+ "addl %%edx, %%eax \n\t"
+ "mov %%"REG_b", "BYTE "(%1) \n\t"
+ "1: \n\t"
+ "movl %%eax, "LOW "(%1) \n\t"
+
+ :"+c"(val)
+ :"r"(c)
+ : "%eax", "%"REG_b, "%edx", "memory"
+ );
+ return val;
+#else
+ int range, mask;
+ c->low += c->low;
+
+ if(!(c->low & CABAC_MASK))
+ refill(c);
+
+ range= c->range<<17;
+ c->low -= range;
+ mask= c->low >> 31;
+ range &= mask;
+ c->low += range;
+ return (val^mask)-mask;
+#endif
+}
+
+//FIXME the x86 code from this file should be moved into i386/h264 or a cabac-specific .c/.h (note: I'll kill you if you move my code away from under my fingers before I am finished with it!)
+//FIXME use some macros to avoid duplicating get_cabac (can't be done yet as that would make the optimization work harder)
+#if defined(ARCH_X86) && !(defined(PIC) && defined(__GNUC__))
+static int decode_significance_x86(CABACContext *c, int max_coeff, uint8_t *significant_coeff_ctx_base, int *index){
+ void *end= significant_coeff_ctx_base + max_coeff - 1;
+ int minusstart= -(int)significant_coeff_ctx_base;
+ int minusindex= 4-(int)index;
+ int coeff_count;
+ asm volatile(
+ "movl "RANGE "(%3), %%esi \n\t"
+ "movl "LOW "(%3), %%ebx \n\t"
+
+ "2: \n\t"
+
+ BRANCHLESS_GET_CABAC("%%edx", "%3", "(%1)", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al")
+
+ "test $1, %%edx \n\t"
+ " jz 3f \n\t"
+
+ BRANCHLESS_GET_CABAC("%%edx", "%3", "61(%1)", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al")
+
+ "mov %2, %%"REG_a" \n\t"
+ "movl %4, %%ecx \n\t"
+ "add %1, %%"REG_c" \n\t"
+ "movl %%ecx, (%%"REG_a") \n\t"
+
+ "test $1, %%edx \n\t"
+ " jnz 4f \n\t"
+
+ "add $4, %%"REG_a" \n\t"
+ "mov %%"REG_a", %2 \n\t"
+
+ "3: \n\t"
+ "add $1, %1 \n\t"
+ "cmp %5, %1 \n\t"
+ " jb 2b \n\t"
+ "mov %2, %%"REG_a" \n\t"
+ "movl %4, %%ecx \n\t"
+ "add %1, %%"REG_c" \n\t"
+ "movl %%ecx, (%%"REG_a") \n\t"
+ "4: \n\t"
+ "add %6, %%eax \n\t"
+ "shr $2, %%eax \n\t"
+
+ "movl %%esi, "RANGE "(%3) \n\t"
+ "movl %%ebx, "LOW "(%3) \n\t"
+ :"=&a"(coeff_count), "+r"(significant_coeff_ctx_base), "+m"(index)\
+ :"r"(c), "m"(minusstart), "m"(end), "m"(minusindex)\
+ : "%"REG_c, "%ebx", "%edx", "%esi", "memory"\
+ );
+ return coeff_count;
+}
+
+static int decode_significance_8x8_x86(CABACContext *c, uint8_t *significant_coeff_ctx_base, int *index, uint8_t *sig_off){
+ int minusindex= 4-(int)index;
+ int coeff_count;
+ long last=0;
+ asm volatile(
+ "movl "RANGE "(%3), %%esi \n\t"
+ "movl "LOW "(%3), %%ebx \n\t"
+
+ "mov %1, %%"REG_D" \n\t"
+ "2: \n\t"
+
+ "mov %6, %%"REG_a" \n\t"
+ "movzbl (%%"REG_a", %%"REG_D"), %%edi \n\t"
+ "add %5, %%"REG_D" \n\t"
+
+ BRANCHLESS_GET_CABAC("%%edx", "%3", "(%%"REG_D")", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al")
+
+ "mov %1, %%edi \n\t"
+ "test $1, %%edx \n\t"
+ " jz 3f \n\t"
+
+ "movzbl "MANGLE(last_coeff_flag_offset_8x8)"(%%edi), %%edi\n\t"
+ "add %5, %%"REG_D" \n\t"
+
+ BRANCHLESS_GET_CABAC("%%edx", "%3", "15(%%"REG_D")", "%%ebx", "%%bx", "%%esi", "%%eax", "%%al")
+
+ "mov %2, %%"REG_a" \n\t"
+ "mov %1, %%edi \n\t"
+ "movl %%edi, (%%"REG_a") \n\t"
+
+ "test $1, %%edx \n\t"
+ " jnz 4f \n\t"
+
+ "add $4, %%"REG_a" \n\t"
+ "mov %%"REG_a", %2 \n\t"
+
+ "3: \n\t"
+ "addl $1, %%edi \n\t"
+ "mov %%edi, %1 \n\t"
+ "cmpl $63, %%edi \n\t"
+ " jb 2b \n\t"
+ "mov %2, %%"REG_a" \n\t"
+ "movl %%edi, (%%"REG_a") \n\t"
+ "4: \n\t"
+ "addl %4, %%eax \n\t"
+ "shr $2, %%eax \n\t"
+
+ "movl %%esi, "RANGE "(%3) \n\t"
+ "movl %%ebx, "LOW "(%3) \n\t"
+ :"=&a"(coeff_count),"+m"(last), "+m"(index)\
+ :"r"(c), "m"(minusindex), "m"(significant_coeff_ctx_base), "m"(sig_off)\
+ : "%"REG_c, "%ebx", "%edx", "%esi", "%"REG_D, "memory"\
+ );
+ return coeff_count;
+}
+#endif /* defined(ARCH_X86) && !(defined(PIC) && defined(__GNUC__)) */
+
+/**
+ *
+ * @return the number of bytes read or 0 if no end
+ */
+static int get_cabac_terminate(CABACContext *c){
+ c->range -= 2;
+ if(c->low < c->range<<17){
+ renorm_cabac_decoder_once(c);
+ return 0;
+ }else{
+ return c->bytestream - c->bytestream_start;
+ }
+}
+
+/**
+ * get (truncated) unary binarization.
+ */
+static int get_cabac_u(CABACContext *c, uint8_t * state, int max, int max_index, int truncated){
+ int i;
+
+ for(i=0; i<max; i++){
+ if(get_cabac(c, state)==0)
+ return i;
+
+ if(i< max_index) state++;
+ }
+
+ return truncated ? max : -1;
+}
+
+/**
+ * get unary exp golomb k-th order binarization.
+ */
+static int get_cabac_ueg(CABACContext *c, uint8_t * state, int max, int is_signed, int k, int max_index){
+ int i, v;
+ int m= 1<<k;
+
+ if(get_cabac(c, state)==0)
+ return 0;
+
+ if(0 < max_index) state++;
+
+ for(i=1; i<max; i++){
+ if(get_cabac(c, state)==0){
+ if(is_signed && get_cabac_bypass(c)){
+ return -i;
+ }else
+ return i;
+ }
+
+ if(i < max_index) state++;
+ }
+
+ while(get_cabac_bypass(c)){
+ i+= m;
+ m+= m;
+ }
+
+ v=0;
+ while(m>>=1){
+ v+= v + get_cabac_bypass(c);
+ }
+ i += v;
+
+ if(is_signed && get_cabac_bypass(c)){
+ return -i;
+ }else
+ return i;
+}
diff --git a/src/libffmpeg/libavcodec/cavs.c b/contrib/ffmpeg/libavcodec/cavs.c
index 520089268..ee862bbc7 100644
--- a/src/libffmpeg/libavcodec/cavs.c
+++ b/contrib/ffmpeg/libavcodec/cavs.c
@@ -2,18 +2,20 @@
* Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
* Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,6 +31,7 @@
#include "mpegvideo.h"
#include "cavsdata.h"
+#ifdef CONFIG_CAVS_DECODER
typedef struct {
MpegEncContext s;
Picture picture; ///< currently decoded frame
@@ -291,7 +294,7 @@ static void intra_pred_plane(uint8_t *d,uint8_t *top,uint8_t *left,int stride) {
int x,y,ia;
int ih = 0;
int iv = 0;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
for(x=0; x<4; x++) {
ih += (x+1)*(top[5+x]-top[3-x]);
@@ -1316,50 +1319,7 @@ static int decode_seq_header(AVSContext *h) {
return 0;
}
-/**
- * finds the end of the current frame in the bitstream.
- * @return the position of the first byte of the next frame, or -1
- */
-int ff_cavs_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size) {
- int pic_found, i;
- uint32_t state;
-
- pic_found= pc->frame_start_found;
- state= pc->state;
-
- i=0;
- if(!pic_found){
- for(i=0; i<buf_size; i++){
- state= (state<<8) | buf[i];
- if(state == PIC_I_START_CODE || state == PIC_PB_START_CODE){
- i++;
- pic_found=1;
- break;
- }
- }
- }
-
- if(pic_found){
- /* EOF considered as end of frame */
- if (buf_size == 0)
- return 0;
- for(; i<buf_size; i++){
- state= (state<<8) | buf[i];
- if((state&0xFFFFFF00) == 0x100){
- if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
- pc->frame_start_found=0;
- pc->state=-1;
- return i-3;
- }
- }
- }
- }
- pc->frame_start_found= pic_found;
- pc->state= state;
- return END_NOT_FOUND;
-}
-
-void ff_cavs_flush(AVCodecContext * avctx) {
+static void cavs_flush(AVCodecContext * avctx) {
AVSContext *h = avctx->priv_data;
h->got_keyframe = 0;
}
@@ -1496,5 +1456,85 @@ AVCodec cavs_decoder = {
cavs_decode_end,
cavs_decode_frame,
CODEC_CAP_DR1 | CODEC_CAP_DELAY,
- .flush= ff_cavs_flush,
+ .flush= cavs_flush,
+};
+#endif /* CONFIG_CAVS_DECODER */
+
+#ifdef CONFIG_CAVSVIDEO_PARSER
+/**
+ * finds the end of the current frame in the bitstream.
+ * @return the position of the first byte of the next frame, or -1
+ */
+static int cavs_find_frame_end(ParseContext *pc, const uint8_t *buf,
+ int buf_size) {
+ int pic_found, i;
+ uint32_t state;
+
+ pic_found= pc->frame_start_found;
+ state= pc->state;
+
+ i=0;
+ if(!pic_found){
+ for(i=0; i<buf_size; i++){
+ state= (state<<8) | buf[i];
+ if(state == PIC_I_START_CODE || state == PIC_PB_START_CODE){
+ i++;
+ pic_found=1;
+ break;
+ }
+ }
+ }
+
+ if(pic_found){
+ /* EOF considered as end of frame */
+ if (buf_size == 0)
+ return 0;
+ for(; i<buf_size; i++){
+ state= (state<<8) | buf[i];
+ if((state&0xFFFFFF00) == 0x100){
+ if(state < SLICE_MIN_START_CODE || state > SLICE_MAX_START_CODE){
+ pc->frame_start_found=0;
+ pc->state=-1;
+ return i-3;
+ }
+ }
+ }
+ }
+ pc->frame_start_found= pic_found;
+ pc->state= state;
+ return END_NOT_FOUND;
+}
+
+static int cavsvideo_parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ ParseContext *pc = s->priv_data;
+ int next;
+
+ if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){
+ next= buf_size;
+ }else{
+ next= cavs_find_frame_end(pc, buf, buf_size);
+
+ if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+ return buf_size;
+ }
+ }
+ *poutbuf = (uint8_t *)buf;
+ *poutbuf_size = buf_size;
+ return next;
+}
+
+AVCodecParser cavsvideo_parser = {
+ { CODEC_ID_CAVS },
+ sizeof(ParseContext1),
+ NULL,
+ cavsvideo_parse,
+ ff_parse1_close,
+ ff_mpeg4video_split,
};
+#endif /* CONFIG_CAVSVIDEO_PARSER */
diff --git a/src/libffmpeg/libavcodec/cavsdata.h b/contrib/ffmpeg/libavcodec/cavsdata.h
index d6c78e433..d76985136 100644
--- a/src/libffmpeg/libavcodec/cavsdata.h
+++ b/contrib/ffmpeg/libavcodec/cavsdata.h
@@ -2,18 +2,20 @@
* Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
* Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -129,6 +131,7 @@ enum mv_loc_t {
MV_BWD_X3
};
+#ifdef CONFIG_CAVS_DECODER
static const uint8_t partition_flags[30] = {
0, //I_8X8
0, //P_SKIP
@@ -637,3 +640,4 @@ static const int_fast8_t left_modifier_l[8] = { 0,-1, 6,-1,-1, 7, 6, 7};
static const int_fast8_t top_modifier_l[8] = {-1, 1, 5,-1,-1, 5, 7, 7};
static const int_fast8_t left_modifier_c[7] = { 5,-1, 2,-1, 6, 5, 6};
static const int_fast8_t top_modifier_c[7] = { 4, 1,-1,-1, 4, 6, 6};
+#endif /* CONFIG_CAVS_DECODER */
diff --git a/contrib/ffmpeg/libavcodec/cavsdsp.c b/contrib/ffmpeg/libavcodec/cavsdsp.c
new file mode 100644
index 000000000..220dec1b8
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/cavsdsp.c
@@ -0,0 +1,546 @@
+/*
+ * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
+ *
+ * DSP functions
+ *
+ * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include "dsputil.h"
+
+/*****************************************************************************
+ *
+ * in-loop deblocking filter
+ *
+ ****************************************************************************/
+
+#define P2 p0_p[-3*stride]
+#define P1 p0_p[-2*stride]
+#define P0 p0_p[-1*stride]
+#define Q0 p0_p[ 0*stride]
+#define Q1 p0_p[ 1*stride]
+#define Q2 p0_p[ 2*stride]
+
+static inline void loop_filter_l2(uint8_t *p0_p,int stride,int alpha, int beta) {
+ int p0 = P0;
+ int q0 = Q0;
+
+ if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
+ int s = p0 + q0 + 2;
+ alpha = (alpha>>2) + 2;
+ if(abs(P2-p0) < beta && abs(p0-q0) < alpha) {
+ P0 = (P1 + p0 + s) >> 2;
+ P1 = (2*P1 + s) >> 2;
+ } else
+ P0 = (2*P1 + s) >> 2;
+ if(abs(Q2-q0) < beta && abs(q0-p0) < alpha) {
+ Q0 = (Q1 + q0 + s) >> 2;
+ Q1 = (2*Q1 + s) >> 2;
+ } else
+ Q0 = (2*Q1 + s) >> 2;
+ }
+}
+
+static inline void loop_filter_l1(uint8_t *p0_p, int stride, int alpha, int beta, int tc) {
+ int p0 = P0;
+ int q0 = Q0;
+
+ if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
+ int delta = clip(((q0-p0)*3+P1-Q1+4)>>3,-tc, tc);
+ P0 = clip_uint8(p0+delta);
+ Q0 = clip_uint8(q0-delta);
+ if(abs(P2-p0)<beta) {
+ delta = clip(((P0-P1)*3+P2-Q0+4)>>3, -tc, tc);
+ P1 = clip_uint8(P1+delta);
+ }
+ if(abs(Q2-q0)<beta) {
+ delta = clip(((Q1-Q0)*3+P0-Q2+4)>>3, -tc, tc);
+ Q1 = clip_uint8(Q1-delta);
+ }
+ }
+}
+
+static inline void loop_filter_c2(uint8_t *p0_p,int stride,int alpha, int beta) {
+ int p0 = P0;
+ int q0 = Q0;
+
+ if(abs(p0-q0)<alpha && abs(P1-p0)<beta && abs(Q1-q0)<beta) {
+ int s = p0 + q0 + 2;
+ alpha = (alpha>>2) + 2;
+ if(abs(P2-p0) < beta && abs(p0-q0) < alpha) {
+ P0 = (P1 + p0 + s) >> 2;
+ } else
+ P0 = (2*P1 + s) >> 2;
+ if(abs(Q2-q0) < beta && abs(q0-p0) < alpha) {
+ Q0 = (Q1 + q0 + s) >> 2;
+ } else
+ Q0 = (2*Q1 + s) >> 2;
+ }
+}
+
+static inline void loop_filter_c1(uint8_t *p0_p,int stride,int alpha, int beta,
+ int tc) {
+ if(abs(P0-Q0)<alpha && abs(P1-P0)<beta && abs(Q1-Q0)<beta) {
+ int delta = clip(((Q0-P0)*3+P1-Q1+4)>>3, -tc, tc);
+ P0 = clip_uint8(P0+delta);
+ Q0 = clip_uint8(Q0-delta);
+ }
+}
+
+#undef P0
+#undef P1
+#undef P2
+#undef Q0
+#undef Q1
+#undef Q2
+
+static void cavs_filter_lv_c(uint8_t *d, int stride, int alpha, int beta, int tc,
+ int bs1, int bs2) {
+ int i;
+ if(bs1==2)
+ for(i=0;i<16;i++)
+ loop_filter_l2(d + i*stride,1,alpha,beta);
+ else {
+ if(bs1)
+ for(i=0;i<8;i++)
+ loop_filter_l1(d + i*stride,1,alpha,beta,tc);
+ if (bs2)
+ for(i=8;i<16;i++)
+ loop_filter_l1(d + i*stride,1,alpha,beta,tc);
+ }
+}
+
+static void cavs_filter_lh_c(uint8_t *d, int stride, int alpha, int beta, int tc,
+ int bs1, int bs2) {
+ int i;
+ if(bs1==2)
+ for(i=0;i<16;i++)
+ loop_filter_l2(d + i,stride,alpha,beta);
+ else {
+ if(bs1)
+ for(i=0;i<8;i++)
+ loop_filter_l1(d + i,stride,alpha,beta,tc);
+ if (bs2)
+ for(i=8;i<16;i++)
+ loop_filter_l1(d + i,stride,alpha,beta,tc);
+ }
+}
+
+static void cavs_filter_cv_c(uint8_t *d, int stride, int alpha, int beta, int tc,
+ int bs1, int bs2) {
+ int i;
+ if(bs1==2)
+ for(i=0;i<8;i++)
+ loop_filter_c2(d + i*stride,1,alpha,beta);
+ else {
+ if(bs1)
+ for(i=0;i<4;i++)
+ loop_filter_c1(d + i*stride,1,alpha,beta,tc);
+ if (bs2)
+ for(i=4;i<8;i++)
+ loop_filter_c1(d + i*stride,1,alpha,beta,tc);
+ }
+}
+
+static void cavs_filter_ch_c(uint8_t *d, int stride, int alpha, int beta, int tc,
+ int bs1, int bs2) {
+ int i;
+ if(bs1==2)
+ for(i=0;i<8;i++)
+ loop_filter_c2(d + i,stride,alpha,beta);
+ else {
+ if(bs1)
+ for(i=0;i<4;i++)
+ loop_filter_c1(d + i,stride,alpha,beta,tc);
+ if (bs2)
+ for(i=4;i<8;i++)
+ loop_filter_c1(d + i,stride,alpha,beta,tc);
+ }
+}
+
+/*****************************************************************************
+ *
+ * inverse transform
+ *
+ ****************************************************************************/
+
+static void cavs_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride) {
+ int i;
+ DCTELEM (*src)[8] = (DCTELEM(*)[8])block;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ src[0][0] += 8;
+
+ for( i = 0; i < 8; i++ ) {
+ const int a0 = 3*src[i][1] - (src[i][7]<<1);
+ const int a1 = 3*src[i][3] + (src[i][5]<<1);
+ const int a2 = (src[i][3]<<1) - 3*src[i][5];
+ const int a3 = (src[i][1]<<1) + 3*src[i][7];
+
+ const int b4 = ((a0 + a1 + a3)<<1) + a1;
+ const int b5 = ((a0 - a1 + a2)<<1) + a0;
+ const int b6 = ((a3 - a2 - a1)<<1) + a3;
+ const int b7 = ((a0 - a2 - a3)<<1) - a2;
+
+ const int a7 = (src[i][2]<<2) - 10*src[i][6];
+ const int a6 = (src[i][6]<<2) + 10*src[i][2];
+ const int a5 = ((src[i][0] - src[i][4]) << 3) + 4;
+ const int a4 = ((src[i][0] + src[i][4]) << 3) + 4;
+
+ const int b0 = a4 + a6;
+ const int b1 = a5 + a7;
+ const int b2 = a5 - a7;
+ const int b3 = a4 - a6;
+
+ src[i][0] = (b0 + b4) >> 3;
+ src[i][1] = (b1 + b5) >> 3;
+ src[i][2] = (b2 + b6) >> 3;
+ src[i][3] = (b3 + b7) >> 3;
+ src[i][4] = (b3 - b7) >> 3;
+ src[i][5] = (b2 - b6) >> 3;
+ src[i][6] = (b1 - b5) >> 3;
+ src[i][7] = (b0 - b4) >> 3;
+ }
+ for( i = 0; i < 8; i++ ) {
+ const int a0 = 3*src[1][i] - (src[7][i]<<1);
+ const int a1 = 3*src[3][i] + (src[5][i]<<1);
+ const int a2 = (src[3][i]<<1) - 3*src[5][i];
+ const int a3 = (src[1][i]<<1) + 3*src[7][i];
+
+ const int b4 = ((a0 + a1 + a3)<<1) + a1;
+ const int b5 = ((a0 - a1 + a2)<<1) + a0;
+ const int b6 = ((a3 - a2 - a1)<<1) + a3;
+ const int b7 = ((a0 - a2 - a3)<<1) - a2;
+
+ const int a7 = (src[2][i]<<2) - 10*src[6][i];
+ const int a6 = (src[6][i]<<2) + 10*src[2][i];
+ const int a5 = (src[0][i] - src[4][i]) << 3;
+ const int a4 = (src[0][i] + src[4][i]) << 3;
+
+ const int b0 = a4 + a6;
+ const int b1 = a5 + a7;
+ const int b2 = a5 - a7;
+ const int b3 = a4 - a6;
+
+ dst[i + 0*stride] = cm[ dst[i + 0*stride] + ((b0 + b4) >> 7)];
+ dst[i + 1*stride] = cm[ dst[i + 1*stride] + ((b1 + b5) >> 7)];
+ dst[i + 2*stride] = cm[ dst[i + 2*stride] + ((b2 + b6) >> 7)];
+ dst[i + 3*stride] = cm[ dst[i + 3*stride] + ((b3 + b7) >> 7)];
+ dst[i + 4*stride] = cm[ dst[i + 4*stride] + ((b3 - b7) >> 7)];
+ dst[i + 5*stride] = cm[ dst[i + 5*stride] + ((b2 - b6) >> 7)];
+ dst[i + 6*stride] = cm[ dst[i + 6*stride] + ((b1 - b5) >> 7)];
+ dst[i + 7*stride] = cm[ dst[i + 7*stride] + ((b0 - b4) >> 7)];
+ }
+ memset(block,0,64*sizeof(DCTELEM));
+}
+
+/*****************************************************************************
+ *
+ * motion compensation
+ *
+ ****************************************************************************/
+
+#define CAVS_SUBPIX(OPNAME, OP, NAME, A, B, C, D, E, F) \
+static void OPNAME ## cavs_filt8_h_ ## NAME(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int h=8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<h; i++)\
+ {\
+ OP(dst[0], A*src[-2] + B*src[-1] + C*src[0] + D*src[1] + E*src[2] + F*src[3]);\
+ OP(dst[1], A*src[-1] + B*src[ 0] + C*src[1] + D*src[2] + E*src[3] + F*src[4]);\
+ OP(dst[2], A*src[ 0] + B*src[ 1] + C*src[2] + D*src[3] + E*src[4] + F*src[5]);\
+ OP(dst[3], A*src[ 1] + B*src[ 2] + C*src[3] + D*src[4] + E*src[5] + F*src[6]);\
+ OP(dst[4], A*src[ 2] + B*src[ 3] + C*src[4] + D*src[5] + E*src[6] + F*src[7]);\
+ OP(dst[5], A*src[ 3] + B*src[ 4] + C*src[5] + D*src[6] + E*src[7] + F*src[8]);\
+ OP(dst[6], A*src[ 4] + B*src[ 5] + C*src[6] + D*src[7] + E*src[8] + F*src[9]);\
+ OP(dst[7], A*src[ 5] + B*src[ 6] + C*src[7] + D*src[8] + E*src[9] + F*src[10]);\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }\
+}\
+\
+static void OPNAME ## cavs_filt8_v_ ## NAME(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ const int w=8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ for(i=0; i<w; i++)\
+ {\
+ const int srcB= src[-2*srcStride];\
+ const int srcA= src[-1*srcStride];\
+ const int src0= src[0 *srcStride];\
+ const int src1= src[1 *srcStride];\
+ const int src2= src[2 *srcStride];\
+ const int src3= src[3 *srcStride];\
+ const int src4= src[4 *srcStride];\
+ const int src5= src[5 *srcStride];\
+ const int src6= src[6 *srcStride];\
+ const int src7= src[7 *srcStride];\
+ const int src8= src[8 *srcStride];\
+ const int src9= src[9 *srcStride];\
+ const int src10= src[10 *srcStride];\
+ OP(dst[0*dstStride], A*srcB + B*srcA + C*src0 + D*src1 + E*src2 + F*src3);\
+ OP(dst[1*dstStride], A*srcA + B*src0 + C*src1 + D*src2 + E*src3 + F*src4);\
+ OP(dst[2*dstStride], A*src0 + B*src1 + C*src2 + D*src3 + E*src4 + F*src5);\
+ OP(dst[3*dstStride], A*src1 + B*src2 + C*src3 + D*src4 + E*src5 + F*src6);\
+ OP(dst[4*dstStride], A*src2 + B*src3 + C*src4 + D*src5 + E*src6 + F*src7);\
+ OP(dst[5*dstStride], A*src3 + B*src4 + C*src5 + D*src6 + E*src7 + F*src8);\
+ OP(dst[6*dstStride], A*src4 + B*src5 + C*src6 + D*src7 + E*src8 + F*src9);\
+ OP(dst[7*dstStride], A*src5 + B*src6 + C*src7 + D*src8 + E*src9 + F*src10);\
+ dst++;\
+ src++;\
+ }\
+}\
+\
+static void OPNAME ## cavs_filt16_v_ ## NAME(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## cavs_filt8_v_ ## NAME(dst , src , dstStride, srcStride);\
+ OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## cavs_filt8_v_ ## NAME(dst , src , dstStride, srcStride);\
+ OPNAME ## cavs_filt8_v_ ## NAME(dst+8, src+8, dstStride, srcStride);\
+}\
+\
+static void OPNAME ## cavs_filt16_h_ ## NAME(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## cavs_filt8_h_ ## NAME(dst , src , dstStride, srcStride);\
+ OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## cavs_filt8_h_ ## NAME(dst , src , dstStride, srcStride);\
+ OPNAME ## cavs_filt8_h_ ## NAME(dst+8, src+8, dstStride, srcStride);\
+}\
+
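+/* CAVS_SUBPIX_HV generates the two-pass filters for the diagonal sub-pel
+ * positions: a horizontal pass into a 16-bit 8x13 temporary buffer followed
+ * by a vertical pass; when FULL is non-zero a second source (src2) is added
+ * with weight 64 before rounding */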
+#define CAVS_SUBPIX_HV(OPNAME, OP, NAME, AH, BH, CH, DH, EH, FH, AV, BV, CV, DV, EV, FV, FULL) \
+static void OPNAME ## cavs_filt8_hv_ ## NAME(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int srcStride){\
+ int16_t temp[8*(8+5)];\
+ int16_t *tmp = temp;\
+ const int h=8;\
+ const int w=8;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ src1 -= 2*srcStride;\
+ for(i=0; i<h+5; i++)\
+ {\
+ tmp[0]= AH*src1[-2] + BH*src1[-1] + CH*src1[0] + DH*src1[1] + EH*src1[2] + FH*src1[3];\
+ tmp[1]= AH*src1[-1] + BH*src1[ 0] + CH*src1[1] + DH*src1[2] + EH*src1[3] + FH*src1[4];\
+ tmp[2]= AH*src1[ 0] + BH*src1[ 1] + CH*src1[2] + DH*src1[3] + EH*src1[4] + FH*src1[5];\
+ tmp[3]= AH*src1[ 1] + BH*src1[ 2] + CH*src1[3] + DH*src1[4] + EH*src1[5] + FH*src1[6];\
+ tmp[4]= AH*src1[ 2] + BH*src1[ 3] + CH*src1[4] + DH*src1[5] + EH*src1[6] + FH*src1[7];\
+ tmp[5]= AH*src1[ 3] + BH*src1[ 4] + CH*src1[5] + DH*src1[6] + EH*src1[7] + FH*src1[8];\
+ tmp[6]= AH*src1[ 4] + BH*src1[ 5] + CH*src1[6] + DH*src1[7] + EH*src1[8] + FH*src1[9];\
+ tmp[7]= AH*src1[ 5] + BH*src1[ 6] + CH*src1[7] + DH*src1[8] + EH*src1[9] + FH*src1[10];\
+ tmp+=8;\
+ src1+=srcStride;\
+ }\
+ if(FULL) {\
+ tmp = temp+8*2; \
+ for(i=0; i<w; i++) \
+ { \
+ const int tmpB= tmp[-2*8]; \
+ const int tmpA= tmp[-1*8]; \
+ const int tmp0= tmp[0 *8]; \
+ const int tmp1= tmp[1 *8]; \
+ const int tmp2= tmp[2 *8]; \
+ const int tmp3= tmp[3 *8]; \
+ const int tmp4= tmp[4 *8]; \
+ const int tmp5= tmp[5 *8]; \
+ const int tmp6= tmp[6 *8]; \
+ const int tmp7= tmp[7 *8]; \
+ const int tmp8= tmp[8 *8]; \
+ const int tmp9= tmp[9 *8]; \
+ const int tmp10=tmp[10*8]; \
+ OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3 + 64*src2[0*srcStride]); \
+ OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4 + 64*src2[1*srcStride]); \
+ OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5 + 64*src2[2*srcStride]); \
+ OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6 + 64*src2[3*srcStride]); \
+ OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7 + 64*src2[4*srcStride]); \
+ OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8 + 64*src2[5*srcStride]); \
+ OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9 + 64*src2[6*srcStride]); \
+ OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10 + 64*src2[7*srcStride]); \
+ dst++; \
+ tmp++; \
+ src2++; \
+ } \
+ } else {\
+ tmp = temp+8*2; \
+ for(i=0; i<w; i++) \
+ { \
+ const int tmpB= tmp[-2*8]; \
+ const int tmpA= tmp[-1*8]; \
+ const int tmp0= tmp[0 *8]; \
+ const int tmp1= tmp[1 *8]; \
+ const int tmp2= tmp[2 *8]; \
+ const int tmp3= tmp[3 *8]; \
+ const int tmp4= tmp[4 *8]; \
+ const int tmp5= tmp[5 *8]; \
+ const int tmp6= tmp[6 *8]; \
+ const int tmp7= tmp[7 *8]; \
+ const int tmp8= tmp[8 *8]; \
+ const int tmp9= tmp[9 *8]; \
+ const int tmp10=tmp[10*8]; \
+ OP(dst[0*dstStride], AV*tmpB + BV*tmpA + CV*tmp0 + DV*tmp1 + EV*tmp2 + FV*tmp3); \
+ OP(dst[1*dstStride], AV*tmpA + BV*tmp0 + CV*tmp1 + DV*tmp2 + EV*tmp3 + FV*tmp4); \
+ OP(dst[2*dstStride], AV*tmp0 + BV*tmp1 + CV*tmp2 + DV*tmp3 + EV*tmp4 + FV*tmp5); \
+ OP(dst[3*dstStride], AV*tmp1 + BV*tmp2 + CV*tmp3 + DV*tmp4 + EV*tmp5 + FV*tmp6); \
+ OP(dst[4*dstStride], AV*tmp2 + BV*tmp3 + CV*tmp4 + DV*tmp5 + EV*tmp6 + FV*tmp7); \
+ OP(dst[5*dstStride], AV*tmp3 + BV*tmp4 + CV*tmp5 + DV*tmp6 + EV*tmp7 + FV*tmp8); \
+ OP(dst[6*dstStride], AV*tmp4 + BV*tmp5 + CV*tmp6 + DV*tmp7 + EV*tmp8 + FV*tmp9); \
+ OP(dst[7*dstStride], AV*tmp5 + BV*tmp6 + CV*tmp7 + DV*tmp8 + EV*tmp9 + FV*tmp10); \
+ dst++; \
+ tmp++; \
+ } \
+ }\
+}\
+\
+static void OPNAME ## cavs_filt16_hv_ ## NAME(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int srcStride){ \
+ OPNAME ## cavs_filt8_hv_ ## NAME(dst , src1, src2 , dstStride, srcStride); \
+ OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, src2+8, dstStride, srcStride); \
+ src1 += 8*srcStride;\
+ src2 += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## cavs_filt8_hv_ ## NAME(dst , src1, src2 , dstStride, srcStride); \
+ OPNAME ## cavs_filt8_hv_ ## NAME(dst+8, src1+8, src2+8, dstStride, srcStride); \
+}\
+
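+/* CAVS_MC defines the motion-compensation entry points for each quarter-pel
+ * position; the digits in mcXY are the horizontal and vertical sub-pel
+ * offsets and pick one of the filter variants above */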
+#define CAVS_MC(OPNAME, SIZE) \
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _h_qpel_l(dst, src, stride, stride);\
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _h_hpel(dst, src, stride, stride);\
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _h_qpel_r(dst, src, stride, stride);\
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _v_qpel_l(dst, src, stride, stride);\
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _v_hpel(dst, src, stride, stride);\
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _v_qpel_r(dst, src, stride, stride);\
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _hv_jj(dst, src, NULL, stride, stride); \
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src, stride, stride); \
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride, stride, stride); \
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+1, stride, stride); \
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _hv_egpr(dst, src, src+stride+1,stride, stride); \
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _hv_ff(dst, src, src+stride+1,stride, stride); \
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _hv_ii(dst, src, src+stride+1,stride, stride); \
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _hv_kk(dst, src, src+stride+1,stride, stride); \
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_filt ## SIZE ## _hv_qq(dst, src, src+stride+1,stride, stride); \
+}\
+
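+/* rounding/normalisation ops: the shift in each op matches the combined gain
+ * of the filter taps it is paired with; the avg variants additionally average
+ * with the existing destination pixel */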
+#define op_put1(a, b) a = cm[((b)+4)>>3]
+#define op_put2(a, b) a = cm[((b)+64)>>7]
+#define op_put3(a, b) a = cm[((b)+32)>>6]
+#define op_put4(a, b) a = cm[((b)+512)>>10]
+#define op_avg1(a, b) a = ((a)+cm[((b)+4)>>3] +1)>>1
+#define op_avg2(a, b) a = ((a)+cm[((b)+64)>>7] +1)>>1
+#define op_avg3(a, b) a = ((a)+cm[((b)+32)>>6] +1)>>1
+#define op_avg4(a, b) a = ((a)+cm[((b)+512)>>10]+1)>>1
+CAVS_SUBPIX(put_ , op_put1, hpel, 0, -1, 5, 5, -1, 0)
+CAVS_SUBPIX(put_ , op_put2, qpel_l, -1, -2, 96, 42, -7, 0)
+CAVS_SUBPIX(put_ , op_put2, qpel_r, 0, -7, 42, 96, -2, -1)
+CAVS_SUBPIX_HV(put_, op_put3, jj, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 0)
+CAVS_SUBPIX_HV(put_, op_put4, ff, 0, -1, 5, 5, -1, 0, -1, -2, 96, 42, -7, 0, 0)
+CAVS_SUBPIX_HV(put_, op_put4, ii, -1, -2, 96, 42, -7, 0, 0, -1, 5, 5, -1, 0, 0)
+CAVS_SUBPIX_HV(put_, op_put4, kk, 0, -7, 42, 96, -2, -1, 0, -1, 5, 5, -1, 0, 0)
+CAVS_SUBPIX_HV(put_, op_put4, qq, 0, -1, 5, 5, -1, 0, 0, -7, 42, 96, -2,-1, 0)
+CAVS_SUBPIX_HV(put_, op_put2, egpr, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 1)
+CAVS_SUBPIX(avg_ , op_avg1, hpel, 0, -1, 5, 5, -1, 0)
+CAVS_SUBPIX(avg_ , op_avg2, qpel_l, -1, -2, 96, 42, -7, 0)
+CAVS_SUBPIX(avg_ , op_avg2, qpel_r, 0, -7, 42, 96, -2, -1)
+CAVS_SUBPIX_HV(avg_, op_avg3, jj, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 0)
+CAVS_SUBPIX_HV(avg_, op_avg4, ff, 0, -1, 5, 5, -1, 0, -1, -2, 96, 42, -7, 0, 0)
+CAVS_SUBPIX_HV(avg_, op_avg4, ii, -1, -2, 96, 42, -7, 0, 0, -1, 5, 5, -1, 0, 0)
+CAVS_SUBPIX_HV(avg_, op_avg4, kk, 0, -7, 42, 96, -2, -1, 0, -1, 5, 5, -1, 0, 0)
+CAVS_SUBPIX_HV(avg_, op_avg4, qq, 0, -1, 5, 5, -1, 0, 0, -7, 42, 96, -2,-1, 0)
+CAVS_SUBPIX_HV(avg_, op_avg2, egpr, 0, -1, 5, 5, -1, 0, 0, -1, 5, 5, -1, 0, 1)
+CAVS_MC(put_, 8)
+CAVS_MC(put_, 16)
+CAVS_MC(avg_, 8)
+CAVS_MC(avg_, 16)
+
+void ff_put_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride);
+void ff_avg_cavs_qpel8_mc00_c(uint8_t *dst, uint8_t *src, int stride);
+void ff_put_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride);
+void ff_avg_cavs_qpel16_mc00_c(uint8_t *dst, uint8_t *src, int stride);
+
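+/* fill the put/avg qpel function tables (index 0: 16x16, index 1: 8x8) and
+ * hook up the CAVS loop filters and inverse transform */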
+void ff_cavsdsp_init(DSPContext* c, AVCodecContext *avctx) {
+#define dspfunc(PFX, IDX, NUM) \
+ c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_c; \
+ c->PFX ## _pixels_tab[IDX][ 1] = ff_ ## PFX ## NUM ## _mc10_c; \
+ c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_c; \
+ c->PFX ## _pixels_tab[IDX][ 3] = ff_ ## PFX ## NUM ## _mc30_c; \
+ c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_c; \
+ c->PFX ## _pixels_tab[IDX][ 5] = ff_ ## PFX ## NUM ## _mc11_c; \
+ c->PFX ## _pixels_tab[IDX][ 6] = ff_ ## PFX ## NUM ## _mc21_c; \
+ c->PFX ## _pixels_tab[IDX][ 7] = ff_ ## PFX ## NUM ## _mc31_c; \
+ c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_c; \
+ c->PFX ## _pixels_tab[IDX][ 9] = ff_ ## PFX ## NUM ## _mc12_c; \
+ c->PFX ## _pixels_tab[IDX][10] = ff_ ## PFX ## NUM ## _mc22_c; \
+ c->PFX ## _pixels_tab[IDX][11] = ff_ ## PFX ## NUM ## _mc32_c; \
+ c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_c; \
+ c->PFX ## _pixels_tab[IDX][13] = ff_ ## PFX ## NUM ## _mc13_c; \
+ c->PFX ## _pixels_tab[IDX][14] = ff_ ## PFX ## NUM ## _mc23_c; \
+ c->PFX ## _pixels_tab[IDX][15] = ff_ ## PFX ## NUM ## _mc33_c
+ dspfunc(put_cavs_qpel, 0, 16);
+ dspfunc(put_cavs_qpel, 1, 8);
+ dspfunc(avg_cavs_qpel, 0, 16);
+ dspfunc(avg_cavs_qpel, 1, 8);
+ c->cavs_filter_lv = cavs_filter_lv_c;
+ c->cavs_filter_lh = cavs_filter_lh_c;
+ c->cavs_filter_cv = cavs_filter_cv_c;
+ c->cavs_filter_ch = cavs_filter_ch_c;
+ c->cavs_idct8_add = cavs_idct8_add_c;
+}
diff --git a/src/libffmpeg/libavcodec/cinepak.c b/contrib/ffmpeg/libavcodec/cinepak.c
index 797681231..e137377e5 100644
--- a/src/libffmpeg/libavcodec/cinepak.c
+++ b/contrib/ffmpeg/libavcodec/cinepak.c
@@ -2,18 +2,20 @@
* Cinepak Video Decoder
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/cljr.c b/contrib/ffmpeg/libavcodec/cljr.c
index feb0d8bb2..44810f5cf 100644
--- a/src/libffmpeg/libavcodec/cljr.c
+++ b/contrib/ffmpeg/libavcodec/cljr.c
@@ -2,18 +2,20 @@
* Cirrus Logic AccuPak (CLJR) codec
* Copyright (c) 2003 Alex Beregszaszi
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/cook.c b/contrib/ffmpeg/libavcodec/cook.c
index fb04cf574..47d9ce2c3 100644
--- a/src/libffmpeg/libavcodec/cook.c
+++ b/contrib/ffmpeg/libavcodec/cook.c
@@ -3,18 +3,20 @@
* Copyright (c) 2003 Sascha Sommer
* Copyright (c) 2005 Benjamin Larsson
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -45,7 +47,6 @@
#include <stddef.h>
#include <stdio.h>
-#define ALT_BITSTREAM_READER
#include "avcodec.h"
#include "bitstream.h"
#include "dsputil.h"
diff --git a/src/libffmpeg/libavcodec/cookdata.h b/contrib/ffmpeg/libavcodec/cookdata.h
index 1247d9d91..395c9a7dd 100644
--- a/src/libffmpeg/libavcodec/cookdata.h
+++ b/contrib/ffmpeg/libavcodec/cookdata.h
@@ -3,18 +3,20 @@
* Copyright (c) 2003 Sascha Sommer
* Copyright (c) 2005 Benjamin Larsson
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/cscd.c b/contrib/ffmpeg/libavcodec/cscd.c
index 0d6e04526..e4257f4c0 100644
--- a/src/libffmpeg/libavcodec/cscd.c
+++ b/contrib/ffmpeg/libavcodec/cscd.c
@@ -2,18 +2,20 @@
* CamStudio decoder
* Copyright (c) 2006 Reimar Doeffinger
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
diff --git a/src/libffmpeg/libavcodec/cyuv.c b/contrib/ffmpeg/libavcodec/cyuv.c
index b64e1a58b..101f2bd85 100644
--- a/src/libffmpeg/libavcodec/cyuv.c
+++ b/contrib/ffmpeg/libavcodec/cyuv.c
@@ -2,18 +2,20 @@
*
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Creative YUV (CYUV) Video Decoder
@@ -75,9 +77,9 @@ static int cyuv_decode_frame(AVCodecContext *avctx,
int v_ptr;
/* prediction error tables (make it clear that they are signed values) */
- signed char *y_table = buf + 0;
- signed char *u_table = buf + 16;
- signed char *v_table = buf + 32;
+ signed char *y_table = (signed char*)buf + 0;
+ signed char *u_table = (signed char*)buf + 16;
+ signed char *v_table = (signed char*)buf + 32;
unsigned char y_pred, u_pred, v_pred;
int stream_ptr;
diff --git a/contrib/ffmpeg/libavcodec/dct-test.c b/contrib/ffmpeg/libavcodec/dct-test.c
new file mode 100644
index 000000000..2c16f47e4
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/dct-test.c
@@ -0,0 +1,535 @@
+/*
+ * (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file dct-test.c
+ * DCT test. (c) 2001 Fabrice Bellard.
+ * Started from sample code by Juan J. Sierralta P.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "dsputil.h"
+
+#include "simple_idct.h"
+#include "faandct.h"
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#undef printf
+
+void *fast_memcpy(void *a, const void *b, size_t c){ return memcpy(a, b, c); }
+
+/* reference fdct/idct */
+extern void fdct(DCTELEM *block);
+extern void idct(DCTELEM *block);
+extern void ff_idct_xvid_mmx(DCTELEM *block);
+extern void ff_idct_xvid_mmx2(DCTELEM *block);
+extern void init_fdct();
+
+extern void j_rev_dct(DCTELEM *data);
+extern void ff_mmx_idct(DCTELEM *data);
+extern void ff_mmxext_idct(DCTELEM *data);
+
+extern void odivx_idct_c (short *block);
+
+#define AANSCALE_BITS 12
+static const unsigned short aanscales[64] = {
+ /* precomputed values scaled up by 14 bits */
+ 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
+ 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270,
+ 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906,
+ 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315,
+ 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520,
+ 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552,
+ 8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446,
+ 4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247
+};
+
+uint8_t cropTbl[256 + 2 * MAX_NEG_CROP];
+
+int64_t gettime(void)
+{
+ struct timeval tv;
+ gettimeofday(&tv,NULL);
+ return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
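+/* number of random blocks used for the accuracy test and per timing round of
+ * the speed test */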
+#define NB_ITS 20000
+#define NB_ITS_SPEED 50000
+
+static short idct_mmx_perm[64];
+
+static short idct_simple_mmx_perm[64]={
+ 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
+ 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
+ 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
+ 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
+ 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
+ 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
+ 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
+ 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
+};
+
+void idct_mmx_init(void)
+{
+ int i;
+
+ /* the mmx/mmxext idct uses a reordered input, so we patch scan tables */
+ for (i = 0; i < 64; i++) {
+ idct_mmx_perm[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
+// idct_simple_mmx_perm[i] = simple_block_permute_op(i);
+ }
+}
+
+static DCTELEM block[64] __attribute__ ((aligned (8)));
+static DCTELEM block1[64] __attribute__ ((aligned (8)));
+static DCTELEM block_org[64] __attribute__ ((aligned (8)));
+
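+/* run fdct_func and fdct_ref on NB_ITS random blocks, report peak, mean
+ * squared and systematic error, then time fdct_func in batches of
+ * NB_ITS_SPEED transforms */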
+void dct_error(const char *name, int is_idct,
+ void (*fdct_func)(DCTELEM *block),
+ void (*fdct_ref)(DCTELEM *block), int test)
+{
+ int it, i, scale;
+ int err_inf, v;
+ int64_t err2, ti, ti1, it1;
+ int64_t sysErr[64], sysErrMax=0;
+ int maxout=0;
+ int blockSumErrMax=0, blockSumErr;
+
+ srandom(0);
+
+ err_inf = 0;
+ err2 = 0;
+ for(i=0; i<64; i++) sysErr[i]=0;
+ for(it=0;it<NB_ITS;it++) {
+ for(i=0;i<64;i++)
+ block1[i] = 0;
+ switch(test){
+ case 0:
+ for(i=0;i<64;i++)
+ block1[i] = (random() % 512) -256;
+ if (is_idct){
+ fdct(block1);
+
+ for(i=0;i<64;i++)
+ block1[i]>>=3;
+ }
+ break;
+ case 1:{
+ int num= (random()%10)+1;
+ for(i=0;i<num;i++)
+ block1[random()%64] = (random() % 512) -256;
+ }break;
+ case 2:
+ block1[0]= (random()%4096)-2048;
+ block1[63]= (block1[0]&1)^1;
+ break;
+ }
+
+#if 0 // simulate mismatch control
+{ int sum=0;
+ for(i=0;i<64;i++)
+ sum+=block1[i];
+
+ if((sum&1)==0) block1[63]^=1;
+}
+#endif
+
+ for(i=0; i<64; i++)
+ block_org[i]= block1[i];
+
+ if (fdct_func == ff_mmx_idct ||
+ fdct_func == j_rev_dct || fdct_func == ff_mmxext_idct) {
+ for(i=0;i<64;i++)
+ block[idct_mmx_perm[i]] = block1[i];
+ } else if(fdct_func == ff_simple_idct_mmx ) {
+ for(i=0;i<64;i++)
+ block[idct_simple_mmx_perm[i]] = block1[i];
+
+ } else {
+ for(i=0; i<64; i++)
+ block[i]= block1[i];
+ }
+#if 0 // simulate mismatch control for tested IDCT but not the ref
+{ int sum=0;
+ for(i=0;i<64;i++)
+ sum+=block[i];
+
+ if((sum&1)==0) block[63]^=1;
+}
+#endif
+
+ fdct_func(block);
+ emms(); /* for ff_mmx_idct */
+
+ if (fdct_func == fdct_ifast
+#ifndef FAAN_POSTSCALE
+ || fdct_func == ff_faandct
+#endif
+ ) {
+ for(i=0; i<64; i++) {
+ scale = 8*(1 << (AANSCALE_BITS + 11)) / aanscales[i];
+ block[i] = (block[i] * scale /*+ (1<<(AANSCALE_BITS-1))*/) >> AANSCALE_BITS;
+ }
+ }
+
+ fdct_ref(block1);
+
+ blockSumErr=0;
+ for(i=0;i<64;i++) {
+ v = abs(block[i] - block1[i]);
+ if (v > err_inf)
+ err_inf = v;
+ err2 += v * v;
+ sysErr[i] += block[i] - block1[i];
+ blockSumErr += v;
+ if( abs(block[i])>maxout) maxout=abs(block[i]);
+ }
+ if(blockSumErrMax < blockSumErr) blockSumErrMax= blockSumErr;
+#if 0 // print different matrix pairs
+ if(blockSumErr){
+ printf("\n");
+ for(i=0; i<64; i++){
+ if((i&7)==0) printf("\n");
+ printf("%4d ", block_org[i]);
+ }
+ for(i=0; i<64; i++){
+ if((i&7)==0) printf("\n");
+ printf("%4d ", block[i] - block1[i]);
+ }
+ }
+#endif
+ }
+ for(i=0; i<64; i++) sysErrMax= MAX(sysErrMax, FFABS(sysErr[i]));
+
+#if 1 // dump systematic errors
+ for(i=0; i<64; i++){
+ if(i%8==0) printf("\n");
+ printf("%5d ", (int)sysErr[i]);
+ }
+ printf("\n");
+#endif
+
+ printf("%s %s: err_inf=%d err2=%0.8f syserr=%0.8f maxout=%d blockSumErr=%d\n",
+ is_idct ? "IDCT" : "DCT",
+ name, err_inf, (double)err2 / NB_ITS / 64.0, (double)sysErrMax / NB_ITS, maxout, blockSumErrMax);
+#if 1 //Speed test
+ /* speed test */
+ for(i=0;i<64;i++)
+ block1[i] = 0;
+ switch(test){
+ case 0:
+ for(i=0;i<64;i++)
+ block1[i] = (random() % 512) -256;
+ if (is_idct){
+ fdct(block1);
+
+ for(i=0;i<64;i++)
+ block1[i]>>=3;
+ }
+ break;
+ case 1:{
+ case 2:
+ block1[0] = (random() % 512) -256;
+ block1[1] = (random() % 512) -256;
+ block1[2] = (random() % 512) -256;
+ block1[3] = (random() % 512) -256;
+ }break;
+ }
+
+ if (fdct_func == ff_mmx_idct ||
+ fdct_func == j_rev_dct || fdct_func == ff_mmxext_idct) {
+ for(i=0;i<64;i++)
+ block[idct_mmx_perm[i]] = block1[i];
+ } else if(fdct_func == ff_simple_idct_mmx ) {
+ for(i=0;i<64;i++)
+ block[idct_simple_mmx_perm[i]] = block1[i];
+ } else {
+ for(i=0; i<64; i++)
+ block[i]= block1[i];
+ }
+
+ ti = gettime();
+ it1 = 0;
+ do {
+ for(it=0;it<NB_ITS_SPEED;it++) {
+ for(i=0; i<64; i++)
+ block[i]= block1[i];
+// memcpy(block, block1, sizeof(DCTELEM) * 64);
+// don't use memcpy here, especially not fast_memcpy, because it uses movntq (non-temporal stores)
+ fdct_func(block);
+ }
+ it1 += NB_ITS_SPEED;
+ ti1 = gettime() - ti;
+ } while (ti1 < 1000000);
+ emms();
+
+ printf("%s %s: %0.1f kdct/s\n",
+ is_idct ? "IDCT" : "DCT",
+ name, (double)it1 * 1000.0 / (double)ti1);
+#endif
+}
+
+static uint8_t img_dest[64] __attribute__ ((aligned (8)));
+static uint8_t img_dest1[64] __attribute__ ((aligned (8)));
+
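+/* double-precision reference for the 2-4-8 IDCT: a vertical butterfly over
+ * row pairs, an 8-point transform along each row and 4-point transforms down
+ * each half of the columns */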
+void idct248_ref(uint8_t *dest, int linesize, int16_t *block)
+{
+ static int init;
+ static double c8[8][8];
+ static double c4[4][4];
+ double block1[64], block2[64], block3[64];
+ double s, sum, v;
+ int i, j, k;
+
+ if (!init) {
+ init = 1;
+
+ for(i=0;i<8;i++) {
+ sum = 0;
+ for(j=0;j<8;j++) {
+ s = (i==0) ? sqrt(1.0/8.0) : sqrt(1.0/4.0);
+ c8[i][j] = s * cos(M_PI * i * (j + 0.5) / 8.0);
+ sum += c8[i][j] * c8[i][j];
+ }
+ }
+
+ for(i=0;i<4;i++) {
+ sum = 0;
+ for(j=0;j<4;j++) {
+ s = (i==0) ? sqrt(1.0/4.0) : sqrt(1.0/2.0);
+ c4[i][j] = s * cos(M_PI * i * (j + 0.5) / 4.0);
+ sum += c4[i][j] * c4[i][j];
+ }
+ }
+ }
+
+ /* butterfly */
+ s = 0.5 * sqrt(2.0);
+ for(i=0;i<4;i++) {
+ for(j=0;j<8;j++) {
+ block1[8*(2*i)+j] = (block[8*(2*i)+j] + block[8*(2*i+1)+j]) * s;
+ block1[8*(2*i+1)+j] = (block[8*(2*i)+j] - block[8*(2*i+1)+j]) * s;
+ }
+ }
+
+ /* idct8 on lines */
+ for(i=0;i<8;i++) {
+ for(j=0;j<8;j++) {
+ sum = 0;
+ for(k=0;k<8;k++)
+ sum += c8[k][j] * block1[8*i+k];
+ block2[8*i+j] = sum;
+ }
+ }
+
+ /* idct4 */
+ for(i=0;i<8;i++) {
+ for(j=0;j<4;j++) {
+ /* top */
+ sum = 0;
+ for(k=0;k<4;k++)
+ sum += c4[k][j] * block2[8*(2*k)+i];
+ block3[8*(2*j)+i] = sum;
+
+ /* bottom */
+ sum = 0;
+ for(k=0;k<4;k++)
+ sum += c4[k][j] * block2[8*(2*k+1)+i];
+ block3[8*(2*j+1)+i] = sum;
+ }
+ }
+
+ /* clamp and store the result */
+ for(i=0;i<8;i++) {
+ for(j=0;j<8;j++) {
+ v = block3[8*i+j];
+ if (v < 0)
+ v = 0;
+ else if (v > 255)
+ v = 255;
+ dest[i * linesize + j] = (int)rint(v);
+ }
+ }
+}
+
+void idct248_error(const char *name,
+ void (*idct248_put)(uint8_t *dest, int line_size, int16_t *block))
+{
+ int it, i, err_max, v;
+ int64_t it1, ti, ti1;
+
+ srandom(0);
+
+ /* just one test to see if code is correct (precision is less
+ important here) */
+ err_max = 0;
+ for(it=0;it<NB_ITS;it++) {
+
+ /* XXX: use forward transform to generate values */
+ for(i=0;i<64;i++)
+ block1[i] = (random() % 256) - 128;
+ block1[0] += 1024;
+
+ for(i=0; i<64; i++)
+ block[i]= block1[i];
+ idct248_ref(img_dest1, 8, block);
+
+ for(i=0; i<64; i++)
+ block[i]= block1[i];
+ idct248_put(img_dest, 8, block);
+
+ for(i=0;i<64;i++) {
+ v = abs((int)img_dest[i] - (int)img_dest1[i]);
+ if (v == 255)
+ printf("%d %d\n", img_dest[i], img_dest1[i]);
+ if (v > err_max)
+ err_max = v;
+ }
+#if 0
+ printf("ref=\n");
+ for(i=0;i<8;i++) {
+ int j;
+ for(j=0;j<8;j++) {
+ printf(" %3d", img_dest1[i*8+j]);
+ }
+ printf("\n");
+ }
+
+ printf("out=\n");
+ for(i=0;i<8;i++) {
+ int j;
+ for(j=0;j<8;j++) {
+ printf(" %3d", img_dest[i*8+j]);
+ }
+ printf("\n");
+ }
+#endif
+ }
+ printf("%s %s: err_inf=%d\n",
+ 1 ? "IDCT248" : "DCT248",
+ name, err_max);
+
+ ti = gettime();
+ it1 = 0;
+ do {
+ for(it=0;it<NB_ITS_SPEED;it++) {
+ for(i=0; i<64; i++)
+ block[i]= block1[i];
+// memcpy(block, block1, sizeof(DCTELEM) * 64);
+// don't use memcpy here, especially not fast_memcpy, because it uses movntq (non-temporal stores)
+ idct248_put(img_dest, 8, block);
+ }
+ it1 += NB_ITS_SPEED;
+ ti1 = gettime() - ti;
+ } while (ti1 < 1000000);
+ emms();
+
+ printf("%s %s: %0.1f kdct/s\n",
+ 1 ? "IDCT248" : "DCT248",
+ name, (double)it1 * 1000.0 / (double)ti1);
+}
+
+void help(void)
+{
+ printf("dct-test [-i] [<test-number>]\n"
+ "test-number 0 -> test with random matrixes\n"
+ " 1 -> test with random sparse matrixes\n"
+ " 2 -> do 3. test from mpeg4 std\n"
+ "-i test IDCT implementations\n"
+ "-4 test IDCT248 implementations\n");
+ exit(1);
+}
+
+int main(int argc, char **argv)
+{
+ int test_idct = 0, test_248_dct = 0;
+ int c,i;
+ int test=1;
+
+ init_fdct();
+ idct_mmx_init();
+
+ for(i=0;i<256;i++) cropTbl[i + MAX_NEG_CROP] = i;
+ for(i=0;i<MAX_NEG_CROP;i++) {
+ cropTbl[i] = 0;
+ cropTbl[i + MAX_NEG_CROP + 256] = 255;
+ }
+
+ for(;;) {
+ c = getopt(argc, argv, "ih4");
+ if (c == -1)
+ break;
+ switch(c) {
+ case 'i':
+ test_idct = 1;
+ break;
+ case '4':
+ test_248_dct = 1;
+ break;
+ default :
+ case 'h':
+ help();
+ break;
+ }
+ }
+
+ if(optind <argc) test= atoi(argv[optind]);
+
+ printf("ffmpeg DCT/IDCT test\n");
+
+ if (test_248_dct) {
+ idct248_error("SIMPLE-C", simple_idct248_put);
+ } else {
+ if (!test_idct) {
+ dct_error("REF-DBL", 0, fdct, fdct, test); /* only to verify code ! */
+ dct_error("IJG-AAN-INT", 0, fdct_ifast, fdct, test);
+ dct_error("IJG-LLM-INT", 0, ff_jpeg_fdct_islow, fdct, test);
+ dct_error("MMX", 0, ff_fdct_mmx, fdct, test);
+ dct_error("MMX2", 0, ff_fdct_mmx2, fdct, test);
+ dct_error("FAAN", 0, ff_faandct, fdct, test);
+ } else {
+ dct_error("REF-DBL", 1, idct, idct, test);
+ dct_error("INT", 1, j_rev_dct, idct, test);
+ dct_error("LIBMPEG2-MMX", 1, ff_mmx_idct, idct, test);
+ dct_error("LIBMPEG2-MMXEXT", 1, ff_mmxext_idct, idct, test);
+ dct_error("SIMPLE-C", 1, simple_idct, idct, test);
+ dct_error("SIMPLE-MMX", 1, ff_simple_idct_mmx, idct, test);
+ dct_error("XVID-MMX", 1, ff_idct_xvid_mmx, idct, test);
+ dct_error("XVID-MMX2", 1, ff_idct_xvid_mmx2, idct, test);
+ // dct_error("ODIVX-C", 1, odivx_idct_c, idct);
+ //printf(" test against odivx idct\n");
+ // dct_error("REF", 1, idct, odivx_idct_c);
+ // dct_error("INT", 1, j_rev_dct, odivx_idct_c);
+ // dct_error("MMX", 1, ff_mmx_idct, odivx_idct_c);
+ // dct_error("MMXEXT", 1, ff_mmxext_idct, odivx_idct_c);
+ // dct_error("SIMPLE-C", 1, simple_idct, odivx_idct_c);
+ // dct_error("SIMPLE-MMX", 1, ff_simple_idct_mmx, odivx_idct_c);
+ // dct_error("ODIVX-C", 1, odivx_idct_c, odivx_idct_c);
+ }
+ }
+ return 0;
+}
diff --git a/src/libffmpeg/libavcodec/dpcm.c b/contrib/ffmpeg/libavcodec/dpcm.c
index df9da9489..99c0cac64 100644
--- a/src/libffmpeg/libavcodec/dpcm.c
+++ b/contrib/ffmpeg/libavcodec/dpcm.c
@@ -2,18 +2,20 @@
* Assorted DPCM codecs
* Copyright (c) 2003 The ffmpeg Project.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/contrib/ffmpeg/libavcodec/dsicinav.c b/contrib/ffmpeg/libavcodec/dsicinav.c
new file mode 100644
index 000000000..ded53c45a
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/dsicinav.c
@@ -0,0 +1,362 @@
+/*
+ * Delphine Software International CIN Audio/Video Decoders
+ * Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file dsicinav.c
+ * Delphine Software International CIN audio/video decoders
+ */
+
+#include "avcodec.h"
+#include "common.h"
+
+
+typedef enum CinVideoBitmapIndex {
+ CIN_CUR_BMP = 0, /* current */
+ CIN_PRE_BMP = 1, /* previous */
+ CIN_INT_BMP = 2 /* intermediate */
+} CinVideoBitmapIndex;
+
+typedef struct CinVideoContext {
+ AVCodecContext *avctx;
+ AVFrame frame;
+ unsigned int bitmap_size;
+ uint32_t palette[256];
+ uint8_t *bitmap_table[3];
+} CinVideoContext;
+
+typedef struct CinAudioContext {
+ AVCodecContext *avctx;
+ int initial_decode_frame;
+ int delta;
+} CinAudioContext;
+
+
+/* table defining a geometric sequence with multiplier = 32767 ^ (1 / 128) */
+static const int16_t cinaudio_delta16_table[256] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, -30210, -27853, -25680, -23677, -21829,
+ -20126, -18556, -17108, -15774, -14543, -13408, -12362, -11398,
+ -10508, -9689, -8933, -8236, -7593, -7001, -6455, -5951,
+ -5487, -5059, -4664, -4300, -3964, -3655, -3370, -3107,
+ -2865, -2641, -2435, -2245, -2070, -1908, -1759, -1622,
+ -1495, -1379, -1271, -1172, -1080, -996, -918, -847,
+ -781, -720, -663, -612, -564, -520, -479, -442,
+ -407, -376, -346, -319, -294, -271, -250, -230,
+ -212, -196, -181, -166, -153, -141, -130, -120,
+ -111, -102, -94, -87, -80, -74, -68, -62,
+ -58, -53, -49, -45, -41, -38, -35, -32,
+ -30, -27, -25, -23, -21, -20, -18, -17,
+ -15, -14, -13, -12, -11, -10, -9, -8,
+ -7, -6, -5, -4, -3, -2, -1, 0,
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 17, 18, 20, 21, 23, 25, 27, 30,
+ 32, 35, 38, 41, 45, 49, 53, 58,
+ 62, 68, 74, 80, 87, 94, 102, 111,
+ 120, 130, 141, 153, 166, 181, 196, 212,
+ 230, 250, 271, 294, 319, 346, 376, 407,
+ 442, 479, 520, 564, 612, 663, 720, 781,
+ 847, 918, 996, 1080, 1172, 1271, 1379, 1495,
+ 1622, 1759, 1908, 2070, 2245, 2435, 2641, 2865,
+ 3107, 3370, 3655, 3964, 4300, 4664, 5059, 5487,
+ 5951, 6455, 7001, 7593, 8236, 8933, 9689, 10508,
+ 11398, 12362, 13408, 14543, 15774, 17108, 18556, 20126,
+ 21829, 23677, 25680, 27853, 30210, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+static int cinvideo_decode_init(AVCodecContext *avctx)
+{
+ CinVideoContext *cin = (CinVideoContext *)avctx->priv_data;
+ unsigned int i;
+
+ cin->avctx = avctx;
+ avctx->pix_fmt = PIX_FMT_PAL8;
+ avctx->has_b_frames = 0;
+
+ cin->frame.data[0] = NULL;
+
+ cin->bitmap_size = avctx->width * avctx->height;
+ for (i = 0; i < 3; ++i) {
+ cin->bitmap_table[i] = av_mallocz(cin->bitmap_size);
+ if (!cin->bitmap_table[i])
+ av_log(avctx, AV_LOG_ERROR, "Can't allocate bitmap buffers.\n");
+ }
+
+ return 0;
+}
+
+static void cin_apply_delta_data(const unsigned char *src, unsigned char *dst, int size)
+{
+ while (size--)
+ *dst++ += *src++;
+}
+
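+/* each input nibble either indexes the 15-entry code table sent at the start
+ * of the buffer or, when it equals 15, escapes to literal data */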
+static int cin_decode_huffman(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
+{
+ int b, huff_code = 0;
+ unsigned char huff_code_table[15];
+ unsigned char *dst_cur = dst;
+ unsigned char *dst_end = dst + dst_size;
+ const unsigned char *src_end = src + src_size;
+
+ memcpy(huff_code_table, src, 15); src += 15; src_size -= 15;
+
+ while (src < src_end) {
+ huff_code = *src++;
+ if ((huff_code >> 4) == 15) {
+ b = huff_code << 4;
+ huff_code = *src++;
+ *dst_cur++ = b | (huff_code >> 4);
+ } else
+ *dst_cur++ = huff_code_table[huff_code >> 4];
+ if (dst_cur >= dst_end)
+ break;
+
+ huff_code &= 15;
+ if (huff_code == 15) {
+ *dst_cur++ = *src++;
+ } else
+ *dst_cur++ = huff_code_table[huff_code];
+ if (dst_cur >= dst_end)
+ break;
+ }
+
+ return dst_cur - dst;
+}
+
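+/* LZSS: every control byte carries 8 flags; a set bit copies one literal
+ * byte, a clear bit copies a (length, offset) back-reference byte by byte so
+ * that overlapping copies repeat data */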
+static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
+{
+ uint16_t cmd;
+ int i, sz, offset, code;
+ unsigned char *dst_end = dst + dst_size;
+ const unsigned char *src_end = src + src_size;
+
+ while (src < src_end && dst < dst_end) {
+ code = *src++;
+ for (i = 0; i < 8 && src < src_end && dst < dst_end; ++i) {
+ if (code & (1 << i)) {
+ *dst++ = *src++;
+ } else {
+ cmd = LE_16(src); src += 2;
+ offset = cmd >> 4;
+ sz = (cmd & 0xF) + 2;
+ /* don't use memcpy/memmove here, as the decoding routine (ab)uses */
+ /* overlapping copies within the destination buffer to repeat bytes */
+ sz = FFMIN(sz, dst_end - dst);
+ while (sz--) {
+ *dst = *(dst - offset - 1);
+ ++dst;
+ }
+ }
+ }
+ }
+}
+
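+/* RLE: top bit set repeats the next byte (code - 0x7F) times, otherwise
+ * (code + 1) literal bytes follow */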
+static void cin_decode_rle(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
+{
+ int len, code;
+ unsigned char *dst_end = dst + dst_size;
+ const unsigned char *src_end = src + src_size;
+
+ while (src < src_end && dst < dst_end) {
+ code = *src++;
+ if (code & 0x80) {
+ len = code - 0x7F;
+ memset(dst, *src++, FFMIN(len, dst_end - dst));
+ } else {
+ len = code + 1;
+ memcpy(dst, src, FFMIN(len, dst_end - dst));
+ src += len;
+ }
+ dst += len;
+ }
+}
+
+static int cinvideo_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ CinVideoContext *cin = (CinVideoContext *)avctx->priv_data;
+ int i, y, palette_type, palette_colors_count, bitmap_frame_type, bitmap_frame_size;
+
+ cin->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
+ if (avctx->reget_buffer(avctx, &cin->frame)) {
+ av_log(cin->avctx, AV_LOG_ERROR, "delphinecinvideo: reget_buffer() failed to allocate a frame\n");
+ return -1;
+ }
+
+ palette_type = buf[0];
+ palette_colors_count = buf[1] | (buf[2] << 8);
+ bitmap_frame_type = buf[3];
+ buf += 4;
+
+ bitmap_frame_size = buf_size - 4;
+
+ /* handle palette */
+ if (palette_type == 0) {
+ for (i = 0; i < palette_colors_count; ++i) {
+ cin->palette[i] = (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ buf += 3;
+ bitmap_frame_size -= 3;
+ }
+ } else {
+ for (i = 0; i < palette_colors_count; ++i) {
+ cin->palette[buf[0]] = (buf[3] << 16) | (buf[2] << 8) | buf[1];
+ buf += 4;
+ bitmap_frame_size -= 4;
+ }
+ }
+ memcpy(cin->frame.data[1], cin->palette, sizeof(cin->palette));
+ cin->frame.palette_has_changed = 1;
+
+ /* note: the decoding routines below assume that surface.width = surface.pitch */
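+ /* frame types: 9 RLE, 34 RLE + delta against the previous frame,
+ 35/36 huffman + RLE (36 also applies the delta), 37 huffman,
+ 38 LZSS, 39 LZSS + delta */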
+ switch (bitmap_frame_type) {
+ case 9:
+ cin_decode_rle(buf, bitmap_frame_size,
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ break;
+ case 34:
+ cin_decode_rle(buf, bitmap_frame_size,
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP],
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ break;
+ case 35:
+ cin_decode_huffman(buf, bitmap_frame_size,
+ cin->bitmap_table[CIN_INT_BMP], cin->bitmap_size);
+ cin_decode_rle(cin->bitmap_table[CIN_INT_BMP], bitmap_frame_size,
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ break;
+ case 36:
+ bitmap_frame_size = cin_decode_huffman(buf, bitmap_frame_size,
+ cin->bitmap_table[CIN_INT_BMP], cin->bitmap_size);
+ cin_decode_rle(cin->bitmap_table[CIN_INT_BMP], bitmap_frame_size,
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP],
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ break;
+ case 37:
+ cin_decode_huffman(buf, bitmap_frame_size,
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ break;
+ case 38:
+ cin_decode_lzss(buf, bitmap_frame_size,
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ break;
+ case 39:
+ cin_decode_lzss(buf, bitmap_frame_size,
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ cin_apply_delta_data(cin->bitmap_table[CIN_PRE_BMP],
+ cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_size);
+ break;
+ }
+
+ for (y = 0; y < cin->avctx->height; ++y)
+ memcpy(cin->frame.data[0] + (cin->avctx->height - 1 - y) * cin->frame.linesize[0],
+ cin->bitmap_table[CIN_CUR_BMP] + y * cin->avctx->width,
+ cin->avctx->width);
+
+ FFSWAP(uint8_t *, cin->bitmap_table[CIN_CUR_BMP], cin->bitmap_table[CIN_PRE_BMP]);
+
+ *data_size = sizeof(AVFrame);
+ *(AVFrame *)data = cin->frame;
+
+ return buf_size;
+}
+
+static int cinvideo_decode_end(AVCodecContext *avctx)
+{
+ CinVideoContext *cin = (CinVideoContext *)avctx->priv_data;
+ int i;
+
+ if (cin->frame.data[0])
+ avctx->release_buffer(avctx, &cin->frame);
+
+ for (i = 0; i < 3; ++i)
+ av_free(cin->bitmap_table[i]);
+
+ return 0;
+}
+
+static int cinaudio_decode_init(AVCodecContext *avctx)
+{
+ CinAudioContext *cin = (CinAudioContext *)avctx->priv_data;
+
+ cin->avctx = avctx;
+ cin->initial_decode_frame = 1;
+ cin->delta = 0;
+
+ return 0;
+}
+
+static int cinaudio_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ CinAudioContext *cin = (CinAudioContext *)avctx->priv_data;
+ uint8_t *src = buf;
+ int16_t *samples = (int16_t *)data;
+
+ if (cin->initial_decode_frame) {
+ cin->initial_decode_frame = 0;
+ cin->delta = (int16_t)LE_16(src); src += 2;
+ *samples++ = cin->delta;
+ buf_size -= 2;
+ }
+ while (buf_size > 0) {
+ cin->delta += cinaudio_delta16_table[*src++];
+ cin->delta = clip(cin->delta, -32768, 32767);
+ *samples++ = cin->delta;
+ --buf_size;
+ }
+
+ *data_size = (uint8_t *)samples - (uint8_t *)data;
+
+ return src - buf;
+}
+
+
+AVCodec dsicinvideo_decoder = {
+ "dsicinvideo",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_DSICINVIDEO,
+ sizeof(CinVideoContext),
+ cinvideo_decode_init,
+ NULL,
+ cinvideo_decode_end,
+ cinvideo_decode_frame,
+ CODEC_CAP_DR1,
+};
+
+AVCodec dsicinaudio_decoder = {
+ "dsicinaudio",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_DSICINAUDIO,
+ sizeof(CinAudioContext),
+ cinaudio_decode_init,
+ NULL,
+ NULL,
+ cinaudio_decode_frame,
+};
diff --git a/src/libffmpeg/libavcodec/dsputil.c b/contrib/ffmpeg/libavcodec/dsputil.c
index 9b79b8659..51eddbc60 100644
--- a/src/libffmpeg/libavcodec/dsputil.c
+++ b/contrib/ffmpeg/libavcodec/dsputil.c
@@ -3,18 +3,20 @@
* Copyright (c) 2000, 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
@@ -35,8 +37,11 @@
/* snow.c */
void ff_spatial_dwt(int *buffer, int width, int height, int stride, int type, int decomposition_count);
-uint8_t cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
-uint32_t squareTbl[512] = {0, };
+/* vorbis.c */
+void vorbis_inverse_coupling(float *mag, float *ang, int blocksize);
+
+uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP] = {0, };
+uint32_t ff_squareTbl[512] = {0, };
const uint8_t ff_zigzag_direct[64] = {
0, 1, 8, 16, 9, 2, 3, 10,
@@ -88,7 +93,7 @@ const uint8_t ff_alternate_vertical_scan[64] = {
};
/* a*inverse[b]>>32 == a/b for all 0<=a<=65536 && 2<=b<=255 */
-const uint32_t inverse[256]={
+const uint32_t ff_inverse[256]={
0, 4294967295U,2147483648U,1431655766, 1073741824, 858993460, 715827883, 613566757,
536870912, 477218589, 429496730, 390451573, 357913942, 330382100, 306783379, 286331154,
268435456, 252645136, 238609295, 226050911, 214748365, 204522253, 195225787, 186737709,
@@ -160,7 +165,7 @@ static int pix_sum_c(uint8_t * pix, int line_size)
static int pix_norm1_c(uint8_t * pix, int line_size)
{
int s, i, j;
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < 16; i++) {
@@ -226,7 +231,7 @@ static void bswap_buf(uint32_t *dst, uint32_t *src, int w){
static int sse4_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
@@ -243,7 +248,7 @@ static int sse4_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
{
int s, i;
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
@@ -264,7 +269,7 @@ static int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h)
static int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int s, i;
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < h; i++) {
@@ -353,7 +358,7 @@ static inline int w_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, in
for(i=0; i<size; i++){
for(j=0; j<size; j++){
int v= tmp[sx + sy + i*stride + j] * scale[type][dec_count-3][level][ori];
- s += ABS(v);
+ s += FFABS(v);
}
}
}
@@ -431,7 +436,7 @@ static void put_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<8;i++) {
@@ -453,7 +458,7 @@ static void put_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<4;i++) {
@@ -471,7 +476,7 @@ static void put_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<2;i++) {
@@ -508,7 +513,7 @@ static void add_pixels_clamped_c(const DCTELEM *block, uint8_t *restrict pixels,
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<8;i++) {
@@ -529,7 +534,7 @@ static void add_pixels_clamped4_c(const DCTELEM *block, uint8_t *restrict pixels
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<4;i++) {
@@ -546,7 +551,7 @@ static void add_pixels_clamped2_c(const DCTELEM *block, uint8_t *restrict pixels
int line_size)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* read the pixels */
for(i=0;i<2;i++) {
@@ -1484,86 +1489,33 @@ H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
-static inline void copy_block2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST16(dst , LD16(src ));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block4(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-
-static inline void copy_block16(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
+static void put_no_rnd_h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){
+ const int A=(8-x)*(8-y);
+ const int B=( x)*(8-y);
+ const int C=(8-x)*( y);
+ const int D=( x)*( y);
int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- ST32(dst+8 , LD32(src+8 ));
- ST32(dst+12, LD32(src+12));
- dst+=dstStride;
- src+=srcStride;
- }
-}
-static inline void copy_block17(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
- for(i=0; i<h; i++)
- {
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- ST32(dst+8 , LD32(src+8 ));
- ST32(dst+12, LD32(src+12));
- dst[16]= src[16];
- dst+=dstStride;
- src+=srcStride;
- }
-}
+ assert(x<8 && y<8 && x>=0 && y>=0);
-static inline void copy_block9(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
-{
- int i;
for(i=0; i<h; i++)
{
- ST32(dst , LD32(src ));
- ST32(dst+4 , LD32(src+4 ));
- dst[8]= src[8];
- dst+=dstStride;
- src+=srcStride;
+ dst[0] = (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + 32 - 4) >> 6;
+ dst[1] = (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + 32 - 4) >> 6;
+ dst[2] = (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + 32 - 4) >> 6;
+ dst[3] = (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + 32 - 4) >> 6;
+ dst[4] = (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + 32 - 4) >> 6;
+ dst[5] = (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + 32 - 4) >> 6;
+ dst[6] = (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + 32 - 4) >> 6;
+ dst[7] = (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + 32 - 4) >> 6;
+ dst+= stride;
+ src+= stride;
}
}
-
#define QPEL_MC(r, OPNAME, RND, OP) \
static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
@@ -1582,7 +1534,7 @@ static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstS
\
static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=8;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
@@ -1609,7 +1561,7 @@ static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstS
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
\
for(i=0; i<h; i++)\
@@ -1636,7 +1588,7 @@ static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dst
}\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
const int w=16;\
for(i=0; i<w; i++)\
@@ -2062,7 +2014,7 @@ QPEL_MC(0, avg_ , _ , op_avg)
#define H264_LOWPASS(OPNAME, OP, OP2) \
static void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int h=2;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
@@ -2075,7 +2027,7 @@ static void OPNAME ## h264_qpel2_h_lowpass(uint8_t *dst, uint8_t *src, int dstSt
\
static void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=2;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
@@ -2096,7 +2048,7 @@ static void OPNAME ## h264_qpel2_v_lowpass(uint8_t *dst, uint8_t *src, int dstSt
static void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
const int h=2;\
const int w=2;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
@@ -2124,7 +2076,7 @@ static void OPNAME ## h264_qpel2_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t
}\
static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int h=4;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
@@ -2139,7 +2091,7 @@ static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstSt
\
static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=4;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
@@ -2164,7 +2116,7 @@ static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstSt
static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
const int h=4;\
const int w=4;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
@@ -2199,7 +2151,7 @@ static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t
\
static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int h=8;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<h; i++)\
{\
@@ -2218,7 +2170,7 @@ static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstSt
\
static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
const int w=8;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
for(i=0; i<w; i++)\
{\
@@ -2251,7 +2203,7 @@ static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstSt
static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
const int h=8;\
const int w=8;\
- uint8_t *cm = cropTbl + MAX_NEG_CROP;\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
int i;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
@@ -2553,7 +2505,7 @@ H264_WEIGHT(2,2)
#undef H264_WEIGHT
static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;
for(i=0; i<h; i++){
@@ -2598,7 +2550,7 @@ void ff_put_vc1_mspel_mc00_c(uint8_t *dst, uint8_t *src, int stride, int rnd) {
#endif /* CONFIG_VC1_DECODER||CONFIG_WMV3_DECODER */
static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int i;
for(i=0; i<w; i++){
@@ -2700,7 +2652,7 @@ static void h263_v_loop_filter_c(uint8_t *src, int stride, int qscale){
src[x-1*stride] = p1;
src[x+0*stride] = p2;
- ad1= ABS(d1)>>1;
+ ad1= FFABS(d1)>>1;
d2= clip((p0-p3)/4, -ad1, ad1);
@@ -2735,7 +2687,7 @@ static void h263_h_loop_filter_c(uint8_t *src, int stride, int qscale){
src[y*stride-1] = p1;
src[y*stride+0] = p2;
- ad1= ABS(d1)>>1;
+ ad1= FFABS(d1)>>1;
d2= clip((p0-p3)/4, -ad1, ad1);
@@ -2787,18 +2739,18 @@ static inline void h264_loop_filter_luma_c(uint8_t *pix, int xstride, int ystrid
const int q1 = pix[1*xstride];
const int q2 = pix[2*xstride];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
int tc = tc0[i];
int i_delta;
- if( ABS( p2 - p0 ) < beta ) {
+ if( FFABS( p2 - p0 ) < beta ) {
pix[-2*xstride] = p1 + clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
tc++;
}
- if( ABS( q2 - q0 ) < beta ) {
+ if( FFABS( q2 - q0 ) < beta ) {
pix[ xstride] = q1 + clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
tc++;
}
@@ -2835,9 +2787,9 @@ static inline void h264_loop_filter_chroma_c(uint8_t *pix, int xstride, int ystr
const int q0 = pix[0];
const int q1 = pix[1*xstride];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
int delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
@@ -2866,9 +2818,9 @@ static inline void h264_loop_filter_chroma_intra_c(uint8_t *pix, int xstride, in
const int q0 = pix[0];
const int q1 = pix[1*xstride];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
pix[-xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */
pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */
@@ -3097,9 +3049,9 @@ static int nsse16_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
}
if(y+1<h){
for(x=0; x<15; x++){
- score2+= ABS( s1[x ] - s1[x +stride]
+ score2+= FFABS( s1[x ] - s1[x +stride]
- s1[x+1] + s1[x+1+stride])
- -ABS( s2[x ] - s2[x +stride]
+ -FFABS( s2[x ] - s2[x +stride]
- s2[x+1] + s2[x+1+stride]);
}
}
@@ -3107,8 +3059,8 @@ static int nsse16_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
s2+= stride;
}
- if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
- else return score1 + ABS(score2)*8;
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
}
static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
@@ -3123,9 +3075,9 @@ static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
}
if(y+1<h){
for(x=0; x<7; x++){
- score2+= ABS( s1[x ] - s1[x +stride]
+ score2+= FFABS( s1[x ] - s1[x +stride]
- s1[x+1] + s1[x+1+stride])
- -ABS( s2[x ] - s2[x +stride]
+ -FFABS( s2[x ] - s2[x +stride]
- s2[x+1] + s2[x+1+stride]);
}
}
@@ -3133,8 +3085,8 @@ static int nsse8_c(void *v, uint8_t *s1, uint8_t *s2, int stride, int h){
s2+= stride;
}
- if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
- else return score1 + ABS(score2)*8;
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
}
static int try_8x8basis_c(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
@@ -3324,7 +3276,7 @@ o2= (i1)-(i2);
y= a-b;\
}
-#define BUTTERFLYA(x,y) (ABS((x)+(y)) + ABS((x)-(y)))
+#define BUTTERFLYA(x,y) (FFABS((x)+(y)) + FFABS((x)-(y)))
static int hadamard8_diff8x8_c(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
int i;
@@ -3421,7 +3373,7 @@ static int hadamard8_intra8x8_c(/*MpegEncContext*/ void *s, uint8_t *src, uint8_
+BUTTERFLYA(temp[8*3+i], temp[8*7+i]);
}
- sum -= ABS(temp[8*0] + temp[8*4]); // -mean
+ sum -= FFABS(temp[8*0] + temp[8*4]); // -mean
return sum;
}
@@ -3438,7 +3390,7 @@ static int dct_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2
s->dsp.fdct(temp);
for(i=0; i<64; i++)
- sum+= ABS(temp[i]);
+ sum+= FFABS(temp[i]);
return sum;
}
@@ -3487,7 +3439,7 @@ static int dct264_sad8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *s
#undef DST
#define SRC(x) dct[x][i]
-#define DST(x,v) sum += ABS(v)
+#define DST(x,v) sum += FFABS(v)
for( i = 0; i < 8; i++ )
DCT8_1D
#undef SRC
@@ -3508,13 +3460,11 @@ static int dct_max8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2
s->dsp.fdct(temp);
for(i=0; i<64; i++)
- sum= FFMAX(sum, ABS(temp[i]));
+ sum= FFMAX(sum, FFABS(temp[i]));
return sum;
}
-void simple_idct(DCTELEM *block); //FIXME
-
static int quant_psnr8x8_c(/*MpegEncContext*/ void *c, uint8_t *src1, uint8_t *src2, int stride, int h){
MpegEncContext * const s= (MpegEncContext *)c;
DECLARE_ALIGNED_8 (uint64_t, aligned_temp[sizeof(DCTELEM)*64*2/8]);
@@ -3684,8 +3634,8 @@ static int vsad_intra16_c(/*MpegEncContext*/ void *c, uint8_t *s, uint8_t *dummy
for(y=1; y<h; y++){
for(x=0; x<16; x+=4){
- score+= ABS(s[x ] - s[x +stride]) + ABS(s[x+1] - s[x+1+stride])
- +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
+ score+= FFABS(s[x ] - s[x +stride]) + FFABS(s[x+1] - s[x+1+stride])
+ +FFABS(s[x+2] - s[x+2+stride]) + FFABS(s[x+3] - s[x+3+stride]);
}
s+= stride;
}
@@ -3699,7 +3649,7 @@ static int vsad16_c(/*MpegEncContext*/ void *c, uint8_t *s1, uint8_t *s2, int st
for(y=1; y<h; y++){
for(x=0; x<16; x++){
- score+= ABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
+ score+= FFABS(s1[x ] - s2[x ] - s1[x +stride] + s2[x +stride]);
}
s1+= stride;
s2+= stride;
@@ -3750,6 +3700,39 @@ WARPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
WARPER8_16_SQ(rd8x8_c, rd16_c)
WARPER8_16_SQ(bit8x8_c, bit16_c)
+static void vector_fmul_c(float *dst, const float *src, int len){
+ int i;
+ for(i=0; i<len; i++)
+ dst[i] *= src[i];
+}
+
+static void vector_fmul_reverse_c(float *dst, const float *src0, const float *src1, int len){
+ int i;
+ src1 += len-1;
+ for(i=0; i<len; i++)
+ dst[i] = src0[i] * src1[-i];
+}
+
+void ff_vector_fmul_add_add_c(float *dst, const float *src0, const float *src1, const float *src2, int src3, int len, int step){
+ int i;
+ for(i=0; i<len; i++)
+ dst[i*step] = src0[i] * src1[i] + src2[i] + src3;
+}
+
+void ff_float_to_int16_c(int16_t *dst, const float *src, int len){
+ int i;
+ for(i=0; i<len; i++) {
+ int_fast32_t tmp = ((int32_t*)src)[i];
+ if(tmp & 0xf0000){
+ tmp = (0x43c0ffff - tmp)>>31;
+ // is this faster on some gcc/cpu combinations?
+// if(tmp > 0x43c0ffff) tmp = 0xFFFF;
+// else tmp = 0;
+ }
+ dst[i] = tmp - 0x8000;
+ }
+}
+
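ff_float_to_int16_c above does not convert in the usual sense: it reads each float's IEEE-754 bit pattern as an int32 and relies on the caller having biased samples into [384.0, 386.0] (spelled out in the dsputil.h comment added later in this diff). For such floats the encoding is 0x43C00000 plus a 16-bit offset, so the low half word already holds sample + 0x8000; the 0xf0000 test catches out-of-range values and clamps them via the sign of 0x43c0ffff - tmp. A small standalone illustration of the trick, assuming nothing beyond standard C:

    /* Illustration only: if a PCM sample s in [-32768,32767] is stored as the
     * float 384.0f + (s + 32768)/32768.0f, its bit pattern is exactly
     * 0x43C00000 + (s + 0x8000), so (int16_t)(bits - 0x8000) recovers s. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        int16_t s = -12345;                          /* arbitrary sample      */
        float   f = 384.0f + (s + 32768) / 32768.0f; /* biased into [384,386) */
        int32_t bits;

        memcpy(&bits, &f, sizeof bits);              /* same reinterpretation as
                                                        ((int32_t*)src)[i] above */
        printf("bits      = 0x%08X\n", (unsigned)bits);       /* 0x43C04FC7   */
        printf("recovered = %d\n", (int16_t)(bits - 0x8000)); /* -12345       */
        return 0;
    }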
/* XXX: those functions should be suppressed ASAP when all IDCTs are
converted */
static void ff_jref_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
@@ -3787,13 +3770,13 @@ static void ff_jref_idct2_add(uint8_t *dest, int line_size, DCTELEM *block)
static void ff_jref_idct1_put(uint8_t *dest, int line_size, DCTELEM *block)
{
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
dest[0] = cm[(block[0] + 4)>>3];
}
static void ff_jref_idct1_add(uint8_t *dest, int line_size, DCTELEM *block)
{
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
dest[0] = cm[dest[0] + ((block[0] + 4)>>3)];
}
@@ -3805,14 +3788,14 @@ void dsputil_static_init(void)
{
int i;
- for(i=0;i<256;i++) cropTbl[i + MAX_NEG_CROP] = i;
+ for(i=0;i<256;i++) ff_cropTbl[i + MAX_NEG_CROP] = i;
for(i=0;i<MAX_NEG_CROP;i++) {
- cropTbl[i] = 0;
- cropTbl[i + MAX_NEG_CROP + 256] = 255;
+ ff_cropTbl[i] = 0;
+ ff_cropTbl[i + MAX_NEG_CROP + 256] = 255;
}
for(i=0;i<512;i++) {
- squareTbl[i] = (i - 256) * (i - 256);
+ ff_squareTbl[i] = (i - 256) * (i - 256);
}
for(i=0; i<64; i++) inv_zigzag_direct16[ff_zigzag_direct[i]]= i+1;
@@ -3994,6 +3977,7 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_c;
c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c;
c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c;
+ c->put_no_rnd_h264_chroma_pixels_tab[0]= put_no_rnd_h264_chroma_mc8_c;
c->weight_h264_pixels_tab[0]= weight_h264_pixels16x16_c;
c->weight_h264_pixels_tab[1]= weight_h264_pixels16x8_c;
@@ -4075,6 +4059,7 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_c;
c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_c;
c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_c;
+ c->h264_loop_filter_strength= NULL;
c->h263_h_loop_filter= h263_h_loop_filter_c;
c->h263_v_loop_filter= h263_v_loop_filter_c;
@@ -4090,6 +4075,14 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->inner_add_yblock = ff_snow_inner_add_yblock;
#endif
+#ifdef CONFIG_VORBIS_DECODER
+ c->vorbis_inverse_coupling = vorbis_inverse_coupling;
+#endif
+ c->vector_fmul = vector_fmul_c;
+ c->vector_fmul_reverse = vector_fmul_reverse_c;
+ c->vector_fmul_add_add = ff_vector_fmul_add_add_c;
+ c->float_to_int16 = ff_float_to_int16_c;
+
c->shrink[0]= ff_img_copy_plane;
c->shrink[1]= ff_shrink22;
c->shrink[2]= ff_shrink44;
@@ -4097,6 +4090,9 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx)
c->prefetch= just_return;
+ memset(c->put_2tap_qpel_pixels_tab, 0, sizeof(c->put_2tap_qpel_pixels_tab));
+ memset(c->avg_2tap_qpel_pixels_tab, 0, sizeof(c->avg_2tap_qpel_pixels_tab));
+
#ifdef HAVE_MMX
dsputil_init_mmx(c, avctx);
#endif
@@ -4121,6 +4117,16 @@ void dsputil_init(DSPContext* c, AVCodecContext *avctx)
#ifdef ARCH_SH4
dsputil_init_sh4(c,avctx);
#endif
+#ifdef ARCH_BFIN
+ dsputil_init_bfin(c,avctx);
+#endif
+
+ for(i=0; i<64; i++){
+ if(!c->put_2tap_qpel_pixels_tab[0][i])
+ c->put_2tap_qpel_pixels_tab[0][i]= c->put_h264_qpel_pixels_tab[0][i];
+ if(!c->avg_2tap_qpel_pixels_tab[0][i])
+ c->avg_2tap_qpel_pixels_tab[0][i]= c->avg_h264_qpel_pixels_tab[0][i];
+ }
switch(c->idct_permutation_type){
case FF_NO_IDCT_PERM:
diff --git a/src/libffmpeg/libavcodec/dsputil.h b/contrib/ffmpeg/libavcodec/dsputil.h
index df7830564..35deb6aab 100644
--- a/src/libffmpeg/libavcodec/dsputil.h
+++ b/contrib/ffmpeg/libavcodec/dsputil.h
@@ -3,18 +3,20 @@
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -31,9 +33,6 @@
#include "common.h"
#include "avcodec.h"
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
-#define HAVE_MMX 1
-#endif
//#define DEBUG
/* dct code */
@@ -61,6 +60,10 @@ void ff_h264_idct_dc_add_c(uint8_t *dst, DCTELEM *block, int stride);
void ff_h264_lowres_idct_add_c(uint8_t *dst, int stride, DCTELEM *block);
void ff_h264_lowres_idct_put_c(uint8_t *dst, int stride, DCTELEM *block);
+void ff_vector_fmul_add_add_c(float *dst, const float *src0, const float *src1,
+ const float *src2, int src3, int blocksize, int step);
+void ff_float_to_int16_c(int16_t *dst, const float *src, int len);
+
/* encoding scans */
extern const uint8_t ff_alternate_horizontal_scan[64];
extern const uint8_t ff_alternate_vertical_scan[64];
@@ -71,8 +74,8 @@ extern const uint8_t ff_zigzag248_direct[64];
#define MAX_NEG_CROP 1024
/* temporary */
-extern uint32_t squareTbl[512];
-extern uint8_t cropTbl[256 + 2 * MAX_NEG_CROP];
+extern uint32_t ff_squareTbl[512];
+extern uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP];
/* VP3 DSP functions */
void ff_vp3_idct_c(DCTELEM *block/* align 16*/);
@@ -269,11 +272,16 @@ typedef struct DSPContext {
 * h264 Chroma MC
*/
h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
+ /* This is really just one function, used in VC-1 decoding */
+ h264_chroma_mc_func put_no_rnd_h264_chroma_pixels_tab[3];
h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];
qpel_mc_func put_h264_qpel_pixels_tab[4][16];
qpel_mc_func avg_h264_qpel_pixels_tab[4][16];
+ qpel_mc_func put_2tap_qpel_pixels_tab[4][16];
+ qpel_mc_func avg_2tap_qpel_pixels_tab[4][16];
+
h264_weight_func weight_h264_pixels_tab[10];
h264_biweight_func biweight_h264_pixels_tab[10];
@@ -304,12 +312,27 @@ typedef struct DSPContext {
void (*h264_h_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta);
void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta);
+ // h264_loop_filter_strength: simd only. the C version is inlined in h264.c
+ void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
+ int bidir, int edges, int step, int mask_mv0, int mask_mv1);
void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale);
void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale);
void (*h261_loop_filter)(uint8_t *src, int stride);
+ /* assume len is a multiple of 4, and arrays are 16-byte aligned */
+ void (*vorbis_inverse_coupling)(float *mag, float *ang, int blocksize);
+ /* assume len is a multiple of 8, and arrays are 16-byte aligned */
+ void (*vector_fmul)(float *dst, const float *src, int len);
+ void (*vector_fmul_reverse)(float *dst, const float *src0, const float *src1, int len);
+ /* assume len is a multiple of 8, and src arrays are 16-byte aligned */
+ void (*vector_fmul_add_add)(float *dst, const float *src0, const float *src1, const float *src2, int src3, int len, int step);
+
+ /* C version: convert floats from the range [384.0,386.0] to ints in [-32768,32767]
+ * simd versions: convert floats from [-32768.0,32767.0] without rescaling; arrays are 16-byte aligned */
+ void (*float_to_int16)(int16_t *dst, const float *src, int len);
+
/* (I)DCT */
void (*fdct)(DCTELEM *block/* align 16*/);
void (*fdct248)(DCTELEM *block/* align 16*/);
@@ -374,8 +397,8 @@ typedef struct DSPContext {
void (*vc1_inv_trans_8x4)(DCTELEM *b, int n);
void (*vc1_inv_trans_4x8)(DCTELEM *b, int n);
void (*vc1_inv_trans_4x4)(DCTELEM *b, int n);
- void (*vc1_v_overlap)(uint8_t* src, int stride, int rnd);
- void (*vc1_h_overlap)(uint8_t* src, int stride, int rnd);
+ void (*vc1_v_overlap)(uint8_t* src, int stride);
+ void (*vc1_h_overlap)(uint8_t* src, int stride);
/* put 8x8 block with bicubic interpolation and quarterpel precision
* last argument is actually round value instead of height
*/
@@ -553,6 +576,13 @@ void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
+#elif defined(ARCH_BFIN)
+
+#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (8)))
+#define STRIDE_ALIGN 8
+
+void dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
+
#else
#define DECLARE_ALIGNED_8(t,v) t v __attribute__ ((aligned (8)))
@@ -595,6 +625,8 @@ void get_psnr(uint8_t *orig_image[3], uint8_t *coded_image[3],
FFTSample type */
typedef float FFTSample;
+struct MDCTContext;
+
typedef struct FFTComplex {
FFTSample re, im;
} FFTComplex;
@@ -606,6 +638,8 @@ typedef struct FFTContext {
FFTComplex *exptab;
FFTComplex *exptab1; /* only used by SSE code */
void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
+ void (*imdct_calc)(struct MDCTContext *s, FFTSample *output,
+ const FFTSample *input, FFTSample *tmp);
} FFTContext;
int ff_fft_init(FFTContext *s, int nbits, int inverse);
@@ -636,6 +670,10 @@ typedef struct MDCTContext {
int ff_mdct_init(MDCTContext *s, int nbits, int inverse);
void ff_imdct_calc(MDCTContext *s, FFTSample *output,
const FFTSample *input, FFTSample *tmp);
+void ff_imdct_calc_3dn2(MDCTContext *s, FFTSample *output,
+ const FFTSample *input, FFTSample *tmp);
+void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output,
+ const FFTSample *input, FFTSample *tmp);
void ff_mdct_calc(MDCTContext *s, FFTSample *out,
const FFTSample *input, FFTSample *tmp);
void ff_mdct_end(MDCTContext *s);
@@ -660,4 +698,81 @@ static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int st
return score;\
}
+
+static inline void copy_block2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ ST16(dst , LD16(src ));
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block4(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ ST32(dst , LD32(src ));
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ ST32(dst , LD32(src ));
+ ST32(dst+4 , LD32(src+4 ));
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block9(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ ST32(dst , LD32(src ));
+ ST32(dst+4 , LD32(src+4 ));
+ dst[8]= src[8];
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block16(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ ST32(dst , LD32(src ));
+ ST32(dst+4 , LD32(src+4 ));
+ ST32(dst+8 , LD32(src+8 ));
+ ST32(dst+12, LD32(src+12));
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
+static inline void copy_block17(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
+{
+ int i;
+ for(i=0; i<h; i++)
+ {
+ ST32(dst , LD32(src ));
+ ST32(dst+4 , LD32(src+4 ));
+ ST32(dst+8 , LD32(src+8 ));
+ ST32(dst+12, LD32(src+12));
+ dst[16]= src[16];
+ dst+=dstStride;
+ src+=srcStride;
+ }
+}
+
#endif
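The copy_blockN helpers that land at the end of this header all follow the same pattern: copy a fixed-width block row by row, using the LD32/ST32 macros so the 32-bit loads and stores also work where unaligned access needs special handling. A portable sketch of the same idea, assuming nothing about those macros:

    /* Portable sketch (illustrative only): a memcpy of a small constant size
     * compiles to the same word moves as the LD32/ST32 pairs on most targets. */
    #include <stdint.h>
    #include <string.h>

    static inline void copy_block_w(uint8_t *dst, const uint8_t *src,
                                    int dstStride, int srcStride, int w, int h)
    {
        int i;
        for (i = 0; i < h; i++) {
            memcpy(dst, src, w);   /* w is 2, 4, 8, 9, 16 or 17 in the callers */
            dst += dstStride;
            src += srcStride;
        }
    }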
diff --git a/contrib/ffmpeg/libavcodec/dtsdec.c b/contrib/ffmpeg/libavcodec/dtsdec.c
new file mode 100644
index 000000000..456f3fdef
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/dtsdec.c
@@ -0,0 +1,320 @@
+/*
+ * dtsdec.c : free DTS Coherent Acoustics stream decoder.
+ * Copyright (C) 2004 Benjamin Zores <ben@geexbox.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef HAVE_AV_CONFIG_H
+#undef HAVE_AV_CONFIG_H
+#endif
+
+#include "avcodec.h"
+#include <dts.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+#define BUFFER_SIZE 18726
+#define HEADER_SIZE 14
+
+#ifdef LIBDTS_FIXED
+#define CONVERT_LEVEL (1 << 26)
+#define CONVERT_BIAS 0
+#else
+#define CONVERT_LEVEL 1
+#define CONVERT_BIAS 384
+#endif
+
+static inline
+int16_t convert (int32_t i)
+{
+#ifdef LIBDTS_FIXED
+ i >>= 15;
+#else
+ i -= 0x43c00000;
+#endif
+ return (i > 32767) ? 32767 : ((i < -32768) ? -32768 : i);
+}
+
+static void
+convert2s16_2 (sample_t * _f, int16_t * s16)
+{
+ int i;
+ int32_t * f = (int32_t *) _f;
+
+ for (i = 0; i < 256; i++)
+ {
+ s16[2*i] = convert (f[i]);
+ s16[2*i+1] = convert (f[i+256]);
+ }
+}
+
+static void
+convert2s16_4 (sample_t * _f, int16_t * s16)
+{
+ int i;
+ int32_t * f = (int32_t *) _f;
+
+ for (i = 0; i < 256; i++)
+ {
+ s16[4*i] = convert (f[i]);
+ s16[4*i+1] = convert (f[i+256]);
+ s16[4*i+2] = convert (f[i+512]);
+ s16[4*i+3] = convert (f[i+768]);
+ }
+}
+
+static void
+convert2s16_5 (sample_t * _f, int16_t * s16)
+{
+ int i;
+ int32_t * f = (int32_t *) _f;
+
+ for (i = 0; i < 256; i++)
+ {
+ s16[5*i] = convert (f[i]);
+ s16[5*i+1] = convert (f[i+256]);
+ s16[5*i+2] = convert (f[i+512]);
+ s16[5*i+3] = convert (f[i+768]);
+ s16[5*i+4] = convert (f[i+1024]);
+ }
+}
+
+static void
+convert2s16_multi (sample_t * _f, int16_t * s16, int flags)
+{
+ int i;
+ int32_t * f = (int32_t *) _f;
+
+ switch (flags)
+ {
+ case DTS_MONO:
+ for (i = 0; i < 256; i++)
+ {
+ s16[5*i] = s16[5*i+1] = s16[5*i+2] = s16[5*i+3] = 0;
+ s16[5*i+4] = convert (f[i]);
+ }
+ break;
+ case DTS_CHANNEL:
+ case DTS_STEREO:
+ case DTS_DOLBY:
+ convert2s16_2 (_f, s16);
+ break;
+ case DTS_3F:
+ for (i = 0; i < 256; i++)
+ {
+ s16[5*i] = convert (f[i]);
+ s16[5*i+1] = convert (f[i+512]);
+ s16[5*i+2] = s16[5*i+3] = 0;
+ s16[5*i+4] = convert (f[i+256]);
+ }
+ break;
+ case DTS_2F2R:
+ convert2s16_4 (_f, s16);
+ break;
+ case DTS_3F2R:
+ convert2s16_5 (_f, s16);
+ break;
+ case DTS_MONO | DTS_LFE:
+ for (i = 0; i < 256; i++)
+ {
+ s16[6*i] = s16[6*i+1] = s16[6*i+2] = s16[6*i+3] = 0;
+ s16[6*i+4] = convert (f[i+256]);
+ s16[6*i+5] = convert (f[i]);
+ }
+ break;
+ case DTS_CHANNEL | DTS_LFE:
+ case DTS_STEREO | DTS_LFE:
+ case DTS_DOLBY | DTS_LFE:
+ for (i = 0; i < 256; i++)
+ {
+ s16[6*i] = convert (f[i+256]);
+ s16[6*i+1] = convert (f[i+512]);
+ s16[6*i+2] = s16[6*i+3] = s16[6*i+4] = 0;
+ s16[6*i+5] = convert (f[i]);
+ }
+ break;
+ case DTS_3F | DTS_LFE:
+ for (i = 0; i < 256; i++)
+ {
+ s16[6*i] = convert (f[i+256]);
+ s16[6*i+1] = convert (f[i+768]);
+ s16[6*i+2] = s16[6*i+3] = 0;
+ s16[6*i+4] = convert (f[i+512]);
+ s16[6*i+5] = convert (f[i]);
+ }
+ break;
+ case DTS_2F2R | DTS_LFE:
+ for (i = 0; i < 256; i++)
+ {
+ s16[6*i] = convert (f[i+256]);
+ s16[6*i+1] = convert (f[i+512]);
+ s16[6*i+2] = convert (f[i+768]);
+ s16[6*i+3] = convert (f[i+1024]);
+ s16[6*i+4] = 0;
+ s16[6*i+5] = convert (f[i]);
+ }
+ break;
+ case DTS_3F2R | DTS_LFE:
+ for (i = 0; i < 256; i++)
+ {
+ s16[6*i] = convert (f[i+256]);
+ s16[6*i+1] = convert (f[i+768]);
+ s16[6*i+2] = convert (f[i+1024]);
+ s16[6*i+3] = convert (f[i+1280]);
+ s16[6*i+4] = convert (f[i+512]);
+ s16[6*i+5] = convert (f[i]);
+ }
+ break;
+ }
+}
+
+static int
+channels_multi (int flags)
+{
+ if (flags & DTS_LFE)
+ return 6;
+ else if (flags & 1) /* center channel */
+ return 5;
+ else if ((flags & DTS_CHANNEL_MASK) == DTS_2F2R)
+ return 4;
+ else
+ return 2;
+}
+
+static int
+dts_decode_frame (AVCodecContext *avctx, void *data, int *data_size,
+ uint8_t *buff, int buff_size)
+{
+ uint8_t * start = buff;
+ uint8_t * end = buff + buff_size;
+ static uint8_t buf[BUFFER_SIZE];
+ static uint8_t * bufptr = buf;
+ static uint8_t * bufpos = buf + HEADER_SIZE;
+
+ static int sample_rate;
+ static int frame_length;
+ static int flags;
+ int bit_rate;
+ int len;
+ dts_state_t *state = avctx->priv_data;
+
+ *data_size = 0;
+
+ while (1)
+ {
+ len = end - start;
+ if (!len)
+ break;
+ if (len > bufpos - bufptr)
+ len = bufpos - bufptr;
+ memcpy (bufptr, start, len);
+ bufptr += len;
+ start += len;
+ if (bufptr != bufpos)
+ return start - buff;
+ if (bufpos != buf + HEADER_SIZE)
+ break;
+
+ {
+ int length;
+
+ length = dts_syncinfo (state, buf, &flags, &sample_rate,
+ &bit_rate, &frame_length);
+ if (!length)
+ {
+ av_log (NULL, AV_LOG_INFO, "skip\n");
+ for (bufptr = buf; bufptr < buf + HEADER_SIZE-1; bufptr++)
+ bufptr[0] = bufptr[1];
+ continue;
+ }
+ bufpos = buf + length;
+ }
+ }
+
+ {
+ level_t level;
+ sample_t bias;
+ int i;
+
+ flags = 2; /* ???????????? */
+ level = CONVERT_LEVEL;
+ bias = CONVERT_BIAS;
+
+ flags |= DTS_ADJUST_LEVEL;
+ if (dts_frame (state, buf, &flags, &level, bias))
+ goto error;
+ avctx->sample_rate = sample_rate;
+ avctx->channels = channels_multi (flags);
+ avctx->bit_rate = bit_rate;
+ for (i = 0; i < dts_blocks_num (state); i++)
+ {
+ if (dts_block (state))
+ goto error;
+ {
+ int chans;
+ chans = channels_multi (flags);
+ convert2s16_multi (dts_samples (state), data,
+ flags & (DTS_CHANNEL_MASK | DTS_LFE));
+
+ data += 256 * sizeof (int16_t) * chans;
+ *data_size += 256 * sizeof (int16_t) * chans;
+ }
+ }
+ bufptr = buf;
+ bufpos = buf + HEADER_SIZE;
+ return start-buff;
+ error:
+ av_log (NULL, AV_LOG_ERROR, "error\n");
+ bufptr = buf;
+ bufpos = buf + HEADER_SIZE;
+ }
+
+ return start-buff;
+}
+
+static int
+dts_decode_init (AVCodecContext *avctx)
+{
+ avctx->priv_data = dts_init (0);
+ if (avctx->priv_data == NULL)
+ return -1;
+
+ return 0;
+}
+
+static int
+dts_decode_end (AVCodecContext *s)
+{
+ return 0;
+}
+
+AVCodec dts_decoder = {
+ "dts",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_DTS,
+ sizeof (dts_state_t *),
+ dts_decode_init,
+ NULL,
+ dts_decode_end,
+ dts_decode_frame,
+};
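The dts_decoder registration above fills the AVCodec fields positionally in the order that era of libavcodec declared them (name, type, id, priv_data_size, init, encode, close, decode); the bare NULL sits in the encode slot because this entry is decode-only. The same initializer written with C99 designated initializers, purely as a reading aid and assuming those field names:

    /* Equivalent registration with designated initializers (illustrative;
     * the tree itself uses the positional form shown above). */
    AVCodec dts_decoder = {
        .name           = "dts",
        .type           = CODEC_TYPE_AUDIO,
        .id             = CODEC_ID_DTS,
        .priv_data_size = sizeof (dts_state_t *),
        .init           = dts_decode_init,
        .encode         = NULL,            /* decoder only */
        .close          = dts_decode_end,
        .decode         = dts_decode_frame,
    };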
diff --git a/src/libffmpeg/libavcodec/dv.c b/contrib/ffmpeg/libavcodec/dv.c
index c39d70c54..76095a481 100644
--- a/src/libffmpeg/libavcodec/dv.c
+++ b/contrib/ffmpeg/libavcodec/dv.c
@@ -12,18 +12,20 @@
* Many thanks to Dan Dennedy <dan@dennedy.org> for providing wealth
* of DV technical info.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -31,6 +33,7 @@
* @file dv.c
* DV codec.
*/
+#define ALT_BITSTREAM_READER
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
@@ -270,11 +273,6 @@ static const int vs_total_ac_bits = (100 * 4 + 68*2) * 5;
/* see dv_88_areas and dv_248_areas for details */
static const int mb_area_start[5] = { 1, 6, 21, 43, 64 };
-#ifndef ALT_BITSTREAM_READER
-#warning only works with ALT_BITSTREAM_READER
-static int re_index; //Hack to make it compile
-#endif
-
static inline int get_bits_left(GetBitContext *s)
{
return s->size_in_bits - get_bits_count(s);
@@ -707,7 +705,7 @@ static always_inline void dv_set_class_number(DCTELEM* blk, EncBlockInfo* bi,
/* weigh it and and shift down into range, adding for rounding */
/* the extra division by a factor of 2^4 reverses the 8x expansion of the DCT
AND the 2x doubling of the weights */
- level = (ABS(level) * weight[i] + (1<<(dv_weight_bits+3))) >> (dv_weight_bits+4);
+ level = (FFABS(level) * weight[i] + (1<<(dv_weight_bits+3))) >> (dv_weight_bits+4);
bi->mb[i] = level;
if(level>max) max= level;
bi->bit_size[area] += dv_rl2vlc_size(i - prev - 1, level);
@@ -1014,6 +1012,7 @@ static int dv_decode_mt(AVCodecContext *avctx, void* sl)
return 0;
}
+#ifdef CONFIG_ENCODERS
static int dv_encode_mt(AVCodecContext *avctx, void* sl)
{
DVVideoContext *s = avctx->priv_data;
@@ -1032,7 +1031,9 @@ static int dv_encode_mt(AVCodecContext *avctx, void* sl)
&s->sys->video_place[slice*5]);
return 0;
}
+#endif
+#ifdef CONFIG_DECODERS
/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
144000 bytes for PAL - or twice those for 50Mbps) */
static int dvvideo_decode_frame(AVCodecContext *avctx,
@@ -1072,7 +1073,132 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
return s->sys->frame_size;
}
+#endif
+
+
+static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c, uint8_t* buf)
+{
+ /*
+ * Here's what SMPTE314M says about these two:
+ * (page 6) APTn, AP1n, AP2n, AP3n: These data shall be identical
+ * as track application IDs (APTn = 001, AP1n =
+ * 001, AP2n = 001, AP3n = 001), if the source signal
+ * comes from a digital VCR. If the signal source is
+ * unknown, all bits for these data shall be set to 1.
+ * (page 12) STYPE: STYPE defines a signal type of video signal
+ * 00000b = 4:1:1 compression
+ * 00100b = 4:2:2 compression
+ * XXXXXX = Reserved
+ * Now, I've got two problems with these statements:
+ * 1. it looks like APT == 111b should be a safe bet, but it isn't.
+ * It seems that for PAL as defined in IEC 61834 we have to set
+ * APT to 000 and for SMPTE314M to 001.
+ * 2. It is not at all clear what STYPE is used for 4:2:0 PAL
+ * compression scheme (if any).
+ */
+ int apt = (c->sys->pix_fmt == PIX_FMT_YUV420P ? 0 : 1);
+ int stype = (c->sys->pix_fmt == PIX_FMT_YUV422P ? 4 : 0);
+
+ uint8_t aspect = 0;
+ if((int)(av_q2d(c->avctx->sample_aspect_ratio) * c->avctx->width / c->avctx->height * 10) == 17) /* 16:9 */
+ aspect = 0x02;
+
+ buf[0] = (uint8_t)pack_id;
+ switch (pack_id) {
+ case dv_header525: /* I can't imagine why these two weren't defined as real */
+ case dv_header625: /* packs in SMPTE314M -- they definitely look like ones */
+ buf[1] = 0xf8 | /* reserved -- always 1 */
+ (apt & 0x07); /* APT: Track application ID */
+ buf[2] = (0 << 7) | /* TF1: audio data is 0 - valid; 1 - invalid */
+ (0x0f << 3) | /* reserved -- always 1 */
+ (apt & 0x07); /* AP1: Audio application ID */
+ buf[3] = (0 << 7) | /* TF2: video data is 0 - valid; 1 - invalid */
+ (0x0f << 3) | /* reserved -- always 1 */
+ (apt & 0x07); /* AP2: Video application ID */
+ buf[4] = (0 << 7) | /* TF3: subcode(SSYB) is 0 - valid; 1 - invalid */
+ (0x0f << 3) | /* reserved -- always 1 */
+ (apt & 0x07); /* AP3: Subcode application ID */
+ break;
+ case dv_video_source:
+ buf[1] = 0xff; /* reserved -- always 1 */
+ buf[2] = (1 << 7) | /* B/W: 0 - b/w, 1 - color */
+ (1 << 6) | /* following CLF is valid - 0, invalid - 1 */
+ (3 << 4) | /* CLF: color frames id (see ITU-R BT.470-4) */
+ 0xf; /* reserved -- always 1 */
+ buf[3] = (3 << 6) | /* reserved -- always 1 */
+ (c->sys->dsf << 5) | /* system: 60fields/50fields */
+ stype; /* signal type video compression */
+ buf[4] = 0xff; /* VISC: 0xff -- no information */
+ break;
+ case dv_video_control:
+ buf[1] = (0 << 6) | /* Copy generation management (CGMS) 0 -- free */
+ 0x3f; /* reserved -- always 1 */
+ buf[2] = 0xc8 | /* reserved -- always b11001xxx */
+ aspect;
+ buf[3] = (1 << 7) | /* Frame/field flag 1 -- frame, 0 -- field */
+ (1 << 6) | /* First/second field flag 0 -- field 2, 1 -- field 1 */
+ (1 << 5) | /* Frame change flag 0 -- same picture as before, 1 -- different */
+ (1 << 4) | /* 1 - interlaced, 0 - noninterlaced */
+ 0xc; /* reserved -- always b1100 */
+ buf[4] = 0xff; /* reserved -- always 1 */
+ break;
+ default:
+ buf[1] = buf[2] = buf[3] = buf[4] = 0xff;
+ }
+ return 5;
+}
+
+static void dv_format_frame(DVVideoContext* c, uint8_t* buf)
+{
+ int chan, i, j, k;
+
+ for (chan = 0; chan < c->sys->n_difchan; chan++) {
+ for (i = 0; i < c->sys->difseg_size; i++) {
+ memset(buf, 0xff, 80 * 6); /* First 6 DIF blocks are for control data */
+
+ /* DV header: 1DIF */
+ buf += dv_write_dif_id(dv_sect_header, chan, i, 0, buf);
+ buf += dv_write_pack((c->sys->dsf ? dv_header625 : dv_header525), c, buf);
+ buf += 72; /* unused bytes */
+
+ /* DV subcode: 2DIFs */
+ for (j = 0; j < 2; j++) {
+ buf += dv_write_dif_id(dv_sect_subcode, chan, i, j, buf);
+ for (k = 0; k < 6; k++)
+ buf += dv_write_ssyb_id(k, (i < c->sys->difseg_size/2), buf) + 5;
+ buf += 29; /* unused bytes */
+ }
+
+ /* DV VAUX: 3DIFS */
+ for (j = 0; j < 3; j++) {
+ buf += dv_write_dif_id(dv_sect_vaux, chan, i, j, buf);
+ buf += dv_write_pack(dv_video_source, c, buf);
+ buf += dv_write_pack(dv_video_control, c, buf);
+ buf += 7*5;
+ buf += dv_write_pack(dv_video_source, c, buf);
+ buf += dv_write_pack(dv_video_control, c, buf);
+ buf += 4*5 + 2; /* unused bytes */
+ }
+
+ /* DV Audio/Video: 135 Video DIFs + 9 Audio DIFs */
+ for (j = 0; j < 135; j++) {
+ if (j%15 == 0) {
+ memset(buf, 0xff, 80);
+ buf += dv_write_dif_id(dv_sect_audio, chan, i, j/15, buf);
+ buf += 77; /* audio control & shuffled PCM audio */
+ }
+ buf += dv_write_dif_id(dv_sect_video, chan, i, j, buf);
+ buf += 77; /* 1 video macro block: 1 byte control
+ 4 * 14 bytes Y 8x8 data
+ 10 bytes Cr 8x8 data
+ 10 bytes Cb 8x8 data */
+ }
+ }
+ }
+}
+
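dv_format_frame lays down the full DIF structure per sequence: 1 header DIF, 2 subcode DIFs, 3 VAUX DIFs, then 144 audio/video DIFs (one audio DIF every 15 blocks, i.e. 9 audio + 135 video), 150 blocks of 80 bytes in all. With 10 sequences per frame for 525/60 and 12 for 625/50 this reproduces exactly the 120000/144000-byte frame sizes quoted above dvvideo_decode_frame. A quick check of that arithmetic, for illustration:

    /* Illustrative check of the DIF geometry described above. */
    #include <assert.h>

    int main(void)
    {
        const int dif_block  = 80;                    /* bytes per DIF block     */
        const int blocks_seq = 1 + 2 + 3 + 9 + 135;   /* header+subcode+VAUX+A/V */

        assert(blocks_seq == 150);
        assert(blocks_seq * dif_block == 12000);          /* one DIF sequence    */
        assert(10 * blocks_seq * dif_block == 120000);    /* 525/60, 25 Mbps     */
        assert(12 * blocks_seq * dif_block == 144000);    /* 625/50, 25 Mbps     */
        return 0;
    }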
+#ifdef CONFIG_ENCODERS
static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size,
void *data)
{
@@ -1095,21 +1221,11 @@ static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size,
emms_c();
- /* Fill in just enough of the header for dv_frame_profile() to
- return the correct result, so that the frame can be decoded
- correctly. The rest of the metadata is filled in by the dvvideo
- avformat. (this should probably change so that encode_frame()
- fills in ALL of the metadata - e.g. for Quicktime-wrapped DV
- streams) */
-
- /* NTSC/PAL format */
- buf[3] = s->sys->dsf ? 0x80 : 0x00;
-
- /* 25Mbps or 50Mbps */
- buf[80*5 + 48 + 3] = (s->sys->pix_fmt == PIX_FMT_YUV422P) ? 0x4 : 0x0;
+ dv_format_frame(s, buf);
return s->sys->frame_size;
}
+#endif
static int dvvideo_close(AVCodecContext *c)
{
@@ -1133,6 +1249,7 @@ AVCodec dvvideo_encoder = {
};
#endif // CONFIG_DVVIDEO_ENCODER
+#ifdef CONFIG_DVVIDEO_DECODER
AVCodec dvvideo_decoder = {
"dvvideo",
CODEC_TYPE_VIDEO,
@@ -1145,3 +1262,4 @@ AVCodec dvvideo_decoder = {
CODEC_CAP_DR1,
NULL
};
+#endif
diff --git a/contrib/ffmpeg/libavcodec/dvbsub.c b/contrib/ffmpeg/libavcodec/dvbsub.c
new file mode 100644
index 000000000..44ba19d86
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/dvbsub.c
@@ -0,0 +1,445 @@
+/*
+ * DVB subtitle encoding for ffmpeg
+ * Copyright (c) 2005 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avcodec.h"
+
+typedef struct DVBSubtitleContext {
+ int hide_state;
+ int object_version;
+} DVBSubtitleContext;
+
+#define PUTBITS2(val)\
+{\
+ bitbuf |= (val) << bitcnt;\
+ bitcnt -= 2;\
+ if (bitcnt < 0) {\
+ bitcnt = 6;\
+ *q++ = bitbuf;\
+ bitbuf = 0;\
+ }\
+}
+
+static void dvb_encode_rle2(uint8_t **pq,
+ const uint8_t *bitmap, int linesize,
+ int w, int h)
+{
+ uint8_t *q;
+ unsigned int bitbuf;
+ int bitcnt;
+ int x, y, len, x1, v, color;
+
+ q = *pq;
+
+ for(y = 0; y < h; y++) {
+ *q++ = 0x10;
+ bitbuf = 0;
+ bitcnt = 6;
+
+ x = 0;
+ while (x < w) {
+ x1 = x;
+ color = bitmap[x1++];
+ while (x1 < w && bitmap[x1] == color)
+ x1++;
+ len = x1 - x;
+ if (color == 0 && len == 2) {
+ PUTBITS2(0);
+ PUTBITS2(0);
+ PUTBITS2(1);
+ } else if (len >= 3 && len <= 10) {
+ v = len - 3;
+ PUTBITS2(0);
+ PUTBITS2((v >> 2) | 2);
+ PUTBITS2(v & 3);
+ PUTBITS2(color);
+ } else if (len >= 12 && len <= 27) {
+ v = len - 12;
+ PUTBITS2(0);
+ PUTBITS2(0);
+ PUTBITS2(2);
+ PUTBITS2(v >> 2);
+ PUTBITS2(v & 3);
+ PUTBITS2(color);
+ } else if (len >= 29) {
+ /* length = 29 ... 284 */
+ if (len > 284)
+ len = 284;
+ v = len - 29;
+ PUTBITS2(0);
+ PUTBITS2(0);
+ PUTBITS2(3);
+ PUTBITS2((v >> 6));
+ PUTBITS2((v >> 4) & 3);
+ PUTBITS2((v >> 2) & 3);
+ PUTBITS2(v & 3);
+ PUTBITS2(color);
+ } else {
+ PUTBITS2(color);
+ if (color == 0) {
+ PUTBITS2(1);
+ }
+ len = 1;
+ }
+ x += len;
+ }
+ /* end of line */
+ PUTBITS2(0);
+ PUTBITS2(0);
+ PUTBITS2(0);
+ if (bitcnt != 6) {
+ *q++ = bitbuf;
+ }
+ *q++ = 0xf0;
+ bitmap += linesize;
+ }
+ *pq = q;
+}
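dvb_encode_rle2 emits one of six 2-bit-code shapes per run: a lone non-zero pixel as its color code, a lone zero pixel as 00 01, two zero pixels as 00 00 01, a 3-10 run as 00 1v vv plus the color, a 12-27 run as 00 00 10 vv vv plus the color, and a 29-284 run as 00 00 11 followed by four 2-bit length groups and the color; runs of exactly 11 or 28 fall through to the single-pixel branch and get re-scanned on the next loop iteration. A small helper deriving the per-code bit cost from those branches, for illustration only (worth cross-checking against ETSI EN 300 743 before relying on it):

    /* Illustrative only: bits emitted for the code chosen for a run of len
     * pixels by dvb_encode_rle2 above (the single-pixel fallback consumes
     * just one pixel; the encoder re-scans the rest). */
    static int rle2_run_bits(int color, int len)
    {
        if (color == 0 && len == 2)  return 3 * 2;   /* 00 00 01                 */
        if (len >= 3  && len <= 10)  return 4 * 2;   /* 00 1v vv + 2-bit color   */
        if (len >= 12 && len <= 27)  return 6 * 2;   /* 00 00 10 vv vv + color   */
        if (len >= 29)               return 8 * 2;   /* 00 00 11 + 8 length bits
                                                        + color (len capped 284) */
        return (color == 0) ? 2 * 2 : 1 * 2;         /* single pixel             */
    }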
+
+#define PUTBITS4(val)\
+{\
+ bitbuf |= (val) << bitcnt;\
+ bitcnt -= 4;\
+ if (bitcnt < 0) {\
+ bitcnt = 4;\
+ *q++ = bitbuf;\
+ bitbuf = 0;\
+ }\
+}
+
+/* some DVB decoders only implement 4 bits/pixel */
+static void dvb_encode_rle4(uint8_t **pq,
+ const uint8_t *bitmap, int linesize,
+ int w, int h)
+{
+ uint8_t *q;
+ unsigned int bitbuf;
+ int bitcnt;
+ int x, y, len, x1, v, color;
+
+ q = *pq;
+
+ for(y = 0; y < h; y++) {
+ *q++ = 0x11;
+ bitbuf = 0;
+ bitcnt = 4;
+
+ x = 0;
+ while (x < w) {
+ x1 = x;
+ color = bitmap[x1++];
+ while (x1 < w && bitmap[x1] == color)
+ x1++;
+ len = x1 - x;
+ if (color == 0 && len == 2) {
+ PUTBITS4(0);
+ PUTBITS4(0xd);
+ } else if (color == 0 && (len >= 3 && len <= 9)) {
+ PUTBITS4(0);
+ PUTBITS4(len - 2);
+ } else if (len >= 4 && len <= 7) {
+ PUTBITS4(0);
+ PUTBITS4(8 + len - 4);
+ PUTBITS4(color);
+ } else if (len >= 9 && len <= 24) {
+ PUTBITS4(0);
+ PUTBITS4(0xe);
+ PUTBITS4(len - 9);
+ PUTBITS4(color);
+ } else if (len >= 25) {
+ if (len > 280)
+ len = 280;
+ v = len - 25;
+ PUTBITS4(0);
+ PUTBITS4(0xf);
+ PUTBITS4(v >> 4);
+ PUTBITS4(v & 0xf);
+ PUTBITS4(color);
+ } else {
+ PUTBITS4(color);
+ if (color == 0) {
+ PUTBITS4(0xc);
+ }
+ len = 1;
+ }
+ x += len;
+ }
+ /* end of line */
+ PUTBITS4(0);
+ PUTBITS4(0);
+ if (bitcnt != 4) {
+ *q++ = bitbuf;
+ }
+ *q++ = 0xf0;
+ bitmap += linesize;
+ }
+ *pq = q;
+}
+
+#define SCALEBITS 10
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define RGB_TO_Y_CCIR(r, g, b) \
+((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
+ FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
+
+#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
+(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
+ FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
+#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
+(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
+ FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
+
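The RGB_TO_*_CCIR macros above are 10-bit fixed-point (SCALEBITS) conversions from full-range RGB to limited-range BT.601 YCbCr, so full-scale white must land on Y=235 with neutral chroma and black on Y=16. A minimal standalone check, with the macros copied verbatim from above:

    /* Minimal standalone check of the fixed-point CCIR conversion (macros
     * copied verbatim from the segment above; illustrative only). */
    #include <assert.h>

    #define SCALEBITS 10
    #define ONE_HALF  (1 << (SCALEBITS - 1))
    #define FIX(x)    ((int) ((x) * (1 << SCALEBITS) + 0.5))

    #define RGB_TO_Y_CCIR(r, g, b) \
    ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
      FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

    #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
    (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
         FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

    #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
    (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
       FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

    int main(void)
    {
        assert(RGB_TO_Y_CCIR(255, 255, 255) == 235);    /* full-scale white */
        assert(RGB_TO_U_CCIR(255, 255, 255, 0) == 128);
        assert(RGB_TO_V_CCIR(255, 255, 255, 0) == 128);
        assert(RGB_TO_Y_CCIR(0, 0, 0) == 16);           /* black            */
        return 0;
    }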
+static inline void putbe16(uint8_t **pq, uint16_t v)
+{
+ uint8_t *q;
+ q = *pq;
+ *q++ = v >> 8;
+ *q++ = v;
+ *pq = q;
+}
+
+static int encode_dvb_subtitles(DVBSubtitleContext *s,
+ uint8_t *outbuf, AVSubtitle *h)
+{
+ uint8_t *q, *pseg_len;
+ int page_id, region_id, clut_id, object_id, i, bpp_index, page_state;
+
+
+ q = outbuf;
+
+ page_id = 1;
+
+ if (h->num_rects == 0 || h->rects == NULL)
+ return -1;
+
+ *q++ = 0x00; /* subtitle_stream_id */
+
+ /* page composition segment */
+
+ *q++ = 0x0f; /* sync_byte */
+ *q++ = 0x10; /* segment_type */
+ putbe16(&q, page_id);
+ pseg_len = q;
+ q += 2; /* segment length */
+ *q++ = 30; /* page_timeout (seconds) */
+ if (s->hide_state)
+ page_state = 0; /* normal case */
+ else
+ page_state = 2; /* mode change */
+ /* page_version = 0 + page_state */
+ *q++ = s->object_version | (page_state << 2) | 3;
+
+ for (region_id = 0; region_id < h->num_rects; region_id++) {
+ *q++ = region_id;
+ *q++ = 0xff; /* reserved */
+ putbe16(&q, h->rects[region_id].x); /* left pos */
+ putbe16(&q, h->rects[region_id].y); /* top pos */
+ }
+
+ putbe16(&pseg_len, q - pseg_len - 2);
+
+ if (!s->hide_state) {
+ for (clut_id = 0; clut_id < h->num_rects; clut_id++) {
+
+ /* CLUT segment */
+
+ if (h->rects[clut_id].nb_colors <= 4) {
+ /* 2 bpp, some decoders do not support it correctly */
+ bpp_index = 0;
+ } else if (h->rects[clut_id].nb_colors <= 16) {
+ /* 4 bpp, standard encoding */
+ bpp_index = 1;
+ } else {
+ return -1;
+ }
+
+ *q++ = 0x0f; /* sync byte */
+ *q++ = 0x12; /* CLUT definition segment */
+ putbe16(&q, page_id);
+ pseg_len = q;
+ q += 2; /* segment length */
+ *q++ = clut_id;
+ *q++ = (0 << 4) | 0xf; /* version = 0 */
+
+ for(i = 0; i < h->rects[clut_id].nb_colors; i++) {
+ *q++ = i; /* clut_entry_id */
+ *q++ = (1 << (7 - bpp_index)) | (0xf << 1) | 1; /* 2 bits/pixel full range */
+ {
+ int a, r, g, b;
+ a = (h->rects[clut_id].rgba_palette[i] >> 24) & 0xff;
+ r = (h->rects[clut_id].rgba_palette[i] >> 16) & 0xff;
+ g = (h->rects[clut_id].rgba_palette[i] >> 8) & 0xff;
+ b = (h->rects[clut_id].rgba_palette[i] >> 0) & 0xff;
+
+ *q++ = RGB_TO_Y_CCIR(r, g, b);
+ *q++ = RGB_TO_V_CCIR(r, g, b, 0);
+ *q++ = RGB_TO_U_CCIR(r, g, b, 0);
+ *q++ = 255 - a;
+ }
+ }
+
+ putbe16(&pseg_len, q - pseg_len - 2);
+ }
+ }
+
+ for (region_id = 0; region_id < h->num_rects; region_id++) {
+
+ /* region composition segment */
+
+ if (h->rects[region_id].nb_colors <= 4) {
+ /* 2 bpp, some decoders do not support it correctly */
+ bpp_index = 0;
+ } else if (h->rects[region_id].nb_colors <= 16) {
+ /* 4 bpp, standard encoding */
+ bpp_index = 1;
+ } else {
+ return -1;
+ }
+
+ *q++ = 0x0f; /* sync_byte */
+ *q++ = 0x11; /* segment_type */
+ putbe16(&q, page_id);
+ pseg_len = q;
+ q += 2; /* segment length */
+ *q++ = region_id;
+ *q++ = (s->object_version << 4) | (0 << 3) | 0x07; /* version , no fill */
+ putbe16(&q, h->rects[region_id].w); /* region width */
+ putbe16(&q, h->rects[region_id].h); /* region height */
+ *q++ = ((1 + bpp_index) << 5) | ((1 + bpp_index) << 2) | 0x03;
+ *q++ = region_id; /* clut_id == region_id */
+ *q++ = 0; /* 8 bit fill colors */
+ *q++ = 0x03; /* 4 bit and 2 bit fill colors */
+
+ if (!s->hide_state) {
+ putbe16(&q, region_id); /* object_id == region_id */
+ *q++ = (0 << 6) | (0 << 4);
+ *q++ = 0;
+ *q++ = 0xf0;
+ *q++ = 0;
+ }
+
+ putbe16(&pseg_len, q - pseg_len - 2);
+ }
+
+ if (!s->hide_state) {
+
+ for (object_id = 0; object_id < h->num_rects; object_id++) {
+ /* Object Data segment */
+
+ if (h->rects[object_id].nb_colors <= 4) {
+ /* 2 bpp, some decoders do not support it correctly */
+ bpp_index = 0;
+ } else if (h->rects[object_id].nb_colors <= 16) {
+ /* 4 bpp, standard encoding */
+ bpp_index = 1;
+ } else {
+ return -1;
+ }
+
+ *q++ = 0x0f; /* sync byte */
+ *q++ = 0x13;
+ putbe16(&q, page_id);
+ pseg_len = q;
+ q += 2; /* segment length */
+
+ putbe16(&q, object_id);
+ *q++ = (s->object_version << 4) | (0 << 2) | (0 << 1) | 1; /* version = 0,
+ object_coding_method,
+ non_modifying_color_flag */
+ {
+ uint8_t *ptop_field_len, *pbottom_field_len, *top_ptr, *bottom_ptr;
+ void (*dvb_encode_rle)(uint8_t **pq,
+ const uint8_t *bitmap, int linesize,
+ int w, int h);
+ ptop_field_len = q;
+ q += 2;
+ pbottom_field_len = q;
+ q += 2;
+
+ if (bpp_index == 0)
+ dvb_encode_rle = dvb_encode_rle2;
+ else
+ dvb_encode_rle = dvb_encode_rle4;
+
+ top_ptr = q;
+ dvb_encode_rle(&q, h->rects[object_id].bitmap, h->rects[object_id].w * 2,
+ h->rects[object_id].w, h->rects[object_id].h >> 1);
+ bottom_ptr = q;
+ dvb_encode_rle(&q, h->rects[object_id].bitmap + h->rects[object_id].w,
+ h->rects[object_id].w * 2, h->rects[object_id].w,
+ h->rects[object_id].h >> 1);
+
+ putbe16(&ptop_field_len, bottom_ptr - top_ptr);
+ putbe16(&pbottom_field_len, q - bottom_ptr);
+ }
+
+ putbe16(&pseg_len, q - pseg_len - 2);
+ }
+ }
+
+ /* end of display set segment */
+
+ *q++ = 0x0f; /* sync_byte */
+ *q++ = 0x80; /* segment_type */
+ putbe16(&q, page_id);
+ pseg_len = q;
+ q += 2; /* segment length */
+
+ putbe16(&pseg_len, q - pseg_len - 2);
+
+ *q++ = 0xff; /* end of PES data */
+
+ s->object_version = (s->object_version + 1) & 0xf;
+ s->hide_state = !s->hide_state;
+ return q - outbuf;
+}
+
+static int dvbsub_init_decoder(AVCodecContext *avctx)
+{
+ return 0;
+}
+
+static int dvbsub_close_decoder(AVCodecContext *avctx)
+{
+ return 0;
+}
+
+static int dvbsub_encode(AVCodecContext *avctx,
+ unsigned char *buf, int buf_size, void *data)
+{
+ DVBSubtitleContext *s = avctx->priv_data;
+ AVSubtitle *sub = data;
+ int ret;
+
+ ret = encode_dvb_subtitles(s, buf, sub);
+ return ret;
+}
+
+AVCodec dvbsub_encoder = {
+ "dvbsub",
+ CODEC_TYPE_SUBTITLE,
+ CODEC_ID_DVB_SUBTITLE,
+ sizeof(DVBSubtitleContext),
+ dvbsub_init_decoder,
+ dvbsub_encode,
+ dvbsub_close_decoder,
+};
diff --git a/contrib/ffmpeg/libavcodec/dvbsubdec.c b/contrib/ffmpeg/libavcodec/dvbsubdec.c
new file mode 100644
index 000000000..ff43ca6b9
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/dvbsubdec.c
@@ -0,0 +1,1633 @@
+/*
+ * DVB subtitle decoding for ffmpeg
+ * Copyright (c) 2005 Ian Caulfield.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avcodec.h"
+#include "dsputil.h"
+#include "bitstream.h"
+
+//#define DEBUG
+//#define DEBUG_PACKET_CONTENTS
+//#define DEBUG_SAVE_IMAGES
+
+#define DVBSUB_PAGE_SEGMENT 0x10
+#define DVBSUB_REGION_SEGMENT 0x11
+#define DVBSUB_CLUT_SEGMENT 0x12
+#define DVBSUB_OBJECT_SEGMENT 0x13
+#define DVBSUB_DISPLAY_SEGMENT 0x80
+
+static unsigned char *cm;
+
+#ifdef DEBUG_SAVE_IMAGES
+#undef fprintf
+#if 0
+static void png_save(const char *filename, uint8_t *bitmap, int w, int h,
+ uint32_t *rgba_palette)
+{
+ int x, y, v;
+ FILE *f;
+ char fname[40], fname2[40];
+ char command[1024];
+
+ snprintf(fname, 40, "%s.ppm", filename);
+
+ f = fopen(fname, "w");
+ if (!f) {
+ perror(fname);
+ exit(1);
+ }
+ fprintf(f, "P6\n"
+ "%d %d\n"
+ "%d\n",
+ w, h, 255);
+ for(y = 0; y < h; y++) {
+ for(x = 0; x < w; x++) {
+ v = rgba_palette[bitmap[y * w + x]];
+ putc((v >> 16) & 0xff, f);
+ putc((v >> 8) & 0xff, f);
+ putc((v >> 0) & 0xff, f);
+ }
+ }
+ fclose(f);
+
+
+ snprintf(fname2, 40, "%s-a.pgm", filename);
+
+ f = fopen(fname2, "w");
+ if (!f) {
+ perror(fname2);
+ exit(1);
+ }
+ fprintf(f, "P5\n"
+ "%d %d\n"
+ "%d\n",
+ w, h, 255);
+ for(y = 0; y < h; y++) {
+ for(x = 0; x < w; x++) {
+ v = rgba_palette[bitmap[y * w + x]];
+ putc((v >> 24) & 0xff, f);
+ }
+ }
+ fclose(f);
+
+ snprintf(command, 1024, "pnmtopng -alpha %s %s > %s.png 2> /dev/null", fname2, fname, filename);
+ system(command);
+
+ snprintf(command, 1024, "rm %s %s", fname, fname2);
+ system(command);
+}
+#endif
+
+static void png_save2(const char *filename, uint32_t *bitmap, int w, int h)
+{
+ int x, y, v;
+ FILE *f;
+ char fname[40], fname2[40];
+ char command[1024];
+
+ snprintf(fname, 40, "%s.ppm", filename);
+
+ f = fopen(fname, "w");
+ if (!f) {
+ perror(fname);
+ exit(1);
+ }
+ fprintf(f, "P6\n"
+ "%d %d\n"
+ "%d\n",
+ w, h, 255);
+ for(y = 0; y < h; y++) {
+ for(x = 0; x < w; x++) {
+ v = bitmap[y * w + x];
+ putc((v >> 16) & 0xff, f);
+ putc((v >> 8) & 0xff, f);
+ putc((v >> 0) & 0xff, f);
+ }
+ }
+ fclose(f);
+
+
+ snprintf(fname2, 40, "%s-a.pgm", filename);
+
+ f = fopen(fname2, "w");
+ if (!f) {
+ perror(fname2);
+ exit(1);
+ }
+ fprintf(f, "P5\n"
+ "%d %d\n"
+ "%d\n",
+ w, h, 255);
+ for(y = 0; y < h; y++) {
+ for(x = 0; x < w; x++) {
+ v = bitmap[y * w + x];
+ putc((v >> 24) & 0xff, f);
+ }
+ }
+ fclose(f);
+
+ snprintf(command, 1024, "pnmtopng -alpha %s %s > %s.png 2> /dev/null", fname2, fname, filename);
+ system(command);
+
+ snprintf(command, 1024, "rm %s %s", fname, fname2);
+ system(command);
+}
+#endif
+
+#define RGBA(r,g,b,a) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b))
+
+typedef struct DVBSubCLUT {
+ int id;
+
+ uint32_t clut4[4];
+ uint32_t clut16[16];
+ uint32_t clut256[256];
+
+ struct DVBSubCLUT *next;
+} DVBSubCLUT;
+
+static DVBSubCLUT default_clut;
+
+typedef struct DVBSubObjectDisplay {
+ int object_id;
+ int region_id;
+
+ int x_pos;
+ int y_pos;
+
+ int fgcolour;
+ int bgcolour;
+
+ struct DVBSubObjectDisplay *region_list_next;
+ struct DVBSubObjectDisplay *object_list_next;
+} DVBSubObjectDisplay;
+
+typedef struct DVBSubObject {
+ int id;
+
+ int type;
+
+ DVBSubObjectDisplay *display_list;
+
+ struct DVBSubObject *next;
+} DVBSubObject;
+
+typedef struct DVBSubRegionDisplay {
+ int region_id;
+
+ int x_pos;
+ int y_pos;
+
+ struct DVBSubRegionDisplay *next;
+} DVBSubRegionDisplay;
+
+typedef struct DVBSubRegion {
+ int id;
+
+ int width;
+ int height;
+ int depth;
+
+ int clut;
+ int bgcolour;
+
+ uint8_t *pbuf;
+ int buf_size;
+
+ DVBSubObjectDisplay *display_list;
+
+ struct DVBSubRegion *next;
+} DVBSubRegion;
+
+typedef struct DVBSubContext {
+ int composition_id;
+ int ancillary_id;
+
+ int time_out;
+ DVBSubRegion *region_list;
+ DVBSubCLUT *clut_list;
+ DVBSubObject *object_list;
+
+ int display_list_size;
+ DVBSubRegionDisplay *display_list;
+} DVBSubContext;
+
+
+static DVBSubObject* get_object(DVBSubContext *ctx, int object_id)
+{
+ DVBSubObject *ptr = ctx->object_list;
+
+ while (ptr != NULL && ptr->id != object_id) {
+ ptr = ptr->next;
+ }
+
+ return ptr;
+}
+
+static DVBSubCLUT* get_clut(DVBSubContext *ctx, int clut_id)
+{
+ DVBSubCLUT *ptr = ctx->clut_list;
+
+ while (ptr != NULL && ptr->id != clut_id) {
+ ptr = ptr->next;
+ }
+
+ return ptr;
+}
+
+static DVBSubRegion* get_region(DVBSubContext *ctx, int region_id)
+{
+ DVBSubRegion *ptr = ctx->region_list;
+
+ while (ptr != NULL && ptr->id != region_id) {
+ ptr = ptr->next;
+ }
+
+ return ptr;
+}
+
+static void delete_region_display_list(DVBSubContext *ctx, DVBSubRegion *region)
+{
+ DVBSubObject *object, *obj2, **obj2_ptr;
+ DVBSubObjectDisplay *display, *obj_disp, **obj_disp_ptr;
+
+ while (region->display_list != NULL) {
+ display = region->display_list;
+
+ object = get_object(ctx, display->object_id);
+
+ if (object != NULL) {
+ obj_disp = object->display_list;
+ obj_disp_ptr = &object->display_list;
+
+ while (obj_disp != NULL && obj_disp != display) {
+ obj_disp_ptr = &obj_disp->object_list_next;
+ obj_disp = obj_disp->object_list_next;
+ }
+
+ if (obj_disp) {
+ *obj_disp_ptr = obj_disp->object_list_next;
+
+ if (object->display_list == NULL) {
+ obj2 = ctx->object_list;
+ obj2_ptr = &ctx->object_list;
+
+ while (obj2 != NULL && obj2 != object) {
+ obj2_ptr = &obj2->next;
+ obj2 = obj2->next;
+ }
+
+ *obj2_ptr = obj2->next;
+
+ av_free(obj2);
+ }
+ }
+ }
+
+ region->display_list = display->region_list_next;
+
+ av_free(display);
+ }
+
+}
+
+static void delete_state(DVBSubContext *ctx)
+{
+ DVBSubRegion *region;
+ DVBSubCLUT *clut;
+
+ while (ctx->region_list != NULL)
+ {
+ region = ctx->region_list;
+
+ ctx->region_list = region->next;
+
+ delete_region_display_list(ctx, region);
+ if (region->pbuf != NULL)
+ av_free(region->pbuf);
+
+ av_free(region);
+ }
+
+ while (ctx->clut_list != NULL)
+ {
+ clut = ctx->clut_list;
+
+ ctx->clut_list = clut->next;
+
+ av_free(clut);
+ }
+
+ /* Should already be null */
+ if (ctx->object_list != NULL)
+ av_log(0, AV_LOG_ERROR, "Memory deallocation error!\n");
+}
+
+static int dvbsub_init_decoder(AVCodecContext *avctx)
+{
+ int i, r, g, b, a = 0;
+ DVBSubContext *ctx = (DVBSubContext*) avctx->priv_data;
+
+ cm = ff_cropTbl + MAX_NEG_CROP;
+
+ memset(avctx->priv_data, 0, sizeof(DVBSubContext));
+
+ ctx->composition_id = avctx->sub_id & 0xffff;
+ ctx->ancillary_id = avctx->sub_id >> 16;
+
+ default_clut.id = -1;
+ default_clut.next = NULL;
+
+ default_clut.clut4[0] = RGBA( 0, 0, 0, 0);
+ default_clut.clut4[1] = RGBA(255, 255, 255, 255);
+ default_clut.clut4[2] = RGBA( 0, 0, 0, 255);
+ default_clut.clut4[3] = RGBA(127, 127, 127, 255);
+
+ default_clut.clut16[0] = RGBA( 0, 0, 0, 0);
+ for (i = 1; i < 16; i++) {
+ if (i < 8) {
+ r = (i & 1) ? 255 : 0;
+ g = (i & 2) ? 255 : 0;
+ b = (i & 4) ? 255 : 0;
+ } else {
+ r = (i & 1) ? 127 : 0;
+ g = (i & 2) ? 127 : 0;
+ b = (i & 4) ? 127 : 0;
+ }
+ default_clut.clut16[i] = RGBA(r, g, b, 255);
+ }
+
+ default_clut.clut256[0] = RGBA( 0, 0, 0, 0);
+ for (i = 1; i < 256; i++) {
+ if (i < 8) {
+ r = (i & 1) ? 255 : 0;
+ g = (i & 2) ? 255 : 0;
+ b = (i & 4) ? 255 : 0;
+ a = 63;
+ } else {
+ switch (i & 0x88) {
+ case 0x00:
+ r = ((i & 1) ? 85 : 0) + ((i & 0x10) ? 170 : 0);
+ g = ((i & 2) ? 85 : 0) + ((i & 0x20) ? 170 : 0);
+ b = ((i & 4) ? 85 : 0) + ((i & 0x40) ? 170 : 0);
+ a = 255;
+ break;
+ case 0x08:
+ r = ((i & 1) ? 85 : 0) + ((i & 0x10) ? 170 : 0);
+ g = ((i & 2) ? 85 : 0) + ((i & 0x20) ? 170 : 0);
+ b = ((i & 4) ? 85 : 0) + ((i & 0x40) ? 170 : 0);
+ a = 127;
+ break;
+ case 0x80:
+ r = 127 + ((i & 1) ? 43 : 0) + ((i & 0x10) ? 85 : 0);
+ g = 127 + ((i & 2) ? 43 : 0) + ((i & 0x20) ? 85 : 0);
+ b = 127 + ((i & 4) ? 43 : 0) + ((i & 0x40) ? 85 : 0);
+ a = 255;
+ break;
+ case 0x88:
+ r = ((i & 1) ? 43 : 0) + ((i & 0x10) ? 85 : 0);
+ g = ((i & 2) ? 43 : 0) + ((i & 0x20) ? 85 : 0);
+ b = ((i & 4) ? 43 : 0) + ((i & 0x40) ? 85 : 0);
+ a = 255;
+ break;
+ }
+ }
+ default_clut.clut256[i] = RGBA(r, g, b, a);
+ }
+
+ return 0;
+}
+
+static int dvbsub_close_decoder(AVCodecContext *avctx)
+{
+ DVBSubContext *ctx = (DVBSubContext*) avctx->priv_data;
+ DVBSubRegionDisplay *display;
+
+ delete_state(ctx);
+
+ while (ctx->display_list != NULL)
+ {
+ display = ctx->display_list;
+ ctx->display_list = display->next;
+
+ av_free(display);
+ }
+
+ return 0;
+}
+
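+/* The dvbsub_read_*bit_string() helpers below decode the run-length coded
+ * pixel strings carried in DVB subtitle object data: a non-zero literal code
+ * emits a single pixel, a zero code introduces escape sequences for
+ * progressively longer runs or end-of-string, and map_table (when non-NULL)
+ * remaps the decoded indices so strings of a lower bit depth can be drawn
+ * into a deeper region. */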
+static int dvbsub_read_2bit_string(uint8_t *destbuf, int dbuf_len,
+ uint8_t **srcbuf, int buf_size,
+ int non_mod, uint8_t *map_table)
+{
+ GetBitContext gb;
+
+ int bits;
+ int run_length;
+ int pixels_read = 0;
+
+ init_get_bits(&gb, *srcbuf, buf_size << 8);
+
+ while (get_bits_count(&gb) < (buf_size << 8) && pixels_read < dbuf_len) {
+ bits = get_bits(&gb, 2);
+
+ if (bits != 0) {
+ if (non_mod != 1 || bits != 1) {
+ if (map_table != NULL)
+ *destbuf++ = map_table[bits];
+ else
+ *destbuf++ = bits;
+ }
+ pixels_read++;
+ } else {
+ bits = get_bits(&gb, 1);
+ if (bits == 1) {
+ run_length = get_bits(&gb, 3) + 3;
+ bits = get_bits(&gb, 2);
+
+ if (non_mod == 1 && bits == 1)
+ pixels_read += run_length;
+ else {
+ if (map_table != NULL)
+ bits = map_table[bits];
+ while (run_length-- > 0 && pixels_read < dbuf_len) {
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ }
+ } else {
+ bits = get_bits(&gb, 1);
+ if (bits == 0) {
+ bits = get_bits(&gb, 2);
+ if (bits == 2) {
+ run_length = get_bits(&gb, 4) + 12;
+ bits = get_bits(&gb, 2);
+
+ if (non_mod == 1 && bits == 1)
+ pixels_read += run_length;
+ else {
+ if (map_table != NULL)
+ bits = map_table[bits];
+ while (run_length-- > 0 && pixels_read < dbuf_len) {
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ }
+ } else if (bits == 3) {
+ run_length = get_bits(&gb, 8) + 29;
+ bits = get_bits(&gb, 2);
+
+ if (non_mod == 1 && bits == 1)
+ pixels_read += run_length;
+ else {
+ if (map_table != NULL)
+ bits = map_table[bits];
+ while (run_length-- > 0 && pixels_read < dbuf_len) {
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ }
+ } else if (bits == 1) {
+ pixels_read += 2;
+ if (map_table != NULL)
+ bits = map_table[0];
+ else
+ bits = 0;
+ if (pixels_read <= dbuf_len) {
+ *destbuf++ = bits;
+ *destbuf++ = bits;
+ }
+ } else {
+ (*srcbuf) += (get_bits_count(&gb) + 7) >> 3;
+ return pixels_read;
+ }
+ } else {
+ if (map_table != NULL)
+ bits = map_table[0];
+ else
+ bits = 0;
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ }
+ }
+ }
+
+ if (get_bits(&gb, 6) != 0)
+ av_log(0, AV_LOG_ERROR, "DVBSub error: line overflow\n");
+
+ (*srcbuf) += (get_bits_count(&gb) + 7) >> 3;
+
+ return pixels_read;
+}
+
+static int dvbsub_read_4bit_string(uint8_t *destbuf, int dbuf_len,
+ uint8_t **srcbuf, int buf_size,
+ int non_mod, uint8_t *map_table)
+{
+ GetBitContext gb;
+
+ int bits;
+ int run_length;
+ int pixels_read = 0;
+
+ init_get_bits(&gb, *srcbuf, buf_size << 8);
+
+ while (get_bits_count(&gb) < (buf_size << 8) && pixels_read < dbuf_len) {
+ bits = get_bits(&gb, 4);
+
+ if (bits != 0) {
+ if (non_mod != 1 || bits != 1) {
+ if (map_table != NULL)
+ *destbuf++ = map_table[bits];
+ else
+ *destbuf++ = bits;
+ }
+ pixels_read++;
+ } else {
+ bits = get_bits(&gb, 1);
+ if (bits == 0) {
+ run_length = get_bits(&gb, 3);
+
+ if (run_length == 0) {
+ (*srcbuf) += (get_bits_count(&gb) + 7) >> 3;
+ return pixels_read;
+ }
+
+ run_length += 2;
+
+ if (map_table != NULL)
+ bits = map_table[0];
+ else
+ bits = 0;
+
+ while (run_length-- > 0 && pixels_read < dbuf_len) {
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ } else {
+ bits = get_bits(&gb, 1);
+ if (bits == 0) {
+ run_length = get_bits(&gb, 2) + 4;
+ bits = get_bits(&gb, 4);
+
+ if (non_mod == 1 && bits == 1)
+ pixels_read += run_length;
+ else {
+ if (map_table != NULL)
+ bits = map_table[bits];
+ while (run_length-- > 0 && pixels_read < dbuf_len) {
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ }
+ } else {
+ bits = get_bits(&gb, 2);
+ if (bits == 2) {
+ run_length = get_bits(&gb, 4) + 9;
+ bits = get_bits(&gb, 4);
+
+ if (non_mod == 1 && bits == 1)
+ pixels_read += run_length;
+ else {
+ if (map_table != NULL)
+ bits = map_table[bits];
+ while (run_length-- > 0 && pixels_read < dbuf_len) {
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ }
+ } else if (bits == 3) {
+ run_length = get_bits(&gb, 8) + 25;
+ bits = get_bits(&gb, 4);
+
+ if (non_mod == 1 && bits == 1)
+ pixels_read += run_length;
+ else {
+ if (map_table != NULL)
+ bits = map_table[bits];
+ while (run_length-- > 0 && pixels_read < dbuf_len) {
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ }
+ } else if (bits == 1) {
+ pixels_read += 2;
+ if (map_table != NULL)
+ bits = map_table[0];
+ else
+ bits = 0;
+ if (pixels_read <= dbuf_len) {
+ *destbuf++ = bits;
+ *destbuf++ = bits;
+ }
+ } else {
+ if (map_table != NULL)
+ bits = map_table[0];
+ else
+ bits = 0;
+ *destbuf++ = bits;
+ pixels_read ++;
+ }
+ }
+ }
+ }
+ }
+
+ if (get_bits(&gb, 8) != 0)
+ av_log(0, AV_LOG_ERROR, "DVBSub error: line overflow\n");
+
+ (*srcbuf) += (get_bits_count(&gb) + 7) >> 3;
+
+ return pixels_read;
+}
+
+static int dvbsub_read_8bit_string(uint8_t *destbuf, int dbuf_len,
+ uint8_t **srcbuf, int buf_size,
+ int non_mod, uint8_t *map_table)
+{
+ uint8_t *sbuf_end = (*srcbuf) + buf_size;
+ int bits;
+ int run_length;
+ int pixels_read = 0;
+
+ while (*srcbuf < sbuf_end && pixels_read < dbuf_len) {
+ bits = *(*srcbuf)++;
+
+ if (bits != 0) {
+ if (non_mod != 1 || bits != 1) {
+ if (map_table != NULL)
+ *destbuf++ = map_table[bits];
+ else
+ *destbuf++ = bits;
+ }
+ pixels_read++;
+ } else {
+ bits = *(*srcbuf)++;
+ run_length = bits & 0x7f;
+ if ((bits & 0x80) == 0) {
+ if (run_length == 0) {
+ return pixels_read;
+ }
+
+ if (map_table != NULL)
+ bits = map_table[0];
+ else
+ bits = 0;
+ while (run_length-- > 0 && pixels_read < dbuf_len) {
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ } else {
+ bits = *(*srcbuf)++;
+
+ if (non_mod == 1 && bits == 1)
+ pixels_read += run_length;
+ else {
+ if (map_table != NULL)
+ bits = map_table[bits];
+ while (run_length-- > 0 && pixels_read < dbuf_len) {
+ *destbuf++ = bits;
+ pixels_read++;
+ }
+ }
+ }
+ }
+ }
+
+ if (*(*srcbuf)++ != 0)
+ av_log(0, AV_LOG_ERROR, "DVBSub error: line overflow\n");
+
+ return pixels_read;
+}
+
+
+
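+/* Pixel data sub-blocks: 0x10/0x11/0x12 introduce 2-, 4- and 8-bit coded
+ * pixel strings, 0x20/0x21/0x22 redefine the 2-to-4, 2-to-8 and 4-to-8 bit
+ * map tables, and 0xf0 marks end of line (the decoder then advances two
+ * lines, since top and bottom fields are coded separately). */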
+static void dvbsub_parse_pixel_data_block(AVCodecContext *avctx, DVBSubObjectDisplay *display,
+ uint8_t *buf, int buf_size, int top_bottom, int non_mod)
+{
+ DVBSubContext *ctx = (DVBSubContext*) avctx->priv_data;
+
+ DVBSubRegion *region = get_region(ctx, display->region_id);
+ uint8_t *buf_end = buf + buf_size;
+ uint8_t *pbuf;
+ int x_pos, y_pos;
+ int i;
+
+ uint8_t map2to4[] = { 0x0, 0x7, 0x8, 0xf};
+ uint8_t map2to8[] = {0x00, 0x77, 0x88, 0xff};
+ uint8_t map4to8[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff};
+ uint8_t *map_table;
+
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "DVB pixel block size %d, %s field:\n", buf_size,
+ top_bottom ? "bottom" : "top");
+#endif
+
+#ifdef DEBUG_PACKET_CONTENTS
+ for (i = 0; i < buf_size; i++)
+ {
+ if (i % 16 == 0)
+ av_log(avctx, AV_LOG_INFO, "0x%08p: ", buf+i);
+
+ av_log(avctx, AV_LOG_INFO, "%02x ", buf[i]);
+ if (i % 16 == 15)
+ av_log(avctx, AV_LOG_INFO, "\n");
+ }
+
+ if (i % 16 != 0)
+ av_log(avctx, AV_LOG_INFO, "\n");
+
+#endif
+
+ if (region == 0)
+ return;
+
+ pbuf = region->pbuf;
+
+ x_pos = display->x_pos;
+ y_pos = display->y_pos;
+
+ if ((y_pos & 1) != top_bottom)
+ y_pos++;
+
+ while (buf < buf_end) {
+ if (x_pos > region->width || y_pos > region->height) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid object location!\n");
+ return;
+ }
+
+ switch (*buf++) {
+ case 0x10:
+ if (region->depth == 8)
+ map_table = map2to8;
+ else if (region->depth == 4)
+ map_table = map2to4;
+ else
+ map_table = NULL;
+
+ x_pos += dvbsub_read_2bit_string(pbuf + (y_pos * region->width) + x_pos,
+ region->width - x_pos, &buf, buf_size,
+ non_mod, map_table);
+ break;
+ case 0x11:
+ if (region->depth < 4) {
+ av_log(avctx, AV_LOG_ERROR, "4-bit pixel string in %d-bit region!\n", region->depth);
+ return;
+ }
+
+ if (region->depth == 8)
+ map_table = map4to8;
+ else
+ map_table = NULL;
+
+ x_pos += dvbsub_read_4bit_string(pbuf + (y_pos * region->width) + x_pos,
+ region->width - x_pos, &buf, buf_size,
+ non_mod, map_table);
+ break;
+ case 0x12:
+ if (region->depth < 8) {
+ av_log(avctx, AV_LOG_ERROR, "8-bit pixel string in %d-bit region!\n", region->depth);
+ return;
+ }
+
+ x_pos += dvbsub_read_8bit_string(pbuf + (y_pos * region->width) + x_pos,
+ region->width - x_pos, &buf, buf_size,
+ non_mod, NULL);
+ break;
+
+ case 0x20:
+ map2to4[0] = (*buf) >> 4;
+ map2to4[1] = (*buf++) & 0xf;
+ map2to4[2] = (*buf) >> 4;
+ map2to4[3] = (*buf++) & 0xf;
+ break;
+ case 0x21:
+ for (i = 0; i < 4; i++)
+ map2to8[i] = *buf++;
+ break;
+ case 0x22:
+ for (i = 0; i < 16; i++)
+ map4to8[i] = *buf++;
+ break;
+
+ case 0xf0:
+ x_pos = display->x_pos;
+ y_pos += 2;
+ break;
+ default:
+ av_log(avctx, AV_LOG_INFO, "Unknown/unsupported pixel block 0x%x\n", *(buf-1));
+ }
+ }
+
+}
+
+static void dvbsub_parse_object_segment(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size)
+{
+ DVBSubContext *ctx = (DVBSubContext*) avctx->priv_data;
+
+ uint8_t *buf_end = buf + buf_size;
+ uint8_t *block;
+ int object_id;
+ DVBSubObject *object;
+ DVBSubObjectDisplay *display;
+ int top_field_len, bottom_field_len;
+
+ int coding_method, non_modifying_colour;
+
+ object_id = BE_16(buf);
+ buf += 2;
+
+ object = get_object(ctx, object_id);
+
+ if (!object)
+ return;
+
+ coding_method = ((*buf) >> 2) & 3;
+ non_modifying_colour = ((*buf++) >> 1) & 1;
+
+ if (coding_method == 0) {
+ top_field_len = BE_16(buf);
+ buf += 2;
+ bottom_field_len = BE_16(buf);
+ buf += 2;
+
+ if (buf + top_field_len + bottom_field_len > buf_end) {
+ av_log(avctx, AV_LOG_ERROR, "Field data size too large\n");
+ return;
+ }
+
+ for (display = object->display_list; display != 0; display = display->object_list_next) {
+ block = buf;
+
+ dvbsub_parse_pixel_data_block(avctx, display, block, top_field_len, 0,
+ non_modifying_colour);
+
+ if (bottom_field_len > 0)
+ block = buf + top_field_len;
+ else
+ bottom_field_len = top_field_len;
+
+ dvbsub_parse_pixel_data_block(avctx, display, block, bottom_field_len, 1,
+ non_modifying_colour);
+ }
+
+/* } else if (coding_method == 1) {*/
+
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Unknown object coding %d\n", coding_method);
+ }
+
+}
+
+#define SCALEBITS 10
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define YUV_TO_RGB1_CCIR(cb1, cr1)\
+{\
+ cb = (cb1) - 128;\
+ cr = (cr1) - 128;\
+ r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
+ g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
+ ONE_HALF;\
+ b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
+}
+
+#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
+{\
+ y = ((y1) - 16) * FIX(255.0/219.0);\
+ r = cm[(y + r_add) >> SCALEBITS];\
+ g = cm[(y + g_add) >> SCALEBITS];\
+ b = cm[(y + b_add) >> SCALEBITS];\
+}
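+/* The two macros above implement the CCIR 601 YCbCr to RGB conversion in
+ * SCALEBITS (10-bit) fixed point: luma is expanded from limited range by
+ * 255/219 and chroma by 255/224, e.g.
+ * R = (255/219)*(Y-16) + 1.402*(255/224)*(Cr-128),
+ * with ONE_HALF providing rounding and cm[] clamping the result to 0..255. */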
+
+
+static void dvbsub_parse_clut_segment(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size)
+{
+ DVBSubContext *ctx = (DVBSubContext*) avctx->priv_data;
+
+ uint8_t *buf_end = buf + buf_size;
+ int clut_id;
+ DVBSubCLUT *clut;
+ int entry_id, depth , full_range;
+ int y, cr, cb, alpha;
+ int r, g, b, r_add, g_add, b_add;
+
+#ifdef DEBUG_PACKET_CONTENTS
+ int i;
+
+ av_log(avctx, AV_LOG_INFO, "DVB clut packet:\n");
+
+ for (i=0; i < buf_size; i++)
+ {
+ av_log(avctx, AV_LOG_INFO, "%02x ", buf[i]);
+ if (i % 16 == 15)
+ av_log(avctx, AV_LOG_INFO, "\n");
+ }
+
+ if (i % 16 != 0)
+ av_log(avctx, AV_LOG_INFO, "\n");
+
+#endif
+
+ clut_id = *buf++;
+ buf += 1;
+
+ clut = get_clut(ctx, clut_id);
+
+ if (clut == NULL) {
+ clut = av_malloc(sizeof(DVBSubCLUT));
+
+ memcpy(clut, &default_clut, sizeof(DVBSubCLUT));
+
+ clut->id = clut_id;
+
+ clut->next = ctx->clut_list;
+ ctx->clut_list = clut;
+ }
+
+ while (buf + 4 < buf_end)
+ {
+ entry_id = *buf++;
+
+ depth = (*buf) & 0xe0;
+
+ if (depth == 0) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid clut depth 0x%x!\n", *buf);
+ return;
+ }
+
+ full_range = (*buf++) & 1;
+
+ if (full_range) {
+ y = *buf++;
+ cr = *buf++;
+ cb = *buf++;
+ alpha = *buf++;
+ } else {
+ y = buf[0] & 0xfc;
+ cr = (((buf[0] & 3) << 2) | ((buf[1] >> 6) & 3)) << 4;
+ cb = (buf[1] << 2) & 0xf0;
+ alpha = (buf[1] << 6) & 0xc0;
+
+ buf += 2;
+ }
+
+ if (y == 0)
+ alpha = 0xff;
+
+ YUV_TO_RGB1_CCIR(cb, cr);
+ YUV_TO_RGB2_CCIR(r, g, b, y);
+
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "clut %d := (%d,%d,%d,%d)\n", entry_id, r, g, b, alpha);
+#endif
+
+ if (depth & 0x80)
+ clut->clut4[entry_id] = RGBA(r,g,b,255 - alpha);
+ if (depth & 0x40)
+ clut->clut16[entry_id] = RGBA(r,g,b,255 - alpha);
+ if (depth & 0x20)
+ clut->clut256[entry_id] = RGBA(r,g,b,255 - alpha);
+ }
+}
+
+
+static void dvbsub_parse_region_segment(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size)
+{
+ DVBSubContext *ctx = (DVBSubContext*) avctx->priv_data;
+
+ uint8_t *buf_end = buf + buf_size;
+ int region_id, object_id;
+ DVBSubRegion *region;
+ DVBSubObject *object;
+ DVBSubObjectDisplay *display;
+ int fill;
+
+ if (buf_size < 10)
+ return;
+
+ region_id = *buf++;
+
+ region = get_region(ctx, region_id);
+
+ if (region == NULL)
+ {
+ region = av_mallocz(sizeof(DVBSubRegion));
+
+ region->id = region_id;
+
+ region->next = ctx->region_list;
+ ctx->region_list = region;
+ }
+
+ fill = ((*buf++) >> 3) & 1;
+
+ region->width = BE_16(buf);
+ buf += 2;
+ region->height = BE_16(buf);
+ buf += 2;
+
+ if (region->width * region->height != region->buf_size) {
+ if (region->pbuf != 0)
+ av_free(region->pbuf);
+
+ region->buf_size = region->width * region->height;
+
+ region->pbuf = av_malloc(region->buf_size);
+
+ fill = 1;
+ }
+
+ region->depth = 1 << (((*buf++) >> 2) & 7);
+ region->clut = *buf++;
+
+ if (region->depth == 8) {
+ region->bgcolour = *buf++;
+ buf += 1; /* skip the 4-bit/2-bit pixel-code byte */
+ } else {
+ buf += 1;
+
+ if (region->depth == 4)
+ region->bgcolour = (((*buf++) >> 4) & 15);
+ else
+ region->bgcolour = (((*buf++) >> 2) & 3);
+ }
+
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "Region %d, (%dx%d)\n", region_id, region->width, region->height);
+#endif
+
+ if (fill) {
+ memset(region->pbuf, region->bgcolour, region->buf_size);
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "Fill region (%d)\n", region->bgcolour);
+#endif
+ }
+
+ delete_region_display_list(ctx, region);
+
+ while (buf + 5 < buf_end) {
+ object_id = BE_16(buf);
+ buf += 2;
+
+ object = get_object(ctx, object_id);
+
+ if (object == NULL) {
+ object = av_mallocz(sizeof(DVBSubObject));
+
+ object->id = object_id;
+ object->next = ctx->object_list;
+ ctx->object_list = object;
+ }
+
+ object->type = (*buf) >> 6;
+
+ display = av_mallocz(sizeof(DVBSubObjectDisplay));
+
+ display->object_id = object_id;
+ display->region_id = region_id;
+
+ display->x_pos = BE_16(buf) & 0xfff;
+ buf += 2;
+ display->y_pos = BE_16(buf) & 0xfff;
+ buf += 2;
+
+ if ((object->type == 1 || object->type == 2) && buf+1 < buf_end) {
+ display->fgcolour = *buf++;
+ display->bgcolour = *buf++;
+ }
+
+ display->region_list_next = region->display_list;
+ region->display_list = display;
+
+ display->object_list_next = object->display_list;
+ object->display_list = display;
+ }
+}
+
+static void dvbsub_parse_page_segment(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size)
+{
+ DVBSubContext *ctx = (DVBSubContext*) avctx->priv_data;
+ DVBSubRegionDisplay *display;
+ DVBSubRegionDisplay *tmp_display_list, **tmp_ptr;
+
+ uint8_t *buf_end = buf + buf_size;
+ int region_id;
+ int page_state;
+
+ if (buf_size < 1)
+ return;
+
+ ctx->time_out = *buf++;
+ page_state = ((*buf++) >> 2) & 3;
+
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "Page time out %ds, state %d\n", ctx->time_out, page_state);
+#endif
+
+ if (page_state == 2)
+ {
+ delete_state(ctx);
+ }
+
+ tmp_display_list = ctx->display_list;
+ ctx->display_list = NULL;
+ ctx->display_list_size = 0;
+
+ while (buf + 5 < buf_end) {
+ region_id = *buf++;
+ buf += 1;
+
+ display = tmp_display_list;
+ tmp_ptr = &tmp_display_list;
+
+ while (display != NULL && display->region_id != region_id) {
+ tmp_ptr = &display->next;
+ display = display->next;
+ }
+
+ if (display == NULL)
+ display = av_mallocz(sizeof(DVBSubRegionDisplay));
+
+ display->region_id = region_id;
+
+ display->x_pos = BE_16(buf);
+ buf += 2;
+ display->y_pos = BE_16(buf);
+ buf += 2;
+
+ *tmp_ptr = display->next;
+
+ display->next = ctx->display_list;
+ ctx->display_list = display;
+ ctx->display_list_size++;
+
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "Region %d, (%d,%d)\n", region_id, display->x_pos, display->y_pos);
+#endif
+ }
+
+ while (tmp_display_list != 0) {
+ display = tmp_display_list;
+
+ tmp_display_list = display->next;
+
+ av_free(display);
+ }
+
+}
+
+
+#ifdef DEBUG_SAVE_IMAGES
+static void save_display_set(DVBSubContext *ctx)
+{
+ DVBSubRegion *region;
+ DVBSubRegionDisplay *display;
+ DVBSubCLUT *clut;
+ uint32_t *clut_table;
+ int x_pos, y_pos, width, height;
+ int x, y, y_off, x_off;
+ uint32_t *pbuf;
+ char filename[32];
+ static int fileno_index = 0;
+
+ x_pos = -1;
+ y_pos = -1;
+ width = 0;
+ height = 0;
+
+ for (display = ctx->display_list; display != NULL; display = display->next) {
+ region = get_region(ctx, display->region_id);
+
+ if (x_pos == -1) {
+ x_pos = display->x_pos;
+ y_pos = display->y_pos;
+ width = region->width;
+ height = region->height;
+ } else {
+ if (display->x_pos < x_pos) {
+ width += (x_pos - display->x_pos);
+ x_pos = display->x_pos;
+ }
+
+ if (display->y_pos < y_pos) {
+ height += (y_pos - display->y_pos);
+ y_pos = display->y_pos;
+ }
+
+ if (display->x_pos + region->width > x_pos + width) {
+ width = display->x_pos + region->width - x_pos;
+ }
+
+ if (display->y_pos + region->height > y_pos + height) {
+ height = display->y_pos + region->height - y_pos;
+ }
+ }
+ }
+
+ if (x_pos >= 0) {
+
+ pbuf = av_malloc(width * height * 4);
+
+ for (display = ctx->display_list; display != NULL; display = display->next) {
+ region = get_region(ctx, display->region_id);
+
+ x_off = display->x_pos - x_pos;
+ y_off = display->y_pos - y_pos;
+
+ clut = get_clut(ctx, region->clut);
+
+ if (clut == 0)
+ clut = &default_clut;
+
+ switch (region->depth) {
+ case 2:
+ clut_table = clut->clut4;
+ break;
+ case 8:
+ clut_table = clut->clut256;
+ break;
+ case 4:
+ default:
+ clut_table = clut->clut16;
+ break;
+ }
+
+ for (y = 0; y < region->height; y++) {
+ for (x = 0; x < region->width; x++) {
+ pbuf[((y + y_off) * width) + x_off + x] =
+ clut_table[region->pbuf[y * region->width + x]];
+ }
+ }
+
+ }
+
+ snprintf(filename, 32, "dvbs.%d", fileno_index);
+
+ png_save2(filename, pbuf, width, height);
+
+ av_free(pbuf);
+ }
+
+ fileno_index++;
+}
+#endif
+
+static int dvbsub_display_end_segment(AVCodecContext *avctx, uint8_t *buf,
+ int buf_size, AVSubtitle *sub)
+{
+ DVBSubContext *ctx = (DVBSubContext*) avctx->priv_data;
+
+ DVBSubRegion *region;
+ DVBSubRegionDisplay *display;
+ AVSubtitleRect *rect;
+ DVBSubCLUT *clut;
+ uint32_t *clut_table;
+ int i;
+
+ sub->rects = NULL;
+ sub->start_display_time = 0;
+ sub->end_display_time = ctx->time_out * 1000;
+ sub->format = 0;
+
+ sub->num_rects = ctx->display_list_size;
+
+ if (sub->num_rects > 0)
+ sub->rects = av_mallocz(sizeof(AVSubtitleRect) * sub->num_rects);
+
+ i = 0;
+
+ for (display = ctx->display_list; display != NULL; display = display->next) {
+ region = get_region(ctx, display->region_id);
+ rect = &sub->rects[i];
+
+ if (region == NULL)
+ continue;
+
+ rect->x = display->x_pos;
+ rect->y = display->y_pos;
+ rect->w = region->width;
+ rect->h = region->height;
+ rect->nb_colors = 16;
+ rect->linesize = region->width;
+
+ clut = get_clut(ctx, region->clut);
+
+ if (clut == NULL)
+ clut = &default_clut;
+
+ switch (region->depth) {
+ case 2:
+ clut_table = clut->clut4;
+ break;
+ case 8:
+ clut_table = clut->clut256;
+ break;
+ case 4:
+ default:
+ clut_table = clut->clut16;
+ break;
+ }
+
+ rect->rgba_palette = av_malloc((1 << region->depth) * sizeof(uint32_t));
+ memcpy(rect->rgba_palette, clut_table, (1 << region->depth) * sizeof(uint32_t));
+
+ rect->bitmap = av_malloc(region->buf_size);
+ memcpy(rect->bitmap, region->pbuf, region->buf_size);
+
+ i++;
+ }
+
+ sub->num_rects = i;
+
+#ifdef DEBUG_SAVE_IMAGES
+ save_display_set(ctx);
+#endif
+
+ return 1;
+}
+
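+/* A DVB subtitle packet is a sequence of segments, each starting with a 0x0f
+ * sync byte followed by a one-byte segment type, a 16-bit page id and a
+ * 16-bit segment length; only segments addressed to the configured
+ * composition or ancillary page are processed. */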
+static int dvbsub_decode(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ DVBSubContext *ctx = (DVBSubContext*) avctx->priv_data;
+ AVSubtitle *sub = (AVSubtitle*) data;
+ uint8_t *p, *p_end;
+ int segment_type;
+ int page_id;
+ int segment_length;
+
+#ifdef DEBUG_PACKET_CONTENTS
+ int i;
+
+ av_log(avctx, AV_LOG_INFO, "DVB sub packet:\n");
+
+ for (i=0; i < buf_size; i++)
+ {
+ av_log(avctx, AV_LOG_INFO, "%02x ", buf[i]);
+ if (i % 16 == 15)
+ av_log(avctx, AV_LOG_INFO, "\n");
+ }
+
+ if (i % 16 != 0)
+ av_log(avctx, AV_LOG_INFO, "\n");
+
+#endif
+
+ if (buf_size <= 2)
+ return -1;
+
+ p = buf;
+ p_end = buf + buf_size;
+
+ while (p < p_end && *p == 0x0f)
+ {
+ p += 1;
+ segment_type = *p++;
+ page_id = BE_16(p);
+ p += 2;
+ segment_length = BE_16(p);
+ p += 2;
+
+ if (page_id == ctx->composition_id || page_id == ctx->ancillary_id) {
+ switch (segment_type) {
+ case DVBSUB_PAGE_SEGMENT:
+ dvbsub_parse_page_segment(avctx, p, segment_length);
+ break;
+ case DVBSUB_REGION_SEGMENT:
+ dvbsub_parse_region_segment(avctx, p, segment_length);
+ break;
+ case DVBSUB_CLUT_SEGMENT:
+ dvbsub_parse_clut_segment(avctx, p, segment_length);
+ break;
+ case DVBSUB_OBJECT_SEGMENT:
+ dvbsub_parse_object_segment(avctx, p, segment_length);
+ break;
+ case DVBSUB_DISPLAY_SEGMENT:
+ *data_size = dvbsub_display_end_segment(avctx, p, segment_length, sub);
+ break;
+ default:
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "Subtitling segment type 0x%x, page id %d, length %d\n",
+ segment_type, page_id, segment_length);
+#endif
+ break;
+ }
+ }
+
+ p += segment_length;
+ }
+
+ if (p != p_end)
+ {
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "Junk at end of packet\n");
+#endif
+ return -1;
+ }
+
+ return buf_size;
+}
+
+
+AVCodec dvbsub_decoder = {
+ "dvbsub",
+ CODEC_TYPE_SUBTITLE,
+ CODEC_ID_DVB_SUBTITLE,
+ sizeof(DVBSubContext),
+ dvbsub_init_decoder,
+ NULL,
+ dvbsub_close_decoder,
+ dvbsub_decode,
+};
+
+/* Parser (mostly) copied from dvdsub.c */
+
+#define PARSE_BUF_SIZE (65536)
+
+
+/* parser definition */
+typedef struct DVBSubParseContext {
+ uint8_t *packet_buf;
+ int packet_start;
+ int packet_index;
+ int in_packet;
+} DVBSubParseContext;
+
+static int dvbsub_parse_init(AVCodecParserContext *s)
+{
+ DVBSubParseContext *pc = s->priv_data;
+ pc->packet_buf = av_malloc(PARSE_BUF_SIZE);
+
+ return 0;
+}
+
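+/* The parser reassembles one subtitling packet per pts: a new pts value
+ * starts a fresh packet (after the two-byte data_identifier 0x20 /
+ * subtitle_stream_id 0x00 header), complete 0x0f segments are handed to the
+ * decoder, and a 0xff end-of-PES-data marker terminates the packet. */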
+static int dvbsub_parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ DVBSubParseContext *pc = s->priv_data;
+ uint8_t *p, *p_end;
+ int len, buf_pos = 0;
+
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "DVB parse packet pts=%"PRIx64", lpts=%"PRIx64", cpts=%"PRIx64":\n",
+ s->pts, s->last_pts, s->cur_frame_pts[s->cur_frame_start_index]);
+#endif
+
+#ifdef DEBUG_PACKET_CONTENTS
+ int i;
+
+ for (i=0; i < buf_size; i++)
+ {
+ av_log(avctx, AV_LOG_INFO, "%02x ", buf[i]);
+ if (i % 16 == 15)
+ av_log(avctx, AV_LOG_INFO, "\n");
+ }
+
+ if (i % 16 != 0)
+ av_log(avctx, AV_LOG_INFO, "\n");
+
+#endif
+
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+
+ s->fetch_timestamp = 1;
+
+ if (s->last_pts != s->pts && s->last_pts != AV_NOPTS_VALUE) /* Start of a new packet */
+ {
+ if (pc->packet_index != pc->packet_start)
+ {
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "Discarding %d bytes\n",
+ pc->packet_index - pc->packet_start);
+#endif
+ }
+
+ pc->packet_start = 0;
+ pc->packet_index = 0;
+
+ if (buf_size < 2 || buf[0] != 0x20 || buf[1] != 0x00) {
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "Bad packet header\n");
+#endif
+ return -1;
+ }
+
+ buf_pos = 2;
+
+ pc->in_packet = 1;
+ } else {
+ if (pc->packet_start != 0)
+ {
+ if (pc->packet_index != pc->packet_start)
+ {
+ memmove(pc->packet_buf, pc->packet_buf + pc->packet_start,
+ pc->packet_index - pc->packet_start);
+
+ pc->packet_index -= pc->packet_start;
+ pc->packet_start = 0;
+ } else {
+ pc->packet_start = 0;
+ pc->packet_index = 0;
+ }
+ }
+ }
+
+ if (buf_size - buf_pos + pc->packet_index > PARSE_BUF_SIZE)
+ return -1;
+
+/* if not currently in a packet, discard data */
+ if (pc->in_packet == 0)
+ return buf_size;
+
+ memcpy(pc->packet_buf + pc->packet_index, buf + buf_pos, buf_size - buf_pos);
+ pc->packet_index += buf_size - buf_pos;
+
+ p = pc->packet_buf;
+ p_end = pc->packet_buf + pc->packet_index;
+
+ while (p < p_end)
+ {
+ if (*p == 0x0f)
+ {
+ if (p + 6 <= p_end)
+ {
+ len = BE_16(p + 4);
+
+ if (p + len + 6 <= p_end)
+ {
+ *poutbuf_size += len + 6;
+
+ p += len + 6;
+ } else
+ break;
+ } else
+ break;
+ } else if (*p == 0xff) {
+ if (p + 1 < p_end)
+ {
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_INFO, "Junk at end of packet\n");
+#endif
+ }
+ pc->packet_index = p - pc->packet_buf;
+ pc->in_packet = 0;
+ break;
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Junk in packet\n");
+
+ pc->packet_index = p - pc->packet_buf;
+ pc->in_packet = 0;
+ break;
+ }
+ }
+
+ if (*poutbuf_size > 0)
+ {
+ *poutbuf = pc->packet_buf;
+ pc->packet_start = *poutbuf_size;
+ }
+
+ if (s->last_pts == AV_NOPTS_VALUE)
+ s->last_pts = s->pts;
+
+ return buf_size;
+}
+
+static void dvbsub_parse_close(AVCodecParserContext *s)
+{
+ DVBSubParseContext *pc = s->priv_data;
+ av_freep(&pc->packet_buf);
+}
+
+AVCodecParser dvbsub_parser = {
+ { CODEC_ID_DVB_SUBTITLE },
+ sizeof(DVBSubParseContext),
+ dvbsub_parse_init,
+ dvbsub_parse,
+ dvbsub_parse_close,
+};
diff --git a/src/libffmpeg/libavcodec/dvdata.h b/contrib/ffmpeg/libavcodec/dvdata.h
index a3d42d66c..dce4aba98 100644
--- a/src/libffmpeg/libavcodec/dvdata.h
+++ b/contrib/ffmpeg/libavcodec/dvdata.h
@@ -2,18 +2,20 @@
* Constants for DV codec
* Copyright (c) 2002 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -2624,6 +2626,29 @@ static const DVprofile dv_profiles[] = {
}
};
+enum dv_section_type {
+ dv_sect_header = 0x1f,
+ dv_sect_subcode = 0x3f,
+ dv_sect_vaux = 0x56,
+ dv_sect_audio = 0x76,
+ dv_sect_video = 0x96,
+};
+
+enum dv_pack_type {
+ dv_header525 = 0x3f, /* see dv_write_pack for important details on */
+ dv_header625 = 0xbf, /* these two packs */
+ dv_timecode = 0x13,
+ dv_audio_source = 0x50,
+ dv_audio_control = 0x51,
+ dv_audio_recdate = 0x52,
+ dv_audio_rectime = 0x53,
+ dv_video_source = 0x60,
+ dv_video_control = 0x61,
+ dv_video_recdate = 0x62,
+ dv_video_rectime = 0x63,
+ dv_unknown_pack = 0xff,
+};
+
/* minimum number of bytes to read from a DV stream in order to determine the profile */
#define DV_PROFILE_BYTES (6*80) /* 6 DIF blocks */
@@ -2663,3 +2688,37 @@ static inline const DVprofile* dv_codec_profile(AVCodecContext* codec)
return NULL;
}
+
+static inline int dv_write_dif_id(enum dv_section_type t, uint8_t chan_num, uint8_t seq_num,
+ uint8_t dif_num, uint8_t* buf)
+{
+ buf[0] = (uint8_t)t; /* Section type */
+ buf[1] = (seq_num<<4) | /* DIF seq number 0-9 for 525/60; 0-11 for 625/50 */
+ (chan_num << 3) | /* FSC: for 50Mb/s 0 - first channel; 1 - second */
+ 7; /* reserved -- always 1 */
+ buf[2] = dif_num; /* DIF block number Video: 0-134, Audio: 0-8 */
+ return 3;
+}
+
+
+static inline int dv_write_ssyb_id(uint8_t syb_num, uint8_t fr, uint8_t* buf)
+{
+ if (syb_num == 0 || syb_num == 6) {
+ buf[0] = (fr<<7) | /* FR ID 1 - first half of each channel; 0 - second */
+ (0<<4) | /* AP3 (Subcode application ID) */
+ 0x0f; /* reserved -- always 1 */
+ }
+ else if (syb_num == 11) {
+ buf[0] = (fr<<7) | /* FR ID 1 - first half of each channel; 0 - second */
+ 0x7f; /* reserved -- always 1 */
+ }
+ else {
+ buf[0] = (fr<<7) | /* FR ID 1 - first half of each channel; 0 - second */
+ (0<<4) | /* APT (Track application ID) */
+ 0x0f; /* reserved -- always 1 */
+ }
+ buf[1] = 0xf0 | /* reserved -- always 1 */
+ (syb_num & 0x0f); /* SSYB number 0 - 11 */
+ buf[2] = 0xff; /* reserved -- always 1 */
+ return 3;
+}
diff --git a/contrib/ffmpeg/libavcodec/dvdsubdec.c b/contrib/ffmpeg/libavcodec/dvdsubdec.c
new file mode 100644
index 000000000..9a0dd7756
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/dvdsubdec.c
@@ -0,0 +1,477 @@
+/*
+ * DVD subtitle decoding for ffmpeg
+ * Copyright (c) 2005 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avcodec.h"
+
+//#define DEBUG
+
+static int dvdsub_init_decoder(AVCodecContext *avctx)
+{
+ return 0;
+}
+
+static uint16_t getbe16(const uint8_t *p)
+{
+ return (p[0] << 8) | p[1];
+}
+
+static int get_nibble(const uint8_t *buf, int nibble_offset)
+{
+ return (buf[nibble_offset >> 1] >> ((1 - (nibble_offset & 1)) << 2)) & 0xf;
+}
+
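+/* DVD subpicture RLE: each run is coded in 1 to 4 nibbles; values below the
+ * threshold for the current width are extended with the next nibble. The
+ * final value packs the run length in its upper bits and a 2-bit colour
+ * index in its two lowest bits; a length of zero fills to the end of the
+ * line, and each line is padded to a whole byte. */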
+static int decode_rle(uint8_t *bitmap, int linesize, int w, int h,
+ const uint8_t *buf, int nibble_offset, int buf_size)
+{
+ unsigned int v;
+ int x, y, len, color, nibble_end;
+ uint8_t *d;
+
+ nibble_end = buf_size * 2;
+ x = 0;
+ y = 0;
+ d = bitmap;
+ for(;;) {
+ if (nibble_offset >= nibble_end)
+ return -1;
+ v = get_nibble(buf, nibble_offset++);
+ if (v < 0x4) {
+ v = (v << 4) | get_nibble(buf, nibble_offset++);
+ if (v < 0x10) {
+ v = (v << 4) | get_nibble(buf, nibble_offset++);
+ if (v < 0x040) {
+ v = (v << 4) | get_nibble(buf, nibble_offset++);
+ if (v < 4) {
+ v |= (w - x) << 2;
+ }
+ }
+ }
+ }
+ len = v >> 2;
+ if (len > (w - x))
+ len = (w - x);
+ color = v & 0x03;
+ memset(d + x, color, len);
+ x += len;
+ if (x >= w) {
+ y++;
+ if (y >= h)
+ break;
+ d += linesize;
+ x = 0;
+ /* byte align */
+ nibble_offset += (nibble_offset & 1);
+ }
+ }
+ return 0;
+}
+
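+/* A DVD subpicture only carries 4-bit indices into the programme's colour
+ * lookup table, which is not available at this level, so guess_palette()
+ * synthesises an RGBA palette: distinct opaque entries are assigned
+ * decreasing brightness levels of subtitle_color and the 4-bit alpha values
+ * are expanded to 8 bits (alpha * 17). */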
+static void guess_palette(uint32_t *rgba_palette,
+ uint8_t *palette,
+ uint8_t *alpha,
+ uint32_t subtitle_color)
+{
+ uint8_t color_used[16];
+ int nb_opaque_colors, i, level, j, r, g, b;
+
+ for(i = 0; i < 4; i++)
+ rgba_palette[i] = 0;
+
+ memset(color_used, 0, 16);
+ nb_opaque_colors = 0;
+ for(i = 0; i < 4; i++) {
+ if (alpha[i] != 0 && !color_used[palette[i]]) {
+ color_used[palette[i]] = 1;
+ nb_opaque_colors++;
+ }
+ }
+
+ if (nb_opaque_colors == 0)
+ return;
+
+ j = nb_opaque_colors;
+ memset(color_used, 0, 16);
+ for(i = 0; i < 4; i++) {
+ if (alpha[i] != 0) {
+ if (!color_used[palette[i]]) {
+ level = (0xff * j) / nb_opaque_colors;
+ r = (((subtitle_color >> 16) & 0xff) * level) >> 8;
+ g = (((subtitle_color >> 8) & 0xff) * level) >> 8;
+ b = (((subtitle_color >> 0) & 0xff) * level) >> 8;
+ rgba_palette[i] = b | (g << 8) | (r << 16) | ((alpha[i] * 17) << 24);
+ color_used[palette[i]] = (i + 1);
+ j--;
+ } else {
+ rgba_palette[i] = (rgba_palette[color_used[palette[i]] - 1] & 0x00ffffff) |
+ ((alpha[i] * 17) << 24);
+ }
+ }
+ }
+}
+
+static int decode_dvd_subtitles(AVSubtitle *sub_header,
+ const uint8_t *buf, int buf_size)
+{
+ int cmd_pos, pos, cmd, x1, y1, x2, y2, offset1, offset2, next_cmd_pos;
+ uint8_t palette[4], alpha[4];
+ int date;
+ int i;
+ int is_menu = 0;
+
+ if (buf_size < 4)
+ return -1;
+ sub_header->rects = NULL;
+ sub_header->num_rects = 0;
+ sub_header->start_display_time = 0;
+ sub_header->end_display_time = 0;
+
+ cmd_pos = getbe16(buf + 2);
+ while ((cmd_pos + 4) < buf_size) {
+ date = getbe16(buf + cmd_pos);
+ next_cmd_pos = getbe16(buf + cmd_pos + 2);
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_INFO, "cmd_pos=0x%04x next=0x%04x date=%d\n",
+ cmd_pos, next_cmd_pos, date);
+#endif
+ pos = cmd_pos + 4;
+ offset1 = -1;
+ offset2 = -1;
+ x1 = y1 = x2 = y2 = 0;
+ while (pos < buf_size) {
+ cmd = buf[pos++];
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_INFO, "cmd=%02x\n", cmd);
+#endif
+ switch(cmd) {
+ case 0x00:
+ /* menu subpicture */
+ is_menu = 1;
+ break;
+ case 0x01:
+ /* set start date */
+ sub_header->start_display_time = (date << 10) / 90;
+ break;
+ case 0x02:
+ /* set end date */
+ sub_header->end_display_time = (date << 10) / 90;
+ break;
+ case 0x03:
+ /* set palette */
+ if ((buf_size - pos) < 2)
+ goto fail;
+ palette[3] = buf[pos] >> 4;
+ palette[2] = buf[pos] & 0x0f;
+ palette[1] = buf[pos + 1] >> 4;
+ palette[0] = buf[pos + 1] & 0x0f;
+ pos += 2;
+ break;
+ case 0x04:
+ /* set alpha */
+ if ((buf_size - pos) < 2)
+ goto fail;
+ alpha[3] = buf[pos] >> 4;
+ alpha[2] = buf[pos] & 0x0f;
+ alpha[1] = buf[pos + 1] >> 4;
+ alpha[0] = buf[pos + 1] & 0x0f;
+ pos += 2;
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_INFO, "alpha=%x%x%x%x\n", alpha[0],alpha[1],alpha[2],alpha[3]);
+#endif
+ break;
+ case 0x05:
+ if ((buf_size - pos) < 6)
+ goto fail;
+ x1 = (buf[pos] << 4) | (buf[pos + 1] >> 4);
+ x2 = ((buf[pos + 1] & 0x0f) << 8) | buf[pos + 2];
+ y1 = (buf[pos + 3] << 4) | (buf[pos + 4] >> 4);
+ y2 = ((buf[pos + 4] & 0x0f) << 8) | buf[pos + 5];
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_INFO, "x1=%d x2=%d y1=%d y2=%d\n",
+ x1, x2, y1, y2);
+#endif
+ pos += 6;
+ break;
+ case 0x06:
+ if ((buf_size - pos) < 4)
+ goto fail;
+ offset1 = getbe16(buf + pos);
+ offset2 = getbe16(buf + pos + 2);
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_INFO, "offset1=0x%04x offset2=0x%04x\n", offset1, offset2);
+#endif
+ pos += 4;
+ break;
+ case 0xff:
+ default:
+ goto the_end;
+ }
+ }
+ the_end:
+ if (offset1 >= 0) {
+ int w, h;
+ uint8_t *bitmap;
+
+ /* decode the bitmap */
+ w = x2 - x1 + 1;
+ if (w < 0)
+ w = 0;
+ h = y2 - y1;
+ if (h < 0)
+ h = 0;
+ if (w > 0 && h > 0) {
+ if (sub_header->rects != NULL) {
+ for (i = 0; i < sub_header->num_rects; i++) {
+ av_free(sub_header->rects[i].bitmap);
+ av_free(sub_header->rects[i].rgba_palette);
+ }
+ av_freep(&sub_header->rects);
+ sub_header->num_rects = 0;
+ }
+
+ bitmap = av_malloc(w * h);
+ sub_header->rects = av_mallocz(sizeof(AVSubtitleRect));
+ sub_header->num_rects = 1;
+ sub_header->rects[0].rgba_palette = av_malloc(4 * 4);
+ decode_rle(bitmap, w * 2, w, h / 2,
+ buf, offset1 * 2, buf_size);
+ decode_rle(bitmap + w, w * 2, w, h / 2,
+ buf, offset2 * 2, buf_size);
+ guess_palette(sub_header->rects[0].rgba_palette,
+ palette, alpha, 0xffff00);
+ sub_header->rects[0].x = x1;
+ sub_header->rects[0].y = y1;
+ sub_header->rects[0].w = w;
+ sub_header->rects[0].h = h;
+ sub_header->rects[0].nb_colors = 4;
+ sub_header->rects[0].linesize = w;
+ sub_header->rects[0].bitmap = bitmap;
+ }
+ }
+ if (next_cmd_pos == cmd_pos)
+ break;
+ cmd_pos = next_cmd_pos;
+ }
+ if (sub_header->num_rects > 0)
+ return is_menu;
+ fail:
+ return -1;
+}
+
+static int is_transp(const uint8_t *buf, int pitch, int n,
+ const uint8_t *transp_color)
+{
+ int i;
+ for(i = 0; i < n; i++) {
+ if (!transp_color[*buf])
+ return 0;
+ buf += pitch;
+ }
+ return 1;
+}
+
+/* return 0 if empty rectangle, 1 if non empty */
+static int find_smallest_bounding_rectangle(AVSubtitle *s)
+{
+ uint8_t transp_color[256];
+ int y1, y2, x1, x2, y, w, h, i;
+ uint8_t *bitmap;
+
+ if (s->num_rects == 0 || s->rects == NULL || s->rects[0].w <= 0 || s->rects[0].h <= 0)
+ return 0;
+
+ memset(transp_color, 0, 256);
+ for(i = 0; i < s->rects[0].nb_colors; i++) {
+ if ((s->rects[0].rgba_palette[i] >> 24) == 0)
+ transp_color[i] = 1;
+ }
+ y1 = 0;
+ while (y1 < s->rects[0].h && is_transp(s->rects[0].bitmap + y1 * s->rects[0].linesize,
+ 1, s->rects[0].w, transp_color))
+ y1++;
+ if (y1 == s->rects[0].h) {
+ av_freep(&s->rects[0].bitmap);
+ s->rects[0].w = s->rects[0].h = 0;
+ return 0;
+ }
+
+ y2 = s->rects[0].h - 1;
+ while (y2 > 0 && is_transp(s->rects[0].bitmap + y2 * s->rects[0].linesize, 1,
+ s->rects[0].w, transp_color))
+ y2--;
+ x1 = 0;
+ while (x1 < (s->rects[0].w - 1) && is_transp(s->rects[0].bitmap + x1, s->rects[0].linesize,
+ s->rects[0].h, transp_color))
+ x1++;
+ x2 = s->rects[0].w - 1;
+ while (x2 > 0 && is_transp(s->rects[0].bitmap + x2, s->rects[0].linesize, s->rects[0].h,
+ transp_color))
+ x2--;
+ w = x2 - x1 + 1;
+ h = y2 - y1 + 1;
+ bitmap = av_malloc(w * h);
+ if (!bitmap)
+ return 1;
+ for(y = 0; y < h; y++) {
+ memcpy(bitmap + w * y, s->rects[0].bitmap + x1 + (y1 + y) * s->rects[0].linesize, w);
+ }
+ av_freep(&s->rects[0].bitmap);
+ s->rects[0].bitmap = bitmap;
+ s->rects[0].linesize = w;
+ s->rects[0].w = w;
+ s->rects[0].h = h;
+ s->rects[0].x += x1;
+ s->rects[0].y += y1;
+ return 1;
+}
+
+static int dvdsub_close_decoder(AVCodecContext *avctx)
+{
+ return 0;
+}
+
+#ifdef DEBUG
+#undef fprintf
+static void ppm_save(const char *filename, uint8_t *bitmap, int w, int h,
+ uint32_t *rgba_palette)
+{
+ int x, y, v;
+ FILE *f;
+
+ f = fopen(filename, "w");
+ if (!f) {
+ perror(filename);
+ exit(1);
+ }
+ fprintf(f, "P6\n"
+ "%d %d\n"
+ "%d\n",
+ w, h, 255);
+ for(y = 0; y < h; y++) {
+ for(x = 0; x < w; x++) {
+ v = rgba_palette[bitmap[y * w + x]];
+ putc((v >> 16) & 0xff, f);
+ putc((v >> 8) & 0xff, f);
+ putc((v >> 0) & 0xff, f);
+ }
+ }
+ fclose(f);
+}
+#endif
+
+static int dvdsub_decode(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ AVSubtitle *sub = (void *)data;
+ int is_menu;
+
+ is_menu = decode_dvd_subtitles(sub, buf, buf_size);
+
+ if (is_menu < 0) {
+ no_subtitle:
+ *data_size = 0;
+
+ return buf_size;
+ }
+ if (!is_menu && find_smallest_bounding_rectangle(sub) == 0)
+ goto no_subtitle;
+
+#if defined(DEBUG)
+ av_log(NULL, AV_LOG_INFO, "start=%d ms end =%d ms\n",
+ sub->start_display_time,
+ sub->end_display_time);
+ ppm_save("/tmp/a.ppm", sub->rects[0].bitmap,
+ sub->rects[0].w, sub->rects[0].h, sub->rects[0].rgba_palette);
+#endif
+
+ *data_size = 1;
+ return buf_size;
+}
+
+AVCodec dvdsub_decoder = {
+ "dvdsub",
+ CODEC_TYPE_SUBTITLE,
+ CODEC_ID_DVD_SUBTITLE,
+ 0,
+ dvdsub_init_decoder,
+ NULL,
+ dvdsub_close_decoder,
+ dvdsub_decode,
+};
+
+/* parser definition */
+typedef struct DVDSubParseContext {
+ uint8_t *packet;
+ int packet_len;
+ int packet_index;
+} DVDSubParseContext;
+
+static int dvdsub_parse_init(AVCodecParserContext *s)
+{
+ return 0;
+}
+
+static int dvdsub_parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ DVDSubParseContext *pc = s->priv_data;
+
+ if (pc->packet_index == 0) {
+ if (buf_size < 2)
+ return 0;
+ pc->packet_len = (buf[0] << 8) | buf[1];
+ av_freep(&pc->packet);
+ pc->packet = av_malloc(pc->packet_len);
+ }
+ if (pc->packet) {
+ if (pc->packet_index + buf_size <= pc->packet_len) {
+ memcpy(pc->packet + pc->packet_index, buf, buf_size);
+ pc->packet_index += buf_size;
+ if (pc->packet_index >= pc->packet_len) {
+ *poutbuf = pc->packet;
+ *poutbuf_size = pc->packet_len;
+ pc->packet_index = 0;
+ return buf_size;
+ }
+ } else {
+ /* erroneous size */
+ pc->packet_index = 0;
+ }
+ }
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+ return buf_size;
+}
+
+static void dvdsub_parse_close(AVCodecParserContext *s)
+{
+ DVDSubParseContext *pc = s->priv_data;
+ av_freep(&pc->packet);
+}
+
+AVCodecParser dvdsub_parser = {
+ { CODEC_ID_DVD_SUBTITLE },
+ sizeof(DVDSubParseContext),
+ dvdsub_parse_init,
+ dvdsub_parse,
+ dvdsub_parse_close,
+};
diff --git a/contrib/ffmpeg/libavcodec/dvdsubenc.c b/contrib/ffmpeg/libavcodec/dvdsubenc.c
new file mode 100644
index 000000000..fac29acc2
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/dvdsubenc.c
@@ -0,0 +1,247 @@
+/*
+ * DVD subtitle encoding for ffmpeg
+ * Copyright (c) 2005 Wolfram Gloger.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avcodec.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+// ncnt is the nibble counter
+#define PUTNIBBLE(val)\
+do {\
+ if (ncnt++ & 1)\
+ *q++ = bitbuf | ((val) & 0x0f);\
+ else\
+ bitbuf = (val) << 4;\
+} while(0)
+
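+/* dvd_encode_rle() emits the variable-length nibble codes a DVD subpicture
+ * decoder expects: runs shorter than 4 take one nibble, longer runs take 2,
+ * 3 or 4 nibbles, a 4-nibble code with a zero length field means "fill to
+ * end of line", and every line is padded to a byte boundary. */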
+static void dvd_encode_rle(uint8_t **pq,
+ const uint8_t *bitmap, int linesize,
+ int w, int h,
+ const int cmap[256])
+{
+ uint8_t *q;
+ unsigned int bitbuf = 0;
+ int ncnt;
+ int x, y, len, color;
+
+ q = *pq;
+
+ for (y = 0; y < h; ++y) {
+ ncnt = 0;
+ for(x = 0; x < w; x += len) {
+ color = bitmap[x];
+ for (len=1; x+len < w; ++len)
+ if (bitmap[x+len] != color)
+ break;
+ color = cmap[color];
+ assert(color < 4);
+ if (len < 0x04) {
+ PUTNIBBLE((len << 2)|color);
+ } else if (len < 0x10) {
+ PUTNIBBLE(len >> 2);
+ PUTNIBBLE((len << 2)|color);
+ } else if (len < 0x40) {
+ PUTNIBBLE(0);
+ PUTNIBBLE(len >> 2);
+ PUTNIBBLE((len << 2)|color);
+ } else if (x+len == w) {
+ PUTNIBBLE(0);
+ PUTNIBBLE(0);
+ PUTNIBBLE(0);
+ PUTNIBBLE(color);
+ } else {
+ if (len > 0xff)
+ len = 0xff;
+ PUTNIBBLE(0);
+ PUTNIBBLE(len >> 6);
+ PUTNIBBLE(len >> 2);
+ PUTNIBBLE((len << 2)|color);
+ }
+ }
+ /* end of line */
+ if (ncnt & 1)
+ PUTNIBBLE(0);
+ bitmap += linesize;
+ }
+
+ *pq = q;
+}
+
+static inline void putbe16(uint8_t **pq, uint16_t v)
+{
+ uint8_t *q = *pq;
+ *q++ = v >> 8;
+ *q++ = v;
+ *pq = q;
+}
+
+static int encode_dvd_subtitles(uint8_t *outbuf, int outbuf_size,
+ const AVSubtitle *h)
+{
+ uint8_t *q, *qq;
+ int object_id;
+ int offset1[20], offset2[20];
+ int i, imax, color, alpha, rects = h->num_rects;
+ unsigned long hmax;
+ unsigned long hist[256];
+ int cmap[256];
+
+ if (rects == 0 || h->rects == NULL)
+ return -1;
+ if (rects > 20)
+ rects = 20;
+
+ // analyze bitmaps, compress to 4 colors
+ for (i=0; i<256; ++i) {
+ hist[i] = 0;
+ cmap[i] = 0;
+ }
+ for (object_id = 0; object_id < rects; object_id++)
+ for (i=0; i<h->rects[object_id].w*h->rects[object_id].h; ++i) {
+ color = h->rects[object_id].bitmap[i];
+ // only count non-transparent pixels
+ alpha = h->rects[object_id].rgba_palette[color] >> 24;
+ hist[color] += alpha;
+ }
+ for (color=3;; --color) {
+ hmax = 0;
+ imax = 0;
+ for (i=0; i<256; ++i)
+ if (hist[i] > hmax) {
+ imax = i;
+ hmax = hist[i];
+ }
+ if (hmax == 0)
+ break;
+ if (color == 0)
+ color = 3;
+ av_log(NULL, AV_LOG_DEBUG, "dvd_subtitle hist[%d]=%ld -> col %d\n",
+ imax, hist[imax], color);
+ cmap[imax] = color;
+ hist[imax] = 0;
+ }
+
+
+ // encode data block
+ q = outbuf + 4;
+ for (object_id = 0; object_id < rects; object_id++) {
+ offset1[object_id] = q - outbuf;
+ // worst case memory requirement: 1 nibble per pixel..
+ if ((q - outbuf) + h->rects[object_id].w*h->rects[object_id].h/2
+ + 17*rects + 21 > outbuf_size) {
+ av_log(NULL, AV_LOG_ERROR, "dvd_subtitle too big\n");
+ return -1;
+ }
+ dvd_encode_rle(&q, h->rects[object_id].bitmap,
+ h->rects[object_id].w*2,
+ h->rects[object_id].w, h->rects[object_id].h >> 1,
+ cmap);
+ offset2[object_id] = q - outbuf;
+ dvd_encode_rle(&q, h->rects[object_id].bitmap + h->rects[object_id].w,
+ h->rects[object_id].w*2,
+ h->rects[object_id].w, h->rects[object_id].h >> 1,
+ cmap);
+ }
+
+ // set data packet size
+ qq = outbuf + 2;
+ putbe16(&qq, q - outbuf);
+
+ // send start display command
+ putbe16(&q, (h->start_display_time*90) >> 10);
+ putbe16(&q, (q - outbuf) /*- 2 */ + 8 + 12*rects + 2);
+ *q++ = 0x03; // palette - 4 nibbles
+ *q++ = 0x03; *q++ = 0x7f;
+ *q++ = 0x04; // alpha - 4 nibbles
+ *q++ = 0xf0; *q++ = 0x00;
+ //*q++ = 0x0f; *q++ = 0xff;
+
+ // XXX not sure if more than one rect can really be encoded..
+ // 12 bytes per rect
+ for (object_id = 0; object_id < rects; object_id++) {
+ int x2 = h->rects[object_id].x + h->rects[object_id].w - 1;
+ int y2 = h->rects[object_id].y + h->rects[object_id].h - 1;
+
+ *q++ = 0x05;
+ // x1 x2 -> 6 nibbles
+ *q++ = h->rects[object_id].x >> 4;
+ *q++ = (h->rects[object_id].x << 4) | ((x2 >> 8) & 0xf);
+ *q++ = x2;
+ // y1 y2 -> 6 nibbles
+ *q++ = h->rects[object_id].y >> 4;
+ *q++ = (h->rects[object_id].y << 4) | ((y2 >> 8) & 0xf);
+ *q++ = y2;
+
+ *q++ = 0x06;
+ // offset1, offset2
+ putbe16(&q, offset1[object_id]);
+ putbe16(&q, offset2[object_id]);
+ }
+ *q++ = 0x01; // start command
+ *q++ = 0xff; // terminating command
+
+ // send stop display command last
+ putbe16(&q, (h->end_display_time*90) >> 10);
+ putbe16(&q, (q - outbuf) - 2 /*+ 4*/);
+ *q++ = 0x02; // set end
+ *q++ = 0xff; // terminating command
+
+ qq = outbuf;
+ putbe16(&qq, q - outbuf);
+
+ av_log(NULL, AV_LOG_DEBUG, "subtitle_packet size=%td\n", q - outbuf);
+ return q - outbuf;
+}
+
+static int dvdsub_init_encoder(AVCodecContext *avctx)
+{
+ return 0;
+}
+
+static int dvdsub_close_encoder(AVCodecContext *avctx)
+{
+ return 0;
+}
+
+static int dvdsub_encode(AVCodecContext *avctx,
+ unsigned char *buf, int buf_size, void *data)
+{
+ //DVDSubtitleContext *s = avctx->priv_data;
+ AVSubtitle *sub = data;
+ int ret;
+
+ ret = encode_dvd_subtitles(buf, buf_size, sub);
+ return ret;
+}
+
+AVCodec dvdsub_encoder = {
+ "dvdsub",
+ CODEC_TYPE_SUBTITLE,
+ CODEC_ID_DVD_SUBTITLE,
+ 0,
+ dvdsub_init_encoder,
+ dvdsub_encode,
+ dvdsub_close_encoder,
+};
+
+/* Local Variables: */
+/* c-basic-offset:4 */
+/* End: */
diff --git a/src/libffmpeg/libavcodec/error_resilience.c b/contrib/ffmpeg/libavcodec/error_resilience.c
index 9912044ec..0923721ee 100644
--- a/src/libffmpeg/libavcodec/error_resilience.c
+++ b/contrib/ffmpeg/libavcodec/error_resilience.c
@@ -3,18 +3,20 @@
*
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -197,7 +199,7 @@ static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, i
*/
static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
int b_x, b_y;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
for(b_y=0; b_y<h; b_y++){
for(b_x=0; b_x<w-1; b_x++){
@@ -215,7 +217,7 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
if(!(left_damage||right_damage)) continue; // both undamaged
if( (!left_intra) && (!right_intra)
- && ABS(left_mv[0]-right_mv[0]) + ABS(left_mv[1]+right_mv[1]) < 2) continue;
+ && FFABS(left_mv[0]-right_mv[0]) + FFABS(left_mv[1]+right_mv[1]) < 2) continue;
for(y=0; y<8; y++){
int a,b,c,d;
@@ -224,7 +226,7 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
b= dst[offset + 8 + y*stride] - dst[offset + 7 + y*stride];
c= dst[offset + 9 + y*stride] - dst[offset + 8 + y*stride];
- d= ABS(b) - ((ABS(a) + ABS(c) + 1)>>1);
+ d= FFABS(b) - ((FFABS(a) + FFABS(c) + 1)>>1);
d= FFMAX(d, 0);
if(b<0) d= -d;
@@ -257,7 +259,7 @@ static void h_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
*/
static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int stride, int is_luma){
int b_x, b_y;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
for(b_y=0; b_y<h-1; b_y++){
for(b_x=0; b_x<w; b_x++){
@@ -275,7 +277,7 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
if(!(top_damage||bottom_damage)) continue; // both undamaged
if( (!top_intra) && (!bottom_intra)
- && ABS(top_mv[0]-bottom_mv[0]) + ABS(top_mv[1]+bottom_mv[1]) < 2) continue;
+ && FFABS(top_mv[0]-bottom_mv[0]) + FFABS(top_mv[1]+bottom_mv[1]) < 2) continue;
for(x=0; x<8; x++){
int a,b,c,d;
@@ -284,7 +286,7 @@ static void v_block_filter(MpegEncContext *s, uint8_t *dst, int w, int h, int st
b= dst[offset + x + 8*stride] - dst[offset + x + 7*stride];
c= dst[offset + x + 9*stride] - dst[offset + x + 8*stride];
- d= ABS(b) - ((ABS(a) + ABS(c)+1)>>1);
+ d= FFABS(b) - ((FFABS(a) + FFABS(c)+1)>>1);
d= FFMAX(d, 0);
if(b<0) d= -d;
@@ -493,22 +495,22 @@ int score_sum=0;
if(mb_x>0 && fixed[mb_xy-1]){
int k;
for(k=0; k<16; k++)
- score += ABS(src[k*s->linesize-1 ]-src[k*s->linesize ]);
+ score += FFABS(src[k*s->linesize-1 ]-src[k*s->linesize ]);
}
if(mb_x+1<mb_width && fixed[mb_xy+1]){
int k;
for(k=0; k<16; k++)
- score += ABS(src[k*s->linesize+15]-src[k*s->linesize+16]);
+ score += FFABS(src[k*s->linesize+15]-src[k*s->linesize+16]);
}
if(mb_y>0 && fixed[mb_xy-mb_stride]){
int k;
for(k=0; k<16; k++)
- score += ABS(src[k-s->linesize ]-src[k ]);
+ score += FFABS(src[k-s->linesize ]-src[k ]);
}
if(mb_y+1<mb_height && fixed[mb_xy+mb_stride]){
int k;
for(k=0; k<16; k++)
- score += ABS(src[k+s->linesize*15]-src[k+s->linesize*16]);
+ score += FFABS(src[k+s->linesize*15]-src[k+s->linesize*16]);
}
if(score <= best_score){ // <= will favor the last MV
diff --git a/contrib/ffmpeg/libavcodec/eval.c b/contrib/ffmpeg/libavcodec/eval.c
new file mode 100644
index 000000000..961c8b5ac
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/eval.c
@@ -0,0 +1,466 @@
+/*
+ * simple arithmetic expression evaluator
+ *
+ * Copyright (c) 2002-2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2006 Oded Shimon <ods15@ods15.dyndns.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/**
+ * @file eval.c
+ * simple arithmetic expression evaluator.
+ *
+ * see http://joe.hotchkiss.com/programming/eval/eval.html
+ */
+
+#include "avcodec.h"
+#include "mpegvideo.h"
+#include "eval.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#ifndef NAN
+ #define NAN 0.0/0.0
+#endif
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+
+typedef struct Parser{
+ int stack_index;
+ char *s;
+ double *const_value;
+ const char **const_name; // NULL terminated
+ double (**func1)(void *, double a); // NULL terminated
+ const char **func1_name; // NULL terminated
+ double (**func2)(void *, double a, double b); // NULL terminated
+ char **func2_name; // NULL terminated
+ void *opaque;
+ char **error;
+#define VARS 10
+ double var[VARS];
+} Parser;
+
+static int8_t si_prefixes['z' - 'E' + 1]={
+ ['y'-'E']= -24,
+ ['z'-'E']= -21,
+ ['a'-'E']= -18,
+ ['f'-'E']= -15,
+ ['p'-'E']= -12,
+ ['n'-'E']= - 9,
+ ['u'-'E']= - 6,
+ ['m'-'E']= - 3,
+ ['c'-'E']= - 2,
+ ['d'-'E']= - 1,
+ ['h'-'E']= 2,
+ ['k'-'E']= 3,
+ ['K'-'E']= 3,
+ ['M'-'E']= 6,
+ ['G'-'E']= 9,
+ ['T'-'E']= 12,
+ ['P'-'E']= 15,
+ ['E'-'E']= 18,
+ ['Z'-'E']= 21,
+ ['Y'-'E']= 24,
+};
+
+/** strtod() function extended with 'k', 'M', 'G', 'ki', 'Mi', 'Gi' and 'B'
+ * postfixes. This allows using f.e. kB, MiB, G and B as a postfix. This
+ * function assumes that the unit of numbers is bits not bytes.
+ */
+static double av_strtod(const char *name, char **tail) {
+ double d;
+ char *next;
+ d = strtod(name, &next);
+ /* if parsing succeeded, check for and interpret postfixes */
+ if (next!=name) {
+
+ if(*next >= 'E' && *next <= 'z'){
+ int e= si_prefixes[*next - 'E'];
+ if(e){
+ if(next[1] == 'i'){
+ d*= pow( 2, e/0.3);
+ next+=2;
+ }else{
+ d*= pow(10, e);
+ next++;
+ }
+ }
+ }
+
+ if(*next=='B') {
+ d*=8;
+ next++;
+ }
+ }
+ /* if requested, fill in tail with the position after the last parsed
+ character */
+ if (tail)
+ *tail = next;
+ return d;
+}
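+/* For example: av_strtod("1.5k", NULL) yields 1500, "2Ki" yields 2048
+ * (2 * 2^10), and a trailing 'B' multiplies by 8, so "1KiB" yields 8192. */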
+
+static int strmatch(const char *s, const char *prefix){
+ int i;
+ for(i=0; prefix[i]; i++){
+ if(prefix[i] != s[i]) return 0;
+ }
+ return 1;
+}
+
+struct ff_expr_s {
+ enum {
+ e_value, e_const, e_func0, e_func1, e_func2,
+ e_squish, e_gauss, e_ld,
+ e_mod, e_max, e_min, e_eq, e_gt, e_gte,
+ e_pow, e_mul, e_div, e_add,
+ e_last, e_st, e_while,
+ } type;
+ double value; // is sign in other types
+ union {
+ int const_index;
+ double (*func0)(double);
+ double (*func1)(void *, double);
+ double (*func2)(void *, double, double);
+ } a;
+ AVEvalExpr * param[2];
+};
+
+static double eval_expr(Parser * p, AVEvalExpr * e) {
+ switch (e->type) {
+ case e_value: return e->value;
+ case e_const: return e->value * p->const_value[e->a.const_index];
+ case e_func0: return e->value * e->a.func0(eval_expr(p, e->param[0]));
+ case e_func1: return e->value * e->a.func1(p->opaque, eval_expr(p, e->param[0]));
+ case e_func2: return e->value * e->a.func2(p->opaque, eval_expr(p, e->param[0]), eval_expr(p, e->param[1]));
+ case e_squish: return 1/(1+exp(4*eval_expr(p, e->param[0])));
+ case e_gauss: { double d = eval_expr(p, e->param[0]); return exp(-d*d/2)/sqrt(2*M_PI); }
+ case e_ld: return e->value * p->var[clip(eval_expr(p, e->param[0]), 0, VARS-1)];
+ case e_while: {
+ double d = NAN;
+ while(eval_expr(p, e->param[0]))
+ d=eval_expr(p, e->param[1]);
+ return d;
+ }
+ default: {
+ double d = eval_expr(p, e->param[0]);
+ double d2 = eval_expr(p, e->param[1]);
+ switch (e->type) {
+ case e_mod: return e->value * (d - floor(d/d2)*d2);
+ case e_max: return e->value * (d > d2 ? d : d2);
+ case e_min: return e->value * (d < d2 ? d : d2);
+ case e_eq: return e->value * (d == d2 ? 1.0 : 0.0);
+ case e_gt: return e->value * (d > d2 ? 1.0 : 0.0);
+ case e_gte: return e->value * (d >= d2 ? 1.0 : 0.0);
+ case e_pow: return e->value * pow(d, d2);
+ case e_mul: return e->value * (d * d2);
+ case e_div: return e->value * (d / d2);
+ case e_add: return e->value * (d + d2);
+ case e_last:return e->value * d2;
+ case e_st : return e->value * (p->var[clip(d, 0, VARS-1)]= d2);
+ }
+ }
+ }
+ return NAN;
+}
+
+static AVEvalExpr * parse_expr(Parser *p);
+
+void ff_eval_free(AVEvalExpr * e) {
+ if (!e) return;
+ ff_eval_free(e->param[0]);
+ ff_eval_free(e->param[1]);
+ av_freep(&e);
+}
+
+static AVEvalExpr * parse_primary(Parser *p) {
+ AVEvalExpr * d = av_mallocz(sizeof(AVEvalExpr));
+ char *next= p->s;
+ int i;
+
+ /* number */
+ d->value = av_strtod(p->s, &next);
+ if(next != p->s){
+ d->type = e_value;
+ p->s= next;
+ return d;
+ }
+ d->value = 1;
+
+ /* named constants */
+ for(i=0; p->const_name && p->const_name[i]; i++){
+ if(strmatch(p->s, p->const_name[i])){
+ p->s+= strlen(p->const_name[i]);
+ d->type = e_const;
+ d->a.const_index = i;
+ return d;
+ }
+ }
+
+ p->s= strchr(p->s, '(');
+ if(p->s==NULL){
+ *p->error = "missing (";
+ p->s= next;
+ ff_eval_free(d);
+ return NULL;
+ }
+ p->s++; // "("
+ if (*next == '(') { // special case do-nothing
+ av_freep(&d);
+ d = parse_expr(p);
+ if(p->s[0] != ')'){
+ *p->error = "missing )";
+ ff_eval_free(d);
+ return NULL;
+ }
+ p->s++; // ")"
+ return d;
+ }
+ d->param[0] = parse_expr(p);
+ if(p->s[0]== ','){
+ p->s++; // ","
+ d->param[1] = parse_expr(p);
+ }
+ if(p->s[0] != ')'){
+ *p->error = "missing )";
+ ff_eval_free(d);
+ return NULL;
+ }
+ p->s++; // ")"
+
+ d->type = e_func0;
+ if( strmatch(next, "sinh" ) ) d->a.func0 = sinh;
+ else if( strmatch(next, "cosh" ) ) d->a.func0 = cosh;
+ else if( strmatch(next, "tanh" ) ) d->a.func0 = tanh;
+ else if( strmatch(next, "sin" ) ) d->a.func0 = sin;
+ else if( strmatch(next, "cos" ) ) d->a.func0 = cos;
+ else if( strmatch(next, "tan" ) ) d->a.func0 = tan;
+ else if( strmatch(next, "atan" ) ) d->a.func0 = atan;
+ else if( strmatch(next, "asin" ) ) d->a.func0 = asin;
+ else if( strmatch(next, "acos" ) ) d->a.func0 = acos;
+ else if( strmatch(next, "exp" ) ) d->a.func0 = exp;
+ else if( strmatch(next, "log" ) ) d->a.func0 = log;
+ else if( strmatch(next, "abs" ) ) d->a.func0 = fabs;
+ else if( strmatch(next, "squish") ) d->type = e_squish;
+ else if( strmatch(next, "gauss" ) ) d->type = e_gauss;
+ else if( strmatch(next, "mod" ) ) d->type = e_mod;
+ else if( strmatch(next, "max" ) ) d->type = e_max;
+ else if( strmatch(next, "min" ) ) d->type = e_min;
+ else if( strmatch(next, "eq" ) ) d->type = e_eq;
+ else if( strmatch(next, "gte" ) ) d->type = e_gte;
+ else if( strmatch(next, "gt" ) ) d->type = e_gt;
+ else if( strmatch(next, "lte" ) ) { AVEvalExpr * tmp = d->param[1]; d->param[1] = d->param[0]; d->param[0] = tmp; d->type = e_gte; }
+ else if( strmatch(next, "lt" ) ) { AVEvalExpr * tmp = d->param[1]; d->param[1] = d->param[0]; d->param[0] = tmp; d->type = e_gt; }
+ else if( strmatch(next, "ld" ) ) d->type = e_ld;
+ else if( strmatch(next, "st" ) ) d->type = e_st;
+ else if( strmatch(next, "while" ) ) d->type = e_while;
+ else {
+ for(i=0; p->func1_name && p->func1_name[i]; i++){
+ if(strmatch(next, p->func1_name[i])){
+ d->a.func1 = p->func1[i];
+ d->type = e_func1;
+ return d;
+ }
+ }
+
+ for(i=0; p->func2_name && p->func2_name[i]; i++){
+ if(strmatch(next, p->func2_name[i])){
+ d->a.func2 = p->func2[i];
+ d->type = e_func2;
+ return d;
+ }
+ }
+
+ *p->error = "unknown function";
+ ff_eval_free(d);
+ return NULL;
+ }
+
+ return d;
+}
+
+static AVEvalExpr * new_eval_expr(int type, int value, AVEvalExpr *p0, AVEvalExpr *p1){
+ AVEvalExpr * e = av_mallocz(sizeof(AVEvalExpr));
+ e->type =type ;
+ e->value =value ;
+ e->param[0] =p0 ;
+ e->param[1] =p1 ;
+ return e;
+}
+
+static AVEvalExpr * parse_pow(Parser *p, int *sign){
+ *sign= (*p->s == '+') - (*p->s == '-');
+ p->s += *sign&1;
+ return parse_primary(p);
+}
+
+static AVEvalExpr * parse_factor(Parser *p){
+ int sign, sign2;
+ AVEvalExpr * e = parse_pow(p, &sign);
+ while(p->s[0]=='^'){
+ p->s++;
+ e= new_eval_expr(e_pow, 1, e, parse_pow(p, &sign2));
+ if (e->param[1]) e->param[1]->value *= (sign2|1);
+ }
+ if (e) e->value *= (sign|1);
+ return e;
+}
+
+static AVEvalExpr * parse_term(Parser *p){
+ AVEvalExpr * e = parse_factor(p);
+ while(p->s[0]=='*' || p->s[0]=='/'){
+ int c= *p->s++;
+ e= new_eval_expr(c == '*' ? e_mul : e_div, 1, e, parse_factor(p));
+ }
+ return e;
+}
+
+static AVEvalExpr * parse_subexpr(Parser *p) {
+ AVEvalExpr * e = parse_term(p);
+ while(*p->s == '+' || *p->s == '-') {
+ e= new_eval_expr(e_add, 1, e, parse_term(p));
+ };
+
+ return e;
+}
+
+static AVEvalExpr * parse_expr(Parser *p) {
+ AVEvalExpr * e;
+
+ if(p->stack_index <= 0) //protect against stack overflows
+ return NULL;
+ p->stack_index--;
+
+ e = parse_subexpr(p);
+
+ while(*p->s == ';') {
+ p->s++;
+ e= new_eval_expr(e_last, 1, e, parse_subexpr(p));
+ };
+
+ p->stack_index++;
+
+ return e;
+}
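
For readability, a note on the parser above (as read from the code): the recursive-descent chain implements the precedence parse_expr (';') -> parse_subexpr ('+', '-') -> parse_term ('*', '/') -> parse_factor ('^', with the unary sign handled in parse_pow) -> parse_primary (numbers, named constants, and built-in or user functions). For example, "1+2*3^2" parses as 1+(2*(3^2)) and evaluates to 19.
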
+
+static int verify_expr(AVEvalExpr * e) {
+ if (!e) return 0;
+ switch (e->type) {
+ case e_value:
+ case e_const: return 1;
+ case e_func0:
+ case e_func1:
+ case e_squish:
+ case e_ld:
+ case e_gauss: return verify_expr(e->param[0]);
+ default: return verify_expr(e->param[0]) && verify_expr(e->param[1]);
+ }
+}
+
+AVEvalExpr * ff_parse(char *s, const char **const_name,
+ double (**func1)(void *, double), const char **func1_name,
+ double (**func2)(void *, double, double), char **func2_name,
+ char **error){
+ Parser p;
+ AVEvalExpr * e;
+ char w[strlen(s) + 1], * wp = w;
+
+ while (*s)
+ if (!isspace(*s++)) *wp++ = s[-1];
+ *wp++ = 0;
+
+ p.stack_index=100;
+ p.s= w;
+ p.const_name = const_name;
+ p.func1 = func1;
+ p.func1_name = func1_name;
+ p.func2 = func2;
+ p.func2_name = func2_name;
+ p.error= error;
+
+ e = parse_expr(&p);
+ if (!verify_expr(e)) {
+ ff_eval_free(e);
+ return NULL;
+ }
+ return e;
+}
+
+double ff_parse_eval(AVEvalExpr * e, double *const_value, void *opaque) {
+ Parser p;
+
+ p.const_value= const_value;
+ p.opaque = opaque;
+ return eval_expr(&p, e);
+}
+
+double ff_eval2(char *s, double *const_value, const char **const_name,
+ double (**func1)(void *, double), const char **func1_name,
+ double (**func2)(void *, double, double), char **func2_name,
+ void *opaque, char **error){
+ AVEvalExpr * e = ff_parse(s, const_name, func1, func1_name, func2, func2_name, error);
+ double d;
+ if (!e) return NAN;
+ d = ff_parse_eval(e, const_value, opaque);
+ ff_eval_free(e);
+ return d;
+}
+
+#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
+attribute_deprecated double ff_eval(char *s, double *const_value, const char **const_name,
+ double (**func1)(void *, double), const char **func1_name,
+ double (**func2)(void *, double, double), char **func2_name,
+ void *opaque){
+ char *error=NULL;
+ double ret;
+ ret = ff_eval2(s, const_value, const_name, func1, func1_name, func2, func2_name, opaque, &error);
+ if (error)
+ av_log(NULL, AV_LOG_ERROR, "Error evaluating \"%s\": %s\n", s, error);
+ return ret;
+}
+#endif
+
+#ifdef TEST
+#undef printf
+static double const_values[]={
+ M_PI,
+ M_E,
+ 0
+};
+static const char *const_names[]={
+ "PI",
+ "E",
+ 0
+};
+int main(void){
+ int i;
+ printf("%f == 12.7\n", ff_eval("1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)", const_values, const_names, NULL, NULL, NULL, NULL, NULL));
+ printf("%f == 0.931322575\n", ff_eval("80G/80Gi", const_values, const_names, NULL, NULL, NULL, NULL, NULL));
+
+ for(i=0; i<1050; i++){
+ START_TIMER
+ ff_eval("1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)", const_values, const_names, NULL, NULL, NULL, NULL, NULL);
+ STOP_TIMER("ff_eval")
+ }
+}
+#endif
diff --git a/contrib/ffmpeg/libavcodec/eval.h b/contrib/ffmpeg/libavcodec/eval.h
new file mode 100644
index 000000000..b52199cf4
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/eval.h
@@ -0,0 +1,84 @@
+/*
+ * simple arithmetic expression evaluator
+ *
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file eval.h
+ * eval header.
+ */
+
+#ifndef AVCODEC_EVAL_H
+#define AVCODEC_EVAL_H
+
+#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
+double ff_eval(char *s, double *const_value, const char **const_name,
+ double (**func1)(void *, double), const char **func1_name,
+ double (**func2)(void *, double, double), char **func2_name,
+ void *opaque);
+#endif
+
+/**
+ * Parses and evaluates an expression.
+ * Note: this is significantly slower than ff_parse_eval().
+ * @param s expression as a zero-terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param func1 NULL-terminated array of function pointers for functions which take 1 argument
+ * @param func2 NULL-terminated array of function pointers for functions which take 2 arguments
+ * @param const_name NULL-terminated array of zero-terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_name NULL-terminated array of zero-terminated strings of func1 identifiers
+ * @param func2_name NULL-terminated array of zero-terminated strings of func2 identifiers
+ * @param error pointer to a char* which is set to an error message if something goes wrong
+ * @param const_value a zero-terminated array of values for the identifiers from const_name
+ * @param opaque a pointer which will be passed to all functions from func1 and func2
+ * @return the value of the expression
+ */
+double ff_eval2(char *s, double *const_value, const char **const_name,
+ double (**func1)(void *, double), const char **func1_name,
+ double (**func2)(void *, double, double), char **func2_name,
+ void *opaque, char **error);
+
+typedef struct ff_expr_s AVEvalExpr;
+
+/**
+ * Parses an expression.
+ * @param s expression as a zero-terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param func1 NULL-terminated array of function pointers for functions which take 1 argument
+ * @param func2 NULL-terminated array of function pointers for functions which take 2 arguments
+ * @param const_name NULL-terminated array of zero-terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_name NULL-terminated array of zero-terminated strings of func1 identifiers
+ * @param func2_name NULL-terminated array of zero-terminated strings of func2 identifiers
+ * @param error pointer to a char* which is set to an error message if something goes wrong
+ * @return the parsed AVEvalExpr, which must be freed with ff_eval_free() by the user when it is no longer needed,
+ * or NULL if anything went wrong
+ */
+AVEvalExpr * ff_parse(char *s, const char **const_name,
+ double (**func1)(void *, double), const char **func1_name,
+ double (**func2)(void *, double, double), char **func2_name,
+ char **error);
+/**
+ * Evaluates a previously parsed expression.
+ * @param const_value a zero-terminated array of values for the identifiers from the const_name array passed to ff_parse
+ * @param opaque a pointer which will be passed to all functions from func1 and func2
+ * @return the value of the expression
+ */
+double ff_parse_eval(AVEvalExpr * e, double *const_value, void *opaque);
+void ff_eval_free(AVEvalExpr * e);
+
+#endif /* AVCODEC_EVAL_H */
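
A minimal usage sketch of the API declared above (illustration only: the constant name "N", the expression, and the helper eval_demo() are made up; only the documented entry points are used):

    #include <stdio.h>
    #include "eval.h"

    static void eval_demo(void)
    {
        static const char *names[] = { "N", 0 };
        char expr[] = "2*N+1";
        char *error = NULL;
        AVEvalExpr *e = ff_parse(expr, names, NULL, NULL, NULL, NULL, &error);
        double n;

        if (!e) {
            fprintf(stderr, "parse failed: %s\n", error ? error : "(no message)");
            return;
        }
        /* parse once, evaluate many times */
        for (n = 0; n < 4; n++) {
            double values[] = { n, 0 };   /* value for "N", zero terminated */
            printf("N=%g -> %g\n", n, ff_parse_eval(e, values, NULL));
        }
        ff_eval_free(e);
    }
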
diff --git a/contrib/ffmpeg/libavcodec/faac.c b/contrib/ffmpeg/libavcodec/faac.c
new file mode 100644
index 000000000..06e0b4920
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/faac.c
@@ -0,0 +1,133 @@
+/*
+ * Interface to libfaac for aac encoding
+ * Copyright (c) 2002 Gildas Bazin <gbazin@netcourrier.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file faac.c
+ * Interface to libfaac for aac encoding.
+ */
+
+#include "avcodec.h"
+#include <faac.h>
+
+typedef struct FaacAudioContext {
+ faacEncHandle faac_handle;
+} FaacAudioContext;
+
+static int Faac_encode_init(AVCodecContext *avctx)
+{
+ FaacAudioContext *s = avctx->priv_data;
+ faacEncConfigurationPtr faac_cfg;
+ unsigned long samples_input, max_bytes_output;
+
+ /* number of channels */
+ if (avctx->channels < 1 || avctx->channels > 6)
+ return -1;
+
+ s->faac_handle = faacEncOpen(avctx->sample_rate,
+ avctx->channels,
+ &samples_input, &max_bytes_output);
+
+ /* check faac version */
+ faac_cfg = faacEncGetCurrentConfiguration(s->faac_handle);
+ if (faac_cfg->version != FAAC_CFG_VERSION) {
+ av_log(avctx, AV_LOG_ERROR, "wrong libfaac version (compiled for: %d, using %d)\n", FAAC_CFG_VERSION, faac_cfg->version);
+ faacEncClose(s->faac_handle);
+ return -1;
+ }
+
+ /* put the options in the configuration struct */
+ faac_cfg->aacObjectType = LOW;
+ faac_cfg->mpegVersion = MPEG4;
+ faac_cfg->useTns = 0;
+ faac_cfg->allowMidside = 1;
+ faac_cfg->bitRate = avctx->bit_rate / avctx->channels;
+ faac_cfg->bandWidth = avctx->cutoff;
+ if(avctx->flags & CODEC_FLAG_QSCALE) {
+ faac_cfg->bitRate = 0;
+ faac_cfg->quantqual = avctx->global_quality / FF_QP2LAMBDA;
+ }
+ faac_cfg->outputFormat = 1;
+ faac_cfg->inputFormat = FAAC_INPUT_16BIT;
+
+ avctx->frame_size = samples_input / avctx->channels;
+
+ avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame->key_frame= 1;
+
+ /* Set decoder specific info */
+ avctx->extradata_size = 0;
+ if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+
+ unsigned char *buffer;
+ unsigned long decoder_specific_info_size;
+
+ if (!faacEncGetDecoderSpecificInfo(s->faac_handle, &buffer,
+ &decoder_specific_info_size)) {
+ avctx->extradata = buffer;
+ avctx->extradata_size = decoder_specific_info_size;
+ faac_cfg->outputFormat = 0;
+ }
+ }
+
+ if (!faacEncSetConfiguration(s->faac_handle, faac_cfg)) {
+ av_log(avctx, AV_LOG_ERROR, "libfaac doesn't support this output format!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int Faac_encode_frame(AVCodecContext *avctx,
+ unsigned char *frame, int buf_size, void *data)
+{
+ FaacAudioContext *s = avctx->priv_data;
+ int bytes_written;
+
+ bytes_written = faacEncEncode(s->faac_handle,
+ data,
+ avctx->frame_size * avctx->channels,
+ frame,
+ buf_size);
+
+ return bytes_written;
+}
+
+static int Faac_encode_close(AVCodecContext *avctx)
+{
+ FaacAudioContext *s = avctx->priv_data;
+
+ av_freep(&avctx->coded_frame);
+
+ //if (avctx->extradata_size) free(avctx->extradata);
+
+ faacEncClose(s->faac_handle);
+ return 0;
+}
+
+AVCodec faac_encoder = {
+ "aac",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_AAC,
+ sizeof(FaacAudioContext),
+ Faac_encode_init,
+ Faac_encode_frame,
+ Faac_encode_close
+};
diff --git a/contrib/ffmpeg/libavcodec/faad.c b/contrib/ffmpeg/libavcodec/faad.c
new file mode 100644
index 000000000..df33ea0b2
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/faad.c
@@ -0,0 +1,334 @@
+/*
+ * Faad decoder
+ * Copyright (c) 2003 Zdenek Kabelac.
+ * Copyright (c) 2004 Thomas Raivio.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file faad.c
+ * AAC decoder.
+ *
+ * still a bit unfinished - but it plays something
+ */
+
+#include "avcodec.h"
+#include "faad.h"
+
+#ifndef FAADAPI
+#define FAADAPI
+#endif
+
+/*
+ * when CONFIG_FAADBIN is defined, libfaad is opened at runtime via dlopen()
+ */
+//#undef CONFIG_FAADBIN
+//#define CONFIG_FAADBIN
+
+#ifdef CONFIG_FAADBIN
+#include <dlfcn.h>
+static const char* libfaadname = "libfaad.so.0";
+#else
+#define dlopen(a)
+#define dlclose(a)
+#endif
+
+typedef struct {
+ void* handle; /* dlopen handle */
+ void* faac_handle; /* FAAD library handle */
+ int sample_size;
+ int init;
+
+ /* faad calls */
+ faacDecHandle FAADAPI (*faacDecOpen)(void);
+ faacDecConfigurationPtr FAADAPI (*faacDecGetCurrentConfiguration)(faacDecHandle hDecoder);
+#ifndef FAAD2_VERSION
+ int FAADAPI (*faacDecSetConfiguration)(faacDecHandle hDecoder,
+ faacDecConfigurationPtr config);
+ int FAADAPI (*faacDecInit)(faacDecHandle hDecoder,
+ unsigned char *buffer,
+ unsigned long *samplerate,
+ unsigned long *channels);
+ int FAADAPI (*faacDecInit2)(faacDecHandle hDecoder, unsigned char *pBuffer,
+ unsigned long SizeOfDecoderSpecificInfo,
+ unsigned long *samplerate, unsigned long *channels);
+ int FAADAPI (*faacDecDecode)(faacDecHandle hDecoder,
+ unsigned char *buffer,
+ unsigned long *bytesconsumed,
+ short *sample_buffer,
+ unsigned long *samples);
+#else
+ unsigned char FAADAPI (*faacDecSetConfiguration)(faacDecHandle hDecoder,
+ faacDecConfigurationPtr config);
+ long FAADAPI (*faacDecInit)(faacDecHandle hDecoder,
+ unsigned char *buffer,
+ unsigned long buffer_size,
+ unsigned long *samplerate,
+ unsigned char *channels);
+ char FAADAPI (*faacDecInit2)(faacDecHandle hDecoder, unsigned char *pBuffer,
+ unsigned long SizeOfDecoderSpecificInfo,
+ unsigned long *samplerate, unsigned char *channels);
+ void *FAADAPI (*faacDecDecode)(faacDecHandle hDecoder,
+ faacDecFrameInfo *hInfo,
+ unsigned char *buffer,
+ unsigned long buffer_size);
+ char* FAADAPI (*faacDecGetErrorMessage)(unsigned char errcode);
+#endif
+
+ void FAADAPI (*faacDecClose)(faacDecHandle hDecoder);
+
+
+} FAACContext;
+
+static const unsigned long faac_srates[] =
+{
+ 96000, 88200, 64000, 48000, 44100, 32000,
+ 24000, 22050, 16000, 12000, 11025, 8000
+};
+
+static int faac_init_mp4(AVCodecContext *avctx)
+{
+ FAACContext *s = (FAACContext *) avctx->priv_data;
+ unsigned long samplerate;
+#ifndef FAAD2_VERSION
+ unsigned long channels;
+#else
+ unsigned char channels;
+#endif
+ int r = 0;
+
+ if (avctx->extradata){
+ r = s->faacDecInit2(s->faac_handle, (uint8_t*) avctx->extradata,
+ avctx->extradata_size,
+ &samplerate, &channels);
+ if (r < 0){
+ av_log(avctx, AV_LOG_ERROR,
+ "faacDecInit2 failed r:%d sr:%ld ch:%ld s:%d\n",
+ r, samplerate, (long)channels, avctx->extradata_size);
+ } else {
+ avctx->sample_rate = samplerate;
+ avctx->channels = channels;
+ s->init = 1;
+ }
+ }
+
+ return r;
+}
+
+static int faac_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ FAACContext *s = (FAACContext *) avctx->priv_data;
+#ifndef FAAD2_VERSION
+ unsigned long bytesconsumed;
+ short *sample_buffer = NULL;
+ unsigned long samples;
+ int out;
+#else
+ faacDecFrameInfo frame_info;
+ void *out;
+#endif
+ if(buf_size == 0)
+ return 0;
+#ifndef FAAD2_VERSION
+ out = s->faacDecDecode(s->faac_handle,
+ (unsigned char*)buf,
+ &bytesconsumed,
+ data,
+ &samples);
+ samples *= s->sample_size;
+ if (data_size)
+ *data_size = samples;
+ return (buf_size < (int)bytesconsumed)
+ ? buf_size : (int)bytesconsumed;
+#else
+
+ if(!s->init){
+ unsigned long srate;
+ unsigned char channels;
+ int r = s->faacDecInit(s->faac_handle, buf, buf_size, &srate, &channels);
+ if(r < 0){
+ av_log(avctx, AV_LOG_ERROR, "faac: codec init failed (error %d)\n",
+ r);
+ return -1;
+ }
+ avctx->sample_rate = srate;
+ avctx->channels = channels;
+ s->init = 1;
+ }
+
+ out = s->faacDecDecode(s->faac_handle, &frame_info, (unsigned char*)buf, (unsigned long)buf_size);
+
+ if (frame_info.error > 0) {
+ av_log(avctx, AV_LOG_ERROR, "faac: frame decoding failed: %s\n",
+ s->faacDecGetErrorMessage(frame_info.error));
+ return -1;
+ }
+
+ frame_info.samples *= s->sample_size;
+ memcpy(data, out, frame_info.samples); // CHECKME - can we cheat this one
+
+ if (data_size)
+ *data_size = frame_info.samples;
+
+ return (buf_size < (int)frame_info.bytesconsumed)
+ ? buf_size : (int)frame_info.bytesconsumed;
+#endif
+}
+
+static int faac_decode_end(AVCodecContext *avctx)
+{
+ FAACContext *s = (FAACContext *) avctx->priv_data;
+
+ if (s->faacDecClose)
+ s->faacDecClose(s->faac_handle);
+
+ dlclose(s->handle);
+ return 0;
+}
+
+static int faac_decode_init(AVCodecContext *avctx)
+{
+ FAACContext *s = (FAACContext *) avctx->priv_data;
+ faacDecConfigurationPtr faac_cfg;
+
+#ifdef CONFIG_FAADBIN
+ const char* err = 0;
+
+ s->handle = dlopen(libfaadname, RTLD_LAZY);
+ if (!s->handle)
+ {
+ av_log(avctx, AV_LOG_ERROR, "FAAD library: %s could not be opened! \n%s\n",
+ libfaadname, dlerror());
+ return -1;
+ }
+#define dfaac(a, b) \
+ do { static const char* n = "faacDec" #a; \
+ if ((s->faacDec ## a = b dlsym( s->handle, n )) == NULL) { err = n; break; } } while(0)
+ for(;;) {
+#else /* !CONFIG_FAADBIN */
+#define dfaac(a, b) s->faacDec ## a = faacDec ## a
+#endif /* CONFIG_FAADBIN */
+
+ // resolve all needed function calls
+ dfaac(Open, (faacDecHandle FAADAPI (*)(void)));
+ dfaac(GetCurrentConfiguration, (faacDecConfigurationPtr
+ FAADAPI (*)(faacDecHandle)));
+#ifndef FAAD2_VERSION
+ dfaac(SetConfiguration, (int FAADAPI (*)(faacDecHandle,
+ faacDecConfigurationPtr)));
+
+ dfaac(Init, (int FAADAPI (*)(faacDecHandle, unsigned char*,
+ unsigned long*, unsigned long*)));
+ dfaac(Init2, (int FAADAPI (*)(faacDecHandle, unsigned char*,
+ unsigned long, unsigned long*,
+ unsigned long*)));
+ dfaac(Close, (void FAADAPI (*)(faacDecHandle hDecoder)));
+ dfaac(Decode, (int FAADAPI (*)(faacDecHandle, unsigned char*,
+ unsigned long*, short*, unsigned long*)));
+#else
+ dfaac(SetConfiguration, (unsigned char FAADAPI (*)(faacDecHandle,
+ faacDecConfigurationPtr)));
+ dfaac(Init, (long FAADAPI (*)(faacDecHandle, unsigned char*,
+ unsigned long, unsigned long*, unsigned char*)));
+ dfaac(Init2, (char FAADAPI (*)(faacDecHandle, unsigned char*,
+ unsigned long, unsigned long*,
+ unsigned char*)));
+ dfaac(Decode, (void *FAADAPI (*)(faacDecHandle, faacDecFrameInfo*,
+ unsigned char*, unsigned long)));
+ dfaac(GetErrorMessage, (char* FAADAPI (*)(unsigned char)));
+#endif
+#undef dfaac
+
+#ifdef CONFIG_FAADBIN
+ break;
+ }
+ if (err) {
+ dlclose(s->handle);
+ av_log(avctx, AV_LOG_ERROR, "FAAD library: cannot resolve %s in %s!\n",
+ err, libfaadname);
+ return -1;
+ }
+#endif
+
+ s->faac_handle = s->faacDecOpen();
+ if (!s->faac_handle) {
+ av_log(avctx, AV_LOG_ERROR, "FAAD library: cannot create handler!\n");
+ faac_decode_end(avctx);
+ return -1;
+ }
+
+
+ faac_cfg = s->faacDecGetCurrentConfiguration(s->faac_handle);
+
+ if (faac_cfg) {
+ switch (avctx->bits_per_sample) {
+ case 8: av_log(avctx, AV_LOG_ERROR, "FAADlib unsupported bps %d\n", avctx->bits_per_sample); break;
+ default:
+ case 16:
+#ifdef FAAD2_VERSION
+ faac_cfg->outputFormat = FAAD_FMT_16BIT;
+#endif
+ s->sample_size = 2;
+ break;
+ case 24:
+#ifdef FAAD2_VERSION
+ faac_cfg->outputFormat = FAAD_FMT_24BIT;
+#endif
+ s->sample_size = 3;
+ break;
+ case 32:
+#ifdef FAAD2_VERSION
+ faac_cfg->outputFormat = FAAD_FMT_32BIT;
+#endif
+ s->sample_size = 4;
+ break;
+ }
+
+ faac_cfg->defSampleRate = (!avctx->sample_rate) ? 44100 : avctx->sample_rate;
+ faac_cfg->defObjectType = LC;
+ }
+
+ s->faacDecSetConfiguration(s->faac_handle, faac_cfg);
+
+ faac_init_mp4(avctx);
+
+ return 0;
+}
+
+#define AAC_CODEC(id, name) \
+AVCodec name ## _decoder = { \
+ #name, \
+ CODEC_TYPE_AUDIO, \
+ id, \
+ sizeof(FAACContext), \
+ faac_decode_init, \
+ NULL, \
+ faac_decode_end, \
+ faac_decode_frame, \
+}
+
+// FIXME - raw AAC files - maybe just one entry will be enough
+AAC_CODEC(CODEC_ID_AAC, aac);
+#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
+// If it's an MP4 file - usually embedded in a QuickTime MOV container
+AAC_CODEC(CODEC_ID_MPEG4AAC, mpeg4aac);
+#endif
+
+#undef AAC_CODEC
diff --git a/src/libffmpeg/libavcodec/faandct.c b/contrib/ffmpeg/libavcodec/faandct.c
index cd7ef7c6b..e3c0d84a2 100644
--- a/src/libffmpeg/libavcodec/faandct.c
+++ b/contrib/ffmpeg/libavcodec/faandct.c
@@ -2,18 +2,20 @@
* Floating point AAN DCT
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* this implementation is based upon the IJG integer AAN DCT (see jfdctfst.c)
diff --git a/src/libffmpeg/libavcodec/faandct.h b/contrib/ffmpeg/libavcodec/faandct.h
index 677594c04..77dd41dae 100644
--- a/src/libffmpeg/libavcodec/faandct.h
+++ b/contrib/ffmpeg/libavcodec/faandct.h
@@ -2,18 +2,20 @@
* Floating point AAN DCT
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/fdctref.c b/contrib/ffmpeg/libavcodec/fdctref.c
index 5eff36849..5eff36849 100644
--- a/src/libffmpeg/libavcodec/fdctref.c
+++ b/contrib/ffmpeg/libavcodec/fdctref.c
diff --git a/contrib/ffmpeg/libavcodec/fft-test.c b/contrib/ffmpeg/libavcodec/fft-test.c
new file mode 100644
index 000000000..e108a6f7b
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/fft-test.c
@@ -0,0 +1,297 @@
+/*
+ * (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file fft-test.c
+ * FFT and MDCT tests.
+ */
+
+#include "dsputil.h"
+#include <math.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+int mm_flags;
+
+/* reference fft */
+
+#define MUL16(a,b) ((a) * (b))
+
+#define CMAC(pre, pim, are, aim, bre, bim) \
+{\
+ pre += (MUL16(are, bre) - MUL16(aim, bim));\
+ pim += (MUL16(are, bim) + MUL16(bre, aim));\
+}
+
+FFTComplex *exptab;
+
+void fft_ref_init(int nbits, int inverse)
+{
+ int n, i;
+ float c1, s1, alpha;
+
+ n = 1 << nbits;
+ exptab = av_malloc((n / 2) * sizeof(FFTComplex));
+
+ for(i=0;i<(n/2);i++) {
+ alpha = 2 * M_PI * (float)i / (float)n;
+ c1 = cos(alpha);
+ s1 = sin(alpha);
+ if (!inverse)
+ s1 = -s1;
+ exptab[i].re = c1;
+ exptab[i].im = s1;
+ }
+}
+
+void fft_ref(FFTComplex *tabr, FFTComplex *tab, int nbits)
+{
+ int n, i, j, k, n2;
+ float tmp_re, tmp_im, s, c;
+ FFTComplex *q;
+
+ n = 1 << nbits;
+ n2 = n >> 1;
+ for(i=0;i<n;i++) {
+ tmp_re = 0;
+ tmp_im = 0;
+ q = tab;
+ for(j=0;j<n;j++) {
+ k = (i * j) & (n - 1);
+ if (k >= n2) {
+ c = -exptab[k - n2].re;
+ s = -exptab[k - n2].im;
+ } else {
+ c = exptab[k].re;
+ s = exptab[k].im;
+ }
+ CMAC(tmp_re, tmp_im, c, s, q->re, q->im);
+ q++;
+ }
+ tabr[i].re = tmp_re;
+ tabr[i].im = tmp_im;
+ }
+}
+
+void imdct_ref(float *out, float *in, int n)
+{
+ int k, i, a;
+ float sum, f;
+
+ for(i=0;i<n;i++) {
+ sum = 0;
+ for(k=0;k<n/2;k++) {
+ a = (2 * i + 1 + (n / 2)) * (2 * k + 1);
+ f = cos(M_PI * a / (double)(2 * n));
+ sum += f * in[k];
+ }
+ out[i] = -sum;
+ }
+}
+
+/* NOTE: no normalisation by 1 / N is done */
+void mdct_ref(float *output, float *input, int n)
+{
+ int k, i;
+ float a, s;
+
+ /* do it by hand */
+ for(k=0;k<n/2;k++) {
+ s = 0;
+ for(i=0;i<n;i++) {
+ a = (2*M_PI*(2*i+1+n/2)*(2*k+1) / (4 * n));
+ s += input[i] * cos(a);
+ }
+ output[k] = s;
+ }
+}
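
In LaTeX form, the reference transforms coded above (a direct transcription of the loops; no normalisation is applied) are:

    X[k] = \sum_{i=0}^{n-1} x[i] \cos\!\left(\frac{\pi (2i+1+n/2)(2k+1)}{2n}\right), \quad k = 0, \dots, n/2-1

    y[i] = -\sum_{k=0}^{n/2-1} X[k] \cos\!\left(\frac{\pi (2i+1+n/2)(2k+1)}{2n}\right), \quad i = 0, \dots, n-1

The 2\pi/(4n) factor in mdct_ref() equals \pi/(2n), so both kernels use the same cosine argument.
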
+
+
+float frandom(void)
+{
+ return (float)((random() & 0xffff) - 32768) / 32768.0;
+}
+
+int64_t gettime(void)
+{
+ struct timeval tv;
+ gettimeofday(&tv,NULL);
+ return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+void check_diff(float *tab1, float *tab2, int n)
+{
+ int i;
+
+ for(i=0;i<n;i++) {
+ if (fabsf(tab1[i] - tab2[i]) >= 1e-3) {
+ av_log(NULL, AV_LOG_ERROR, "ERROR %d: %f %f\n",
+ i, tab1[i], tab2[i]);
+ }
+ }
+}
+
+
+void help(void)
+{
+ av_log(NULL, AV_LOG_INFO,"usage: fft-test [-h] [-s] [-i] [-n b]\n"
+ "-h print this help\n"
+ "-s speed test\n"
+ "-m (I)MDCT test\n"
+ "-i inverse transform test\n"
+ "-n b set the transform size to 2^b\n"
+ );
+ exit(1);
+}
+
+
+
+int main(int argc, char **argv)
+{
+ FFTComplex *tab, *tab1, *tab_ref;
+ FFTSample *tabtmp, *tab2;
+ int it, i, c;
+ int do_speed = 0;
+ int do_mdct = 0;
+ int do_inverse = 0;
+ FFTContext s1, *s = &s1;
+ MDCTContext m1, *m = &m1;
+ int fft_nbits, fft_size;
+
+ mm_flags = 0;
+ fft_nbits = 9;
+ for(;;) {
+ c = getopt(argc, argv, "hsimn:");
+ if (c == -1)
+ break;
+ switch(c) {
+ case 'h':
+ help();
+ break;
+ case 's':
+ do_speed = 1;
+ break;
+ case 'i':
+ do_inverse = 1;
+ break;
+ case 'm':
+ do_mdct = 1;
+ break;
+ case 'n':
+ fft_nbits = atoi(optarg);
+ break;
+ }
+ }
+
+ fft_size = 1 << fft_nbits;
+ tab = av_malloc(fft_size * sizeof(FFTComplex));
+ tab1 = av_malloc(fft_size * sizeof(FFTComplex));
+ tab_ref = av_malloc(fft_size * sizeof(FFTComplex));
+ tabtmp = av_malloc(fft_size / 2 * sizeof(FFTSample));
+ tab2 = av_malloc(fft_size * sizeof(FFTSample));
+
+ if (do_mdct) {
+ if (do_inverse)
+ av_log(NULL, AV_LOG_INFO,"IMDCT");
+ else
+ av_log(NULL, AV_LOG_INFO,"MDCT");
+ ff_mdct_init(m, fft_nbits, do_inverse);
+ } else {
+ if (do_inverse)
+ av_log(NULL, AV_LOG_INFO,"IFFT");
+ else
+ av_log(NULL, AV_LOG_INFO,"FFT");
+ ff_fft_init(s, fft_nbits, do_inverse);
+ fft_ref_init(fft_nbits, do_inverse);
+ }
+ av_log(NULL, AV_LOG_INFO," %d test\n", fft_size);
+
+ /* generate random data */
+
+ for(i=0;i<fft_size;i++) {
+ tab1[i].re = frandom();
+ tab1[i].im = frandom();
+ }
+
+ /* checking result */
+ av_log(NULL, AV_LOG_INFO,"Checking...\n");
+
+ if (do_mdct) {
+ if (do_inverse) {
+ imdct_ref((float *)tab_ref, (float *)tab1, fft_size);
+ ff_imdct_calc(m, tab2, (float *)tab1, tabtmp);
+ check_diff((float *)tab_ref, tab2, fft_size);
+ } else {
+ mdct_ref((float *)tab_ref, (float *)tab1, fft_size);
+
+ ff_mdct_calc(m, tab2, (float *)tab1, tabtmp);
+
+ check_diff((float *)tab_ref, tab2, fft_size / 2);
+ }
+ } else {
+ memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
+ ff_fft_permute(s, tab);
+ ff_fft_calc(s, tab);
+
+ fft_ref(tab_ref, tab1, fft_nbits);
+ check_diff((float *)tab_ref, (float *)tab, fft_size * 2);
+ }
+
+ /* do a speed test */
+
+ if (do_speed) {
+ int64_t time_start, duration;
+ int nb_its;
+
+ av_log(NULL, AV_LOG_INFO,"Speed test...\n");
+ /* measure for about 1 second */
+ nb_its = 1;
+ for(;;) {
+ time_start = gettime();
+ for(it=0;it<nb_its;it++) {
+ if (do_mdct) {
+ if (do_inverse) {
+ ff_imdct_calc(m, (float *)tab, (float *)tab1, tabtmp);
+ } else {
+ ff_mdct_calc(m, (float *)tab, (float *)tab1, tabtmp);
+ }
+ } else {
+ memcpy(tab, tab1, fft_size * sizeof(FFTComplex));
+ ff_fft_calc(s, tab);
+ }
+ }
+ duration = gettime() - time_start;
+ if (duration >= 1000000)
+ break;
+ nb_its *= 2;
+ }
+ av_log(NULL, AV_LOG_INFO,"time: %0.1f us/transform [total time=%0.2f s its=%d]\n",
+ (double)duration / nb_its,
+ (double)duration / 1000000.0,
+ nb_its);
+ }
+
+ if (do_mdct) {
+ ff_mdct_end(m);
+ } else {
+ ff_fft_end(s);
+ }
+ return 0;
+}
diff --git a/src/libffmpeg/libavcodec/fft.c b/contrib/ffmpeg/libavcodec/fft.c
index 1c63f6889..62a6a5576 100644
--- a/src/libffmpeg/libavcodec/fft.c
+++ b/contrib/ffmpeg/libavcodec/fft.c
@@ -2,18 +2,20 @@
* FFT/IFFT transforms
* Copyright (c) 2002 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -54,24 +56,35 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
s->exptab[i].im = s1;
}
s->fft_calc = ff_fft_calc_c;
+ s->imdct_calc = ff_imdct_calc;
s->exptab1 = NULL;
/* compute constant table for HAVE_SSE version */
-#if (defined(HAVE_MMX) && (defined(HAVE_BUILTIN_VECTOR) || defined(HAVE_MM3DNOW))) || defined(HAVE_ALTIVEC)
+#if defined(HAVE_MMX) \
+ || (defined(HAVE_ALTIVEC) && !defined(ALTIVEC_USE_REFERENCE_C_CODE))
{
- int has_vectors = 0;
+ int has_vectors = mm_support();
+ if (has_vectors) {
#if defined(HAVE_MMX)
-#ifdef HAVE_MM3DNOW
- has_vectors = mm_support() & (MM_3DNOW | MM_3DNOWEXT | MM_SSE | MM_SSE2);
-#else
- has_vectors = mm_support() & (MM_SSE | MM_SSE2);
-#endif
-#endif
-#if defined(HAVE_ALTIVEC) && !defined(ALTIVEC_USE_REFERENCE_C_CODE)
- has_vectors = mm_support() & MM_ALTIVEC;
+ if (has_vectors & MM_3DNOWEXT) {
+ /* 3DNowEx for K7/K8 */
+ s->imdct_calc = ff_imdct_calc_3dn2;
+ s->fft_calc = ff_fft_calc_3dn2;
+ } else if (has_vectors & MM_3DNOW) {
+ /* 3DNow! for K6-2/3 */
+ s->fft_calc = ff_fft_calc_3dn;
+ } else if (has_vectors & MM_SSE) {
+ /* SSE for P3/P4 */
+ s->imdct_calc = ff_imdct_calc_sse;
+ s->fft_calc = ff_fft_calc_sse;
+ }
+#else /* HAVE_MMX */
+ if (has_vectors & MM_ALTIVEC)
+ s->fft_calc = ff_fft_calc_altivec;
#endif
- if (has_vectors) {
+ }
+ if (s->fft_calc != ff_fft_calc_c) {
int np, nblocks, np2, l;
FFTComplex *q;
@@ -97,27 +110,6 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
nblocks = nblocks >> 1;
} while (nblocks != 0);
av_freep(&s->exptab);
-#if defined(HAVE_MMX)
-#ifdef HAVE_MM3DNOW
- if (has_vectors & MM_3DNOWEXT)
- /* 3DNowEx for Athlon(XP) */
- s->fft_calc = ff_fft_calc_3dn2;
- else if (has_vectors & MM_3DNOW)
- /* 3DNow! for K6-2/3 */
- s->fft_calc = ff_fft_calc_3dn;
-#endif
-#ifdef HAVE_BUILTIN_VECTOR
- if (has_vectors & MM_SSE2)
- /* SSE for P4/K8 */
- s->fft_calc = ff_fft_calc_sse;
- else if ((has_vectors & MM_SSE) &&
- s->fft_calc == ff_fft_calc_c)
- /* SSE for P3 */
- s->fft_calc = ff_fft_calc_sse;
-#endif
-#else /* HAVE_MMX */
- s->fft_calc = ff_fft_calc_altivec;
-#endif
}
}
#endif
diff --git a/src/libffmpeg/libavcodec/ffv1.c b/contrib/ffmpeg/libavcodec/ffv1.c
index c987d84f6..62623e591 100644
--- a/src/libffmpeg/libavcodec/ffv1.c
+++ b/contrib/ffmpeg/libavcodec/ffv1.c
@@ -3,18 +3,20 @@
*
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -223,7 +225,7 @@ static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signe
int i;
if(v){
- const int a= ABS(v);
+ const int a= FFABS(v);
const int e= av_log2(a);
put_rac(c, state+0, 0);
@@ -271,7 +273,7 @@ static inline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
static inline void update_vlc_state(VlcState * const state, const int v){
int drift= state->drift;
int count= state->count;
- state->error_sum += ABS(v);
+ state->error_sum += FFABS(v);
drift += v;
if(count == 128){ //FIXME variable
@@ -354,6 +356,7 @@ static inline int get_vlc_symbol(GetBitContext *gb, VlcState * const state, int
return ret;
}
+#ifdef CONFIG_ENCODERS
static inline int encode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){
PlaneContext * const p= &s->plane[plane_index];
RangeCoder * const c= &s->c;
@@ -527,6 +530,7 @@ static void write_header(FFV1Context *f){
for(i=0; i<5; i++)
write_quant_table(c, f->quant_table[i]);
}
+#endif /* CONFIG_ENCODERS */
static int common_init(AVCodecContext *avctx){
FFV1Context *s = avctx->priv_data;
@@ -545,6 +549,7 @@ static int common_init(AVCodecContext *avctx){
return 0;
}
+#ifdef CONFIG_ENCODERS
static int encode_init(AVCodecContext *avctx)
{
FFV1Context *s = avctx->priv_data;
@@ -608,6 +613,7 @@ static int encode_init(AVCodecContext *avctx)
return 0;
}
+#endif /* CONFIG_ENCODERS */
static void clear_state(FFV1Context *f){
@@ -632,6 +638,7 @@ static void clear_state(FFV1Context *f){
}
}
+#ifdef CONFIG_ENCODERS
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
FFV1Context *f = avctx->priv_data;
RangeCoder * const c= &f->c;
@@ -687,6 +694,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
return used_count + (put_bits_count(&f->pb)+7)/8;
}
}
+#endif /* CONFIG_ENCODERS */
static int common_end(AVCodecContext *avctx){
FFV1Context *s = avctx->priv_data;
@@ -1027,5 +1035,6 @@ AVCodec ffv1_encoder = {
encode_init,
encode_frame,
common_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGBA32, -1},
};
#endif
diff --git a/src/libffmpeg/libavcodec/flac.c b/contrib/ffmpeg/libavcodec/flac.c
index 659112c77..6c64ad0a1 100644
--- a/src/libffmpeg/libavcodec/flac.c
+++ b/contrib/ffmpeg/libavcodec/flac.c
@@ -2,18 +2,20 @@
* FLAC (Free Lossless Audio Codec) decoder
* Copyright (c) 2003 Alex Beregszaszi
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -33,6 +35,7 @@
#include <limits.h>
+#define ALT_BITSTREAM_READER
#include "avcodec.h"
#include "bitstream.h"
#include "golomb.h"
@@ -92,18 +95,23 @@ static int64_t get_utf8(GetBitContext *gb){
}
static void metadata_streaminfo(FLACContext *s);
-static void dump_headers(FLACContext *s);
+static void allocate_buffers(FLACContext *s);
+static int metadata_parse(FLACContext *s);
static int flac_decode_init(AVCodecContext * avctx)
{
FLACContext *s = avctx->priv_data;
s->avctx = avctx;
- /* initialize based on the demuxer-supplied streamdata header */
- if (avctx->extradata_size == FLAC_STREAMINFO_SIZE) {
+ if (avctx->extradata_size > 4) {
+ /* initialize based on the demuxer-supplied streamdata header */
init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size*8);
- metadata_streaminfo(s);
- dump_headers(s);
+ if (avctx->extradata_size == FLAC_STREAMINFO_SIZE) {
+ metadata_streaminfo(s);
+ allocate_buffers(s);
+ } else {
+ metadata_parse(s);
+ }
}
return 0;
@@ -156,7 +164,51 @@ static void metadata_streaminfo(FLACContext *s)
skip_bits(&s->gb, 64); /* md5 sum */
skip_bits(&s->gb, 64); /* md5 sum */
- allocate_buffers(s);
+ dump_headers(s);
+}
+
+/**
+ * Parse a list of metadata blocks. This list of blocks must begin with
+ * the fLaC marker.
+ * @param s the flac decoding context containing the gb bit reader used to
+ * parse metadata
+ * @return 1 if some metadata was read, 0 if no fLaC marker was found
+ */
+static int metadata_parse(FLACContext *s)
+{
+ int i, metadata_last, metadata_type, metadata_size, streaminfo_updated=0;
+
+ if (show_bits_long(&s->gb, 32) == MKBETAG('f','L','a','C')) {
+ skip_bits(&s->gb, 32);
+
+ av_log(s->avctx, AV_LOG_DEBUG, "STREAM HEADER\n");
+ do {
+ metadata_last = get_bits(&s->gb, 1);
+ metadata_type = get_bits(&s->gb, 7);
+ metadata_size = get_bits_long(&s->gb, 24);
+
+ av_log(s->avctx, AV_LOG_DEBUG,
+ " metadata block: flag = %d, type = %d, size = %d\n",
+ metadata_last, metadata_type, metadata_size);
+ if (metadata_size) {
+ switch (metadata_type) {
+ case METADATA_TYPE_STREAMINFO:
+ metadata_streaminfo(s);
+ streaminfo_updated = 1;
+ break;
+
+ default:
+ for (i=0; i<metadata_size; i++)
+ skip_bits(&s->gb, 8);
+ }
+ }
+ } while (!metadata_last);
+
+ if (streaminfo_updated)
+ allocate_buffers(s);
+ return 1;
+ }
+ return 0;
}
static int decode_residuals(FLACContext *s, int channel, int pred_order)
@@ -525,7 +577,6 @@ static int flac_decode_frame(AVCodecContext *avctx,
uint8_t *buf, int buf_size)
{
FLACContext *s = avctx->priv_data;
- int metadata_last, metadata_type, metadata_size;
int tmp = 0, i, j = 0, input_buf_size = 0;
int16_t *samples = data;
@@ -556,47 +607,8 @@ static int flac_decode_frame(AVCodecContext *avctx,
init_get_bits(&s->gb, buf, buf_size*8);
- /* fLaC signature (be) */
- if (show_bits_long(&s->gb, 32) == bswap_32(ff_get_fourcc("fLaC")))
+ if (!metadata_parse(s))
{
- skip_bits(&s->gb, 32);
-
- av_log(s->avctx, AV_LOG_DEBUG, "STREAM HEADER\n");
- do {
- metadata_last = get_bits(&s->gb, 1);
- metadata_type = get_bits(&s->gb, 7);
- metadata_size = get_bits_long(&s->gb, 24);
-
- av_log(s->avctx, AV_LOG_DEBUG, " metadata block: flag = %d, type = %d, size = %d\n",
- metadata_last, metadata_type,
- metadata_size);
- if(metadata_size){
- switch(metadata_type)
- {
- case METADATA_TYPE_STREAMINFO:{
- metadata_streaminfo(s);
-
- /* Buffer might have been reallocated, reinit bitreader */
- if(buf != &s->bitstream[s->bitstream_index])
- {
- int bits_count = get_bits_count(&s->gb);
- buf= &s->bitstream[s->bitstream_index];
- init_get_bits(&s->gb, buf, buf_size*8);
- skip_bits(&s->gb, bits_count);
- }
-
- dump_headers(s);
- break;}
- default:
- for(i=0; i<metadata_size; i++)
- skip_bits(&s->gb, 8);
- }
- }
- } while(!metadata_last);
- }
- else
- {
-
tmp = show_bits(&s->gb, 16);
if(tmp != 0xFFF8){
av_log(s->avctx, AV_LOG_ERROR, "FRAME HEADER not here\n");
diff --git a/contrib/ffmpeg/libavcodec/flacenc.c b/contrib/ffmpeg/libavcodec/flacenc.c
new file mode 100644
index 000000000..b7b7d0d8e
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/flacenc.c
@@ -0,0 +1,1371 @@
+/**
+ * FLAC audio encoder
+ * Copyright (c) 2006 Justin Ruggles <jruggle@earthlink.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "bitstream.h"
+#include "crc.h"
+#include "golomb.h"
+#include "lls.h"
+
+#define FLAC_MAX_CH 8
+#define FLAC_MIN_BLOCKSIZE 16
+#define FLAC_MAX_BLOCKSIZE 65535
+
+#define FLAC_SUBFRAME_CONSTANT 0
+#define FLAC_SUBFRAME_VERBATIM 1
+#define FLAC_SUBFRAME_FIXED 8
+#define FLAC_SUBFRAME_LPC 32
+
+#define FLAC_CHMODE_NOT_STEREO 0
+#define FLAC_CHMODE_LEFT_RIGHT 1
+#define FLAC_CHMODE_LEFT_SIDE 8
+#define FLAC_CHMODE_RIGHT_SIDE 9
+#define FLAC_CHMODE_MID_SIDE 10
+
+#define ORDER_METHOD_EST 0
+#define ORDER_METHOD_2LEVEL 1
+#define ORDER_METHOD_4LEVEL 2
+#define ORDER_METHOD_8LEVEL 3
+#define ORDER_METHOD_SEARCH 4
+#define ORDER_METHOD_LOG 5
+
+#define FLAC_STREAMINFO_SIZE 34
+
+#define MIN_LPC_ORDER 1
+#define MAX_LPC_ORDER 32
+#define MAX_FIXED_ORDER 4
+#define MAX_PARTITION_ORDER 8
+#define MAX_PARTITIONS (1 << MAX_PARTITION_ORDER)
+#define MAX_LPC_PRECISION 15
+#define MAX_LPC_SHIFT 15
+#define MAX_RICE_PARAM 14
+
+typedef struct CompressionOptions {
+ int compression_level;
+ int block_time_ms;
+ int use_lpc;
+ int lpc_coeff_precision;
+ int min_prediction_order;
+ int max_prediction_order;
+ int prediction_order_method;
+ int min_partition_order;
+ int max_partition_order;
+} CompressionOptions;
+
+typedef struct RiceContext {
+ int porder;
+ int params[MAX_PARTITIONS];
+} RiceContext;
+
+typedef struct FlacSubframe {
+ int type;
+ int type_code;
+ int obits;
+ int order;
+ int32_t coefs[MAX_LPC_ORDER];
+ int shift;
+ RiceContext rc;
+ int32_t samples[FLAC_MAX_BLOCKSIZE];
+ int32_t residual[FLAC_MAX_BLOCKSIZE];
+} FlacSubframe;
+
+typedef struct FlacFrame {
+ FlacSubframe subframes[FLAC_MAX_CH];
+ int blocksize;
+ int bs_code[2];
+ uint8_t crc8;
+ int ch_mode;
+} FlacFrame;
+
+typedef struct FlacEncodeContext {
+ PutBitContext pb;
+ int channels;
+ int ch_code;
+ int samplerate;
+ int sr_code[2];
+ int blocksize;
+ int max_framesize;
+ uint32_t frame_count;
+ FlacFrame frame;
+ CompressionOptions options;
+ AVCodecContext *avctx;
+} FlacEncodeContext;
+
+static const int flac_samplerates[16] = {
+ 0, 0, 0, 0,
+ 8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000,
+ 0, 0, 0, 0
+};
+
+static const int flac_blocksizes[16] = {
+ 0,
+ 192,
+ 576, 1152, 2304, 4608,
+ 0, 0,
+ 256, 512, 1024, 2048, 4096, 8192, 16384, 32768
+};
+
+/**
+ * Writes streaminfo metadata block to byte array
+ */
+static void write_streaminfo(FlacEncodeContext *s, uint8_t *header)
+{
+ PutBitContext pb;
+
+ memset(header, 0, FLAC_STREAMINFO_SIZE);
+ init_put_bits(&pb, header, FLAC_STREAMINFO_SIZE);
+
+ /* streaminfo metadata block */
+ put_bits(&pb, 16, s->blocksize);
+ put_bits(&pb, 16, s->blocksize);
+ put_bits(&pb, 24, 0);
+ put_bits(&pb, 24, s->max_framesize);
+ put_bits(&pb, 20, s->samplerate);
+ put_bits(&pb, 3, s->channels-1);
+ put_bits(&pb, 5, 15); /* bits per sample - 1 */
+ flush_put_bits(&pb);
+ /* total samples = 0 */
+ /* MD5 signature = 0 */
+}
+
+/**
+ * Sets the blocksize based on the sample rate.
+ * Chooses the largest predefined blocksize whose duration is at most block_time_ms milliseconds.
+ */
+static int select_blocksize(int samplerate, int block_time_ms)
+{
+ int i;
+ int target;
+ int blocksize;
+
+ assert(samplerate > 0);
+ blocksize = flac_blocksizes[1];
+ target = (samplerate * block_time_ms) / 1000;
+ for(i=0; i<16; i++) {
+ if(target >= flac_blocksizes[i] && flac_blocksizes[i] > blocksize) {
+ blocksize = flac_blocksizes[i];
+ }
+ }
+ return blocksize;
+}
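
Worked example (as read from the code above and the flac_blocksizes[] table): at 44100 Hz with block_time_ms = 105, target = 44100 * 105 / 1000 = 4630 samples, and the largest predefined blocksize not exceeding that is 4608, so select_blocksize() returns 4608.
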
+
+static int flac_encode_init(AVCodecContext *avctx)
+{
+ int freq = avctx->sample_rate;
+ int channels = avctx->channels;
+ FlacEncodeContext *s = avctx->priv_data;
+ int i, level;
+ uint8_t *streaminfo;
+
+ s->avctx = avctx;
+
+ if(avctx->sample_fmt != SAMPLE_FMT_S16) {
+ return -1;
+ }
+
+ if(channels < 1 || channels > FLAC_MAX_CH) {
+ return -1;
+ }
+ s->channels = channels;
+ s->ch_code = s->channels-1;
+
+ /* find samplerate in table */
+ if(freq < 1)
+ return -1;
+ for(i=4; i<12; i++) {
+ if(freq == flac_samplerates[i]) {
+ s->samplerate = flac_samplerates[i];
+ s->sr_code[0] = i;
+ s->sr_code[1] = 0;
+ break;
+ }
+ }
+ /* if not in table, samplerate is non-standard */
+ if(i == 12) {
+ if(freq % 1000 == 0 && freq < 255000) {
+ s->sr_code[0] = 12;
+ s->sr_code[1] = freq / 1000;
+ } else if(freq % 10 == 0 && freq < 655350) {
+ s->sr_code[0] = 14;
+ s->sr_code[1] = freq / 10;
+ } else if(freq < 65535) {
+ s->sr_code[0] = 13;
+ s->sr_code[1] = freq;
+ } else {
+ return -1;
+ }
+ s->samplerate = freq;
+ }
+
+ /* set compression option defaults based on avctx->compression_level */
+ if(avctx->compression_level < 0) {
+ s->options.compression_level = 5;
+ } else {
+ s->options.compression_level = avctx->compression_level;
+ }
+ av_log(avctx, AV_LOG_DEBUG, " compression: %d\n", s->options.compression_level);
+
+ level= s->options.compression_level;
+ if(level > 12) {
+ av_log(avctx, AV_LOG_ERROR, "invalid compression level: %d\n",
+ s->options.compression_level);
+ return -1;
+ }
+
+ s->options.block_time_ms = ((int[]){ 27, 27, 27,105,105,105,105,105,105,105,105,105,105})[level];
+ s->options.use_lpc = ((int[]){ 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})[level];
+ s->options.min_prediction_order= ((int[]){ 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})[level];
+ s->options.max_prediction_order= ((int[]){ 3, 4, 4, 6, 8, 8, 8, 8, 12, 12, 12, 32, 32})[level];
+ s->options.prediction_order_method = ((int[]){ ORDER_METHOD_EST, ORDER_METHOD_EST, ORDER_METHOD_EST,
+ ORDER_METHOD_EST, ORDER_METHOD_EST, ORDER_METHOD_EST,
+ ORDER_METHOD_4LEVEL, ORDER_METHOD_LOG, ORDER_METHOD_4LEVEL,
+ ORDER_METHOD_LOG, ORDER_METHOD_SEARCH, ORDER_METHOD_LOG,
+ ORDER_METHOD_SEARCH})[level];
+ s->options.min_partition_order = ((int[]){ 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})[level];
+ s->options.max_partition_order = ((int[]){ 2, 2, 3, 3, 3, 8, 8, 8, 8, 8, 8, 8, 8})[level];
+
+ /* set compression option overrides from AVCodecContext */
+ if(avctx->use_lpc >= 0) {
+ s->options.use_lpc = clip(avctx->use_lpc, 0, 11);
+ }
+ if(s->options.use_lpc == 1)
+ av_log(avctx, AV_LOG_DEBUG, " use lpc: Levinson-Durbin recursion with Welch window\n");
+ else if(s->options.use_lpc > 1)
+ av_log(avctx, AV_LOG_DEBUG, " use lpc: Cholesky factorization\n");
+
+ if(avctx->min_prediction_order >= 0) {
+ if(s->options.use_lpc) {
+ if(avctx->min_prediction_order < MIN_LPC_ORDER ||
+ avctx->min_prediction_order > MAX_LPC_ORDER) {
+ av_log(avctx, AV_LOG_ERROR, "invalid min prediction order: %d\n",
+ avctx->min_prediction_order);
+ return -1;
+ }
+ } else {
+ if(avctx->min_prediction_order > MAX_FIXED_ORDER) {
+ av_log(avctx, AV_LOG_ERROR, "invalid min prediction order: %d\n",
+ avctx->min_prediction_order);
+ return -1;
+ }
+ }
+ s->options.min_prediction_order = avctx->min_prediction_order;
+ }
+ if(avctx->max_prediction_order >= 0) {
+ if(s->options.use_lpc) {
+ if(avctx->max_prediction_order < MIN_LPC_ORDER ||
+ avctx->max_prediction_order > MAX_LPC_ORDER) {
+ av_log(avctx, AV_LOG_ERROR, "invalid max prediction order: %d\n",
+ avctx->max_prediction_order);
+ return -1;
+ }
+ } else {
+ if(avctx->max_prediction_order > MAX_FIXED_ORDER) {
+ av_log(avctx, AV_LOG_ERROR, "invalid max prediction order: %d\n",
+ avctx->max_prediction_order);
+ return -1;
+ }
+ }
+ s->options.max_prediction_order = avctx->max_prediction_order;
+ }
+ if(s->options.max_prediction_order < s->options.min_prediction_order) {
+ av_log(avctx, AV_LOG_ERROR, "invalid prediction orders: min=%d max=%d\n",
+ s->options.min_prediction_order, s->options.max_prediction_order);
+ return -1;
+ }
+ av_log(avctx, AV_LOG_DEBUG, " prediction order: %d, %d\n",
+ s->options.min_prediction_order, s->options.max_prediction_order);
+
+ if(avctx->prediction_order_method >= 0) {
+ if(avctx->prediction_order_method > ORDER_METHOD_LOG) {
+ av_log(avctx, AV_LOG_ERROR, "invalid prediction order method: %d\n",
+ avctx->prediction_order_method);
+ return -1;
+ }
+ s->options.prediction_order_method = avctx->prediction_order_method;
+ }
+ switch(s->options.prediction_order_method) {
+ case ORDER_METHOD_EST: av_log(avctx, AV_LOG_DEBUG, " order method: %s\n",
+ "estimate"); break;
+ case ORDER_METHOD_2LEVEL: av_log(avctx, AV_LOG_DEBUG, " order method: %s\n",
+ "2-level"); break;
+ case ORDER_METHOD_4LEVEL: av_log(avctx, AV_LOG_DEBUG, " order method: %s\n",
+ "4-level"); break;
+ case ORDER_METHOD_8LEVEL: av_log(avctx, AV_LOG_DEBUG, " order method: %s\n",
+ "8-level"); break;
+ case ORDER_METHOD_SEARCH: av_log(avctx, AV_LOG_DEBUG, " order method: %s\n",
+ "full search"); break;
+ case ORDER_METHOD_LOG: av_log(avctx, AV_LOG_DEBUG, " order method: %s\n",
+ "log search"); break;
+ }
+
+ if(avctx->min_partition_order >= 0) {
+ if(avctx->min_partition_order > MAX_PARTITION_ORDER) {
+ av_log(avctx, AV_LOG_ERROR, "invalid min partition order: %d\n",
+ avctx->min_partition_order);
+ return -1;
+ }
+ s->options.min_partition_order = avctx->min_partition_order;
+ }
+ if(avctx->max_partition_order >= 0) {
+ if(avctx->max_partition_order > MAX_PARTITION_ORDER) {
+ av_log(avctx, AV_LOG_ERROR, "invalid max partition order: %d\n",
+ avctx->max_partition_order);
+ return -1;
+ }
+ s->options.max_partition_order = avctx->max_partition_order;
+ }
+ if(s->options.max_partition_order < s->options.min_partition_order) {
+ av_log(avctx, AV_LOG_ERROR, "invalid partition orders: min=%d max=%d\n",
+ s->options.min_partition_order, s->options.max_partition_order);
+ return -1;
+ }
+ av_log(avctx, AV_LOG_DEBUG, " partition order: %d, %d\n",
+ s->options.min_partition_order, s->options.max_partition_order);
+
+ if(avctx->frame_size > 0) {
+ if(avctx->frame_size < FLAC_MIN_BLOCKSIZE ||
+ avctx->frame_size > FLAC_MAX_BLOCKSIZE) {
+ av_log(avctx, AV_LOG_ERROR, "invalid block size: %d\n",
+ avctx->frame_size);
+ return -1;
+ }
+ s->blocksize = avctx->frame_size;
+ } else {
+ s->blocksize = select_blocksize(s->samplerate, s->options.block_time_ms);
+ avctx->frame_size = s->blocksize;
+ }
+ av_log(avctx, AV_LOG_DEBUG, " block size: %d\n", s->blocksize);
+
+ /* set LPC precision */
+ if(avctx->lpc_coeff_precision > 0) {
+ if(avctx->lpc_coeff_precision > MAX_LPC_PRECISION) {
+ av_log(avctx, AV_LOG_ERROR, "invalid lpc coeff precision: %d\n",
+ avctx->lpc_coeff_precision);
+ return -1;
+ }
+ s->options.lpc_coeff_precision = avctx->lpc_coeff_precision;
+ } else {
+ /* select LPC precision based on block size */
+ if( s->blocksize <= 192) s->options.lpc_coeff_precision = 7;
+ else if(s->blocksize <= 384) s->options.lpc_coeff_precision = 8;
+ else if(s->blocksize <= 576) s->options.lpc_coeff_precision = 9;
+ else if(s->blocksize <= 1152) s->options.lpc_coeff_precision = 10;
+ else if(s->blocksize <= 2304) s->options.lpc_coeff_precision = 11;
+ else if(s->blocksize <= 4608) s->options.lpc_coeff_precision = 12;
+ else if(s->blocksize <= 8192) s->options.lpc_coeff_precision = 13;
+ else if(s->blocksize <= 16384) s->options.lpc_coeff_precision = 14;
+ else s->options.lpc_coeff_precision = 15;
+ }
+ av_log(avctx, AV_LOG_DEBUG, " lpc precision: %d\n",
+ s->options.lpc_coeff_precision);
+
+ /* set maximum encoded frame size in verbatim mode */
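+ /* stereo worst case: 16 + 17 bits per sample pair (one channel gains a bit
+ after decorrelation), rounded up to whole bytes; other channel counts use a
+ rough 2 bytes per sample per channel; ~14 bytes cover the frame headers */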
+ if(s->channels == 2) {
+ s->max_framesize = 14 + ((s->blocksize * 33 + 7) >> 3);
+ } else {
+ s->max_framesize = 14 + (s->blocksize * s->channels * 2);
+ }
+
+ streaminfo = av_malloc(FLAC_STREAMINFO_SIZE);
+ write_streaminfo(s, streaminfo);
+ avctx->extradata = streaminfo;
+ avctx->extradata_size = FLAC_STREAMINFO_SIZE;
+
+ s->frame_count = 0;
+
+ avctx->coded_frame = avcodec_alloc_frame();
+ avctx->coded_frame->key_frame = 1;
+
+ return 0;
+}
+
+static void init_frame(FlacEncodeContext *s)
+{
+ int i, ch;
+ FlacFrame *frame;
+
+ frame = &s->frame;
+
+ for(i=0; i<16; i++) {
+ if(s->blocksize == flac_blocksizes[i]) {
+ frame->blocksize = flac_blocksizes[i];
+ frame->bs_code[0] = i;
+ frame->bs_code[1] = 0;
+ break;
+ }
+ }
+ if(i == 16) {
+ frame->blocksize = s->blocksize;
+ if(frame->blocksize <= 256) {
+ frame->bs_code[0] = 6;
+ frame->bs_code[1] = frame->blocksize-1;
+ } else {
+ frame->bs_code[0] = 7;
+ frame->bs_code[1] = frame->blocksize-1;
+ }
+ }
+
+ for(ch=0; ch<s->channels; ch++) {
+ frame->subframes[ch].obits = 16;
+ }
+}
+
+/**
+ * Copy channel-interleaved input samples into separate subframes
+ */
+static void copy_samples(FlacEncodeContext *s, int16_t *samples)
+{
+ int i, j, ch;
+ FlacFrame *frame;
+
+ frame = &s->frame;
+ for(i=0,j=0; i<frame->blocksize; i++) {
+ for(ch=0; ch<s->channels; ch++,j++) {
+ frame->subframes[ch].samples[i] = samples[j];
+ }
+ }
+}
+
+
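+/* Estimated size, in bits, of n residuals Rice-coded with parameter k:
+ each value costs k low bits plus a unary quotient, so roughly n*(k+1) plus
+ the summed quotients (sum>>k); the n>>1 term is a rounding correction.
+ Example: sum=1000, n=100, k=3 -> 100*4 + ((1000-50)>>3) = 400 + 118 = 518 bits. */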
+#define rice_encode_count(sum, n, k) (((n)*((k)+1))+((sum-(n>>1))>>(k)))
+
+static int find_optimal_param(uint32_t sum, int n)
+{
+ int k, k_opt;
+ uint32_t nbits[MAX_RICE_PARAM+1];
+
+ k_opt = 0;
+ nbits[0] = UINT32_MAX;
+ for(k=0; k<=MAX_RICE_PARAM; k++) {
+ nbits[k] = rice_encode_count(sum, n, k);
+ if(nbits[k] < nbits[k_opt]) {
+ k_opt = k;
+ }
+ }
+ return k_opt;
+}
+
+static uint32_t calc_optimal_rice_params(RiceContext *rc, int porder,
+ uint32_t *sums, int n, int pred_order)
+{
+ int i;
+ int k, cnt, part;
+ uint32_t all_bits;
+
+ part = (1 << porder);
+ all_bits = 0;
+
+ cnt = (n >> porder) - pred_order;
+ for(i=0; i<part; i++) {
+ if(i == 1) cnt = (n >> porder);
+ k = find_optimal_param(sums[i], cnt);
+ rc->params[i] = k;
+ all_bits += rice_encode_count(sums[i], cnt, k);
+ }
+ all_bits += (4 * part);
+
+ rc->porder = porder;
+
+ return all_bits;
+}
+
+static void calc_sums(int pmin, int pmax, uint32_t *data, int n, int pred_order,
+ uint32_t sums[][MAX_PARTITIONS])
+{
+ int i, j;
+ int parts;
+ uint32_t *res, *res_end;
+
+ /* sums for highest level */
+ parts = (1 << pmax);
+ res = &data[pred_order];
+ res_end = &data[n >> pmax];
+ for(i=0; i<parts; i++) {
+ sums[pmax][i] = 0;
+ while(res < res_end){
+ sums[pmax][i] += *(res++);
+ }
+ res_end+= n >> pmax;
+ }
+ /* sums for lower levels */
+ for(i=pmax-1; i>=pmin; i--) {
+ parts = (1 << i);
+ for(j=0; j<parts; j++) {
+ sums[i][j] = sums[i+1][2*j] + sums[i+1][2*j+1];
+ }
+ }
+}
+
+static uint32_t calc_rice_params(RiceContext *rc, int pmin, int pmax,
+ int32_t *data, int n, int pred_order)
+{
+ int i;
+ uint32_t bits[MAX_PARTITION_ORDER+1];
+ int opt_porder;
+ RiceContext tmp_rc;
+ uint32_t *udata;
+ uint32_t sums[MAX_PARTITION_ORDER+1][MAX_PARTITIONS];
+
+ assert(pmin >= 0 && pmin <= MAX_PARTITION_ORDER);
+ assert(pmax >= 0 && pmax <= MAX_PARTITION_ORDER);
+ assert(pmin <= pmax);
+
+ udata = av_malloc(n * sizeof(uint32_t));
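+ /* zigzag-map signed residuals to unsigned values (0,-1,1,-2,2,... -> 0,1,2,3,4,...)
+ so the Rice coder only has to deal with magnitudes */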
+ for(i=0; i<n; i++) {
+ udata[i] = (2*data[i]) ^ (data[i]>>31);
+ }
+
+ calc_sums(pmin, pmax, udata, n, pred_order, sums);
+
+ opt_porder = pmin;
+ bits[pmin] = UINT32_MAX;
+ for(i=pmin; i<=pmax; i++) {
+ bits[i] = calc_optimal_rice_params(&tmp_rc, i, sums[i], n, pred_order);
+ if(bits[i] <= bits[opt_porder]) {
+ opt_porder = i;
+ *rc= tmp_rc;
+ }
+ }
+
+ av_freep(&udata);
+ return bits[opt_porder];
+}
+
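+/**
+ * Limit the partition order so that the block length stays divisible by
+ * 2^porder and each partition is (roughly) at least as long as the
+ * predictor order.
+ */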
+static int get_max_p_order(int max_porder, int n, int order)
+{
+ int porder = FFMIN(max_porder, av_log2(n^(n-1)));
+ if(order > 0)
+ porder = FFMIN(porder, av_log2(n/order));
+ return porder;
+}
+
+static uint32_t calc_rice_params_fixed(RiceContext *rc, int pmin, int pmax,
+ int32_t *data, int n, int pred_order,
+ int bps)
+{
+ uint32_t bits;
+ pmin = get_max_p_order(pmin, n, pred_order);
+ pmax = get_max_p_order(pmax, n, pred_order);
+ bits = pred_order*bps + 6;
+ bits += calc_rice_params(rc, pmin, pmax, data, n, pred_order);
+ return bits;
+}
+
+static uint32_t calc_rice_params_lpc(RiceContext *rc, int pmin, int pmax,
+ int32_t *data, int n, int pred_order,
+ int bps, int precision)
+{
+ uint32_t bits;
+ pmin = get_max_p_order(pmin, n, pred_order);
+ pmax = get_max_p_order(pmax, n, pred_order);
+ bits = pred_order*bps + 4 + 5 + pred_order*precision + 6;
+ bits += calc_rice_params(rc, pmin, pmax, data, n, pred_order);
+ return bits;
+}
+
+/**
+ * Apply Welch window function to audio block
+ */
+static void apply_welch_window(const int32_t *data, int len, double *w_data)
+{
+ int i, n2;
+ double w;
+ double c;
+
+ n2 = (len >> 1);
+ c = 2.0 / (len - 1.0);
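+ /* Welch (parabolic) window: w(i) = 1 - (2i/(len-1) - 1)^2, zero at the
+ edges of the block and 1 at its centre */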
+ for(i=0; i<n2; i++) {
+ w = c*i - 1.0;
+ w = 1.0 - (w * w);
+ w_data[i] = data[i] * w;
+ w_data[len-1-i] = data[len-1-i] * w;
+ }
+}
+
+/**
+ * Calculates autocorrelation data from audio samples
+ * A Welch window function is applied before calculation.
+ */
+static void compute_autocorr(const int32_t *data, int len, int lag,
+ double *autoc)
+{
+ int i, lag_ptr;
+ double tmp[len + lag];
+ double *data1= tmp + lag;
+
+ apply_welch_window(data, len, data1);
+
+ for(i=0; i<lag; i++){
+ autoc[i] = 1.0;
+ data1[i-lag]= 0.0;
+ }
+
+ for(i=0; i<len; i++){
+ for(lag_ptr= i-lag; lag_ptr<=i; lag_ptr++){
+ autoc[i-lag_ptr] += data1[i] * data1[lag_ptr];
+ }
+ }
+}
+
+/**
+ * Levinson-Durbin recursion.
+ * Produces LPC coefficients from autocorrelation data.
+ */
+static void compute_lpc_coefs(const double *autoc, int max_order,
+ double lpc[][MAX_LPC_ORDER], double *ref)
+{
+ int i, j, i2;
+ double r, err, tmp;
+ double lpc_tmp[MAX_LPC_ORDER];
+
+ for(i=0; i<max_order; i++) lpc_tmp[i] = 0;
+ err = autoc[0];
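+ /* err tracks the prediction error power; each iteration derives the next
+ reflection (PARCOR) coefficient r and updates the order-(i+1) coefficient
+ set from the order-i one, storing every intermediate order in lpc[i][] */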
+
+ for(i=0; i<max_order; i++) {
+ r = -autoc[i+1];
+ for(j=0; j<i; j++) {
+ r -= lpc_tmp[j] * autoc[i-j];
+ }
+ r /= err;
+ ref[i] = fabs(r);
+
+ err *= 1.0 - (r * r);
+
+ i2 = (i >> 1);
+ lpc_tmp[i] = r;
+ for(j=0; j<i2; j++) {
+ tmp = lpc_tmp[j];
+ lpc_tmp[j] += r * lpc_tmp[i-1-j];
+ lpc_tmp[i-1-j] += r * tmp;
+ }
+ if(i & 1) {
+ lpc_tmp[j] += lpc_tmp[j] * r;
+ }
+
+ for(j=0; j<=i; j++) {
+ lpc[i][j] = -lpc_tmp[j];
+ }
+ }
+}
+
+/**
+ * Quantize LPC coefficients
+ */
+static void quantize_lpc_coefs(double *lpc_in, int order, int precision,
+ int32_t *lpc_out, int *shift)
+{
+ int i;
+ double cmax, error;
+ int32_t qmax;
+ int sh;
+
+ /* define maximum levels */
+ qmax = (1 << (precision - 1)) - 1;
+
+ /* find maximum coefficient value */
+ cmax = 0.0;
+ for(i=0; i<order; i++) {
+ cmax= FFMAX(cmax, fabs(lpc_in[i]));
+ }
+
+ /* if maximum value quantizes to zero, return all zeros */
+ if(cmax * (1 << MAX_LPC_SHIFT) < 1.0) {
+ *shift = 0;
+ memset(lpc_out, 0, sizeof(int32_t) * order);
+ return;
+ }
+
+ /* calculate level shift which scales max coeff to available bits */
+ sh = MAX_LPC_SHIFT;
+ while((cmax * (1 << sh) > qmax) && (sh > 0)) {
+ sh--;
+ }
+
+ /* since negative shift values are unsupported in decoder, scale down
+ coefficients instead */
+ if(sh == 0 && cmax > qmax) {
+ double scale = ((double)qmax) / cmax;
+ for(i=0; i<order; i++) {
+ lpc_in[i] *= scale;
+ }
+ }
+
+ /* output quantized coefficients and level shift */
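+ /* quantize with error feedback: the rounding error of each coefficient is
+ carried over into the next one so it does not accumulate */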
+ error=0;
+ for(i=0; i<order; i++) {
+ error += lpc_in[i] * (1 << sh);
+ lpc_out[i] = clip(lrintf(error), -qmax, qmax);
+ error -= lpc_out[i];
+ }
+ *shift = sh;
+}
+
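+/**
+ * Estimate the best LPC order as the highest order whose reflection
+ * coefficient magnitude still exceeds 0.10; smaller coefficients add
+ * little prediction gain.
+ */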
+static int estimate_best_order(double *ref, int max_order)
+{
+ int i, est;
+
+ est = 1;
+ for(i=max_order-1; i>=0; i--) {
+ if(ref[i] > 0.10) {
+ est = i+1;
+ break;
+ }
+ }
+ return est;
+}
+
+/**
+ * Calculate LPC coefficients for multiple orders
+ */
+static int lpc_calc_coefs(const int32_t *samples, int blocksize, int max_order,
+ int precision, int32_t coefs[][MAX_LPC_ORDER],
+ int *shift, int use_lpc, int omethod)
+{
+ double autoc[MAX_LPC_ORDER+1];
+ double ref[MAX_LPC_ORDER];
+ double lpc[MAX_LPC_ORDER][MAX_LPC_ORDER];
+ int i, j, pass;
+ int opt_order;
+
+ assert(max_order >= MIN_LPC_ORDER && max_order <= MAX_LPC_ORDER);
+
+ if(use_lpc == 1){
+ compute_autocorr(samples, blocksize, max_order+1, autoc);
+
+ compute_lpc_coefs(autoc, max_order, lpc, ref);
+ }else{
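+ /* use_lpc > 1: iteratively re-weighted least-squares fit via av_init_lls /
+ av_update_lls / av_solve_lls; later passes down-weight samples by the
+ prediction error of the previous pass */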
+ LLSModel m[2];
+ double var[MAX_LPC_ORDER+1], eval, weight;
+
+ for(pass=0; pass<use_lpc-1; pass++){
+ av_init_lls(&m[pass&1], max_order);
+
+ weight=0;
+ for(i=max_order; i<blocksize; i++){
+ for(j=0; j<=max_order; j++)
+ var[j]= samples[i-j];
+
+ if(pass){
+ eval= av_evaluate_lls(&m[(pass-1)&1], var+1, max_order-1);
+ eval= (512>>pass) + fabs(eval - var[0]);
+ for(j=0; j<=max_order; j++)
+ var[j]/= sqrt(eval);
+ weight += 1/eval;
+ }else
+ weight++;
+
+ av_update_lls(&m[pass&1], var, 1.0);
+ }
+ av_solve_lls(&m[pass&1], 0.001, 0);
+ }
+
+ for(i=0; i<max_order; i++){
+ for(j=0; j<max_order; j++)
+ lpc[i][j]= m[(pass-1)&1].coeff[i][j];
+ ref[i]= sqrt(m[(pass-1)&1].variance[i] / weight) * (blocksize - max_order) / 4000;
+ }
+ for(i=max_order-1; i>0; i--)
+ ref[i] = ref[i-1] - ref[i];
+ }
+ opt_order = max_order;
+
+ if(omethod == ORDER_METHOD_EST) {
+ opt_order = estimate_best_order(ref, max_order);
+ i = opt_order-1;
+ quantize_lpc_coefs(lpc[i], i+1, precision, coefs[i], &shift[i]);
+ } else {
+ for(i=0; i<max_order; i++) {
+ quantize_lpc_coefs(lpc[i], i+1, precision, coefs[i], &shift[i]);
+ }
+ }
+
+ return opt_order;
+}
+
+
+static void encode_residual_verbatim(int32_t *res, int32_t *smp, int n)
+{
+ assert(n > 0);
+ memcpy(res, smp, n * sizeof(int32_t));
+}
+
+static void encode_residual_fixed(int32_t *res, const int32_t *smp, int n,
+ int order)
+{
+ int i;
+
+ for(i=0; i<order; i++) {
+ res[i] = smp[i];
+ }
+
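+ /* FLAC fixed predictors are just order-N differences of the signal
+ (binomial coefficients: 1; 1 -1; 1 -2 1; 1 -3 3 -1; 1 -4 6 -4 1) */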
+ if(order==0){
+ for(i=order; i<n; i++)
+ res[i]= smp[i];
+ }else if(order==1){
+ for(i=order; i<n; i++)
+ res[i]= smp[i] - smp[i-1];
+ }else if(order==2){
+ for(i=order; i<n; i++)
+ res[i]= smp[i] - 2*smp[i-1] + smp[i-2];
+ }else if(order==3){
+ for(i=order; i<n; i++)
+ res[i]= smp[i] - 3*smp[i-1] + 3*smp[i-2] - smp[i-3];
+ }else{
+ for(i=order; i<n; i++)
+ res[i]= smp[i] - 4*smp[i-1] + 6*smp[i-2] - 4*smp[i-3] + smp[i-4];
+ }
+}
+
+static void encode_residual_lpc(int32_t *res, const int32_t *smp, int n,
+ int order, const int32_t *coefs, int shift)
+{
+ int i, j;
+ int32_t pred;
+
+ for(i=0; i<order; i++) {
+ res[i] = smp[i];
+ }
+ for(i=order; i<n; i++) {
+ pred = 0;
+ for(j=0; j<order; j++) {
+ pred += coefs[j] * smp[i-j-1];
+ }
+ res[i] = smp[i] - (pred >> shift);
+ }
+}
+
+static int encode_residual(FlacEncodeContext *ctx, int ch)
+{
+ int i, n;
+ int min_order, max_order, opt_order, precision, omethod;
+ int min_porder, max_porder;
+ FlacFrame *frame;
+ FlacSubframe *sub;
+ int32_t coefs[MAX_LPC_ORDER][MAX_LPC_ORDER];
+ int shift[MAX_LPC_ORDER];
+ int32_t *res, *smp;
+
+ frame = &ctx->frame;
+ sub = &frame->subframes[ch];
+ res = sub->residual;
+ smp = sub->samples;
+ n = frame->blocksize;
+
+ /* CONSTANT */
+ for(i=1; i<n; i++) {
+ if(smp[i] != smp[0]) break;
+ }
+ if(i == n) {
+ sub->type = sub->type_code = FLAC_SUBFRAME_CONSTANT;
+ res[0] = smp[0];
+ return sub->obits;
+ }
+
+ /* VERBATIM */
+ if(n < 5) {
+ sub->type = sub->type_code = FLAC_SUBFRAME_VERBATIM;
+ encode_residual_verbatim(res, smp, n);
+ return sub->obits * n;
+ }
+
+ min_order = ctx->options.min_prediction_order;
+ max_order = ctx->options.max_prediction_order;
+ min_porder = ctx->options.min_partition_order;
+ max_porder = ctx->options.max_partition_order;
+ precision = ctx->options.lpc_coeff_precision;
+ omethod = ctx->options.prediction_order_method;
+
+ /* FIXED */
+ if(!ctx->options.use_lpc || max_order == 0 || (n <= max_order)) {
+ uint32_t bits[MAX_FIXED_ORDER+1];
+ if(max_order > MAX_FIXED_ORDER) max_order = MAX_FIXED_ORDER;
+ opt_order = 0;
+ bits[0] = UINT32_MAX;
+ for(i=min_order; i<=max_order; i++) {
+ encode_residual_fixed(res, smp, n, i);
+ bits[i] = calc_rice_params_fixed(&sub->rc, min_porder, max_porder, res,
+ n, i, sub->obits);
+ if(bits[i] < bits[opt_order]) {
+ opt_order = i;
+ }
+ }
+ sub->order = opt_order;
+ sub->type = FLAC_SUBFRAME_FIXED;
+ sub->type_code = sub->type | sub->order;
+ if(sub->order != max_order) {
+ encode_residual_fixed(res, smp, n, sub->order);
+ return calc_rice_params_fixed(&sub->rc, min_porder, max_porder, res, n,
+ sub->order, sub->obits);
+ }
+ return bits[sub->order];
+ }
+
+ /* LPC */
+ opt_order = lpc_calc_coefs(smp, n, max_order, precision, coefs, shift, ctx->options.use_lpc, omethod);
+
+ if(omethod == ORDER_METHOD_2LEVEL ||
+ omethod == ORDER_METHOD_4LEVEL ||
+ omethod == ORDER_METHOD_8LEVEL) {
+ int levels = 1 << omethod;
+ uint32_t bits[levels];
+ int order;
+ int opt_index = levels-1;
+ opt_order = max_order-1;
+ bits[opt_index] = UINT32_MAX;
+ for(i=levels-1; i>=0; i--) {
+ order = min_order + (((max_order-min_order+1) * (i+1)) / levels)-1;
+ if(order < 0) order = 0;
+ encode_residual_lpc(res, smp, n, order+1, coefs[order], shift[order]);
+ bits[i] = calc_rice_params_lpc(&sub->rc, min_porder, max_porder,
+ res, n, order+1, sub->obits, precision);
+ if(bits[i] < bits[opt_index]) {
+ opt_index = i;
+ opt_order = order;
+ }
+ }
+ opt_order++;
+ } else if(omethod == ORDER_METHOD_SEARCH) {
+ // brute-force optimal order search
+ uint32_t bits[MAX_LPC_ORDER];
+ opt_order = 0;
+ bits[0] = UINT32_MAX;
+ for(i=min_order-1; i<max_order; i++) {
+ encode_residual_lpc(res, smp, n, i+1, coefs[i], shift[i]);
+ bits[i] = calc_rice_params_lpc(&sub->rc, min_porder, max_porder,
+ res, n, i+1, sub->obits, precision);
+ if(bits[i] < bits[opt_order]) {
+ opt_order = i;
+ }
+ }
+ opt_order++;
+ } else if(omethod == ORDER_METHOD_LOG) {
+ uint32_t bits[MAX_LPC_ORDER];
+ int step;
+
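+ /* coarse-to-fine search: start near one third of the order range, then
+ probe +/-step around the current best with halving step sizes, skipping
+ orders that were already evaluated */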
+ opt_order= min_order - 1 + (max_order-min_order)/3;
+ memset(bits, -1, sizeof(bits));
+
+ for(step=16 ;step; step>>=1){
+ int last= opt_order;
+ for(i=last-step; i<=last+step; i+= step){
+ if(i<min_order-1 || i>=max_order || bits[i] < UINT32_MAX)
+ continue;
+ encode_residual_lpc(res, smp, n, i+1, coefs[i], shift[i]);
+ bits[i] = calc_rice_params_lpc(&sub->rc, min_porder, max_porder,
+ res, n, i+1, sub->obits, precision);
+ if(bits[i] < bits[opt_order])
+ opt_order= i;
+ }
+ }
+ opt_order++;
+ }
+
+ sub->order = opt_order;
+ sub->type = FLAC_SUBFRAME_LPC;
+ sub->type_code = sub->type | (sub->order-1);
+ sub->shift = shift[sub->order-1];
+ for(i=0; i<sub->order; i++) {
+ sub->coefs[i] = coefs[sub->order-1][i];
+ }
+ encode_residual_lpc(res, smp, n, sub->order, sub->coefs, sub->shift);
+ return calc_rice_params_lpc(&sub->rc, min_porder, max_porder, res, n, sub->order,
+ sub->obits, precision);
+}
+
+static int encode_residual_v(FlacEncodeContext *ctx, int ch)
+{
+ int i, n;
+ FlacFrame *frame;
+ FlacSubframe *sub;
+ int32_t *res, *smp;
+
+ frame = &ctx->frame;
+ sub = &frame->subframes[ch];
+ res = sub->residual;
+ smp = sub->samples;
+ n = frame->blocksize;
+
+ /* CONSTANT */
+ for(i=1; i<n; i++) {
+ if(smp[i] != smp[0]) break;
+ }
+ if(i == n) {
+ sub->type = sub->type_code = FLAC_SUBFRAME_CONSTANT;
+ res[0] = smp[0];
+ return sub->obits;
+ }
+
+ /* VERBATIM */
+ sub->type = sub->type_code = FLAC_SUBFRAME_VERBATIM;
+ encode_residual_verbatim(res, smp, n);
+ return sub->obits * n;
+}
+
+static int estimate_stereo_mode(int32_t *left_ch, int32_t *right_ch, int n)
+{
+ int i, best;
+ int32_t lt, rt;
+ uint64_t sum[4];
+ uint64_t score[4];
+ int k;
+
+ /* calculate sum of 2nd order residual for each channel */
+ sum[0] = sum[1] = sum[2] = sum[3] = 0;
+ for(i=2; i<n; i++) {
+ lt = left_ch[i] - 2*left_ch[i-1] + left_ch[i-2];
+ rt = right_ch[i] - 2*right_ch[i-1] + right_ch[i-2];
+ sum[2] += FFABS((lt + rt) >> 1);
+ sum[3] += FFABS(lt - rt);
+ sum[0] += FFABS(lt);
+ sum[1] += FFABS(rt);
+ }
+ /* estimate bit counts */
+ for(i=0; i<4; i++) {
+ k = find_optimal_param(2*sum[i], n);
+ sum[i] = rice_encode_count(2*sum[i], n, k);
+ }
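+ /* sum[0..3] now hold estimated bit counts for the left, right, mid and
+ side signals respectively */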
+
+ /* calculate score for each mode */
+ score[0] = sum[0] + sum[1];
+ score[1] = sum[0] + sum[3];
+ score[2] = sum[1] + sum[3];
+ score[3] = sum[2] + sum[3];
+
+ /* return mode with lowest score */
+ best = 0;
+ for(i=1; i<4; i++) {
+ if(score[i] < score[best]) {
+ best = i;
+ }
+ }
+ if(best == 0) {
+ return FLAC_CHMODE_LEFT_RIGHT;
+ } else if(best == 1) {
+ return FLAC_CHMODE_LEFT_SIDE;
+ } else if(best == 2) {
+ return FLAC_CHMODE_RIGHT_SIDE;
+ } else {
+ return FLAC_CHMODE_MID_SIDE;
+ }
+}
+
+/**
+ * Perform stereo channel decorrelation
+ */
+static void channel_decorrelation(FlacEncodeContext *ctx)
+{
+ FlacFrame *frame;
+ int32_t *left, *right;
+ int i, n;
+
+ frame = &ctx->frame;
+ n = frame->blocksize;
+ left = frame->subframes[0].samples;
+ right = frame->subframes[1].samples;
+
+ if(ctx->channels != 2) {
+ frame->ch_mode = FLAC_CHMODE_NOT_STEREO;
+ return;
+ }
+
+ frame->ch_mode = estimate_stereo_mode(left, right, n);
+
+ /* perform decorrelation and adjust bits-per-sample */
+ if(frame->ch_mode == FLAC_CHMODE_LEFT_RIGHT) {
+ return;
+ }
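+ /* mid/side: mid = (L+R)>>1, side = L-R; the bit dropped from mid is
+ recoverable from the parity of side, so the transform is lossless, but
+ the side channel needs one extra bit (hence obits++) */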
+ if(frame->ch_mode == FLAC_CHMODE_MID_SIDE) {
+ int32_t tmp;
+ for(i=0; i<n; i++) {
+ tmp = left[i];
+ left[i] = (tmp + right[i]) >> 1;
+ right[i] = tmp - right[i];
+ }
+ frame->subframes[1].obits++;
+ } else if(frame->ch_mode == FLAC_CHMODE_LEFT_SIDE) {
+ for(i=0; i<n; i++) {
+ right[i] = left[i] - right[i];
+ }
+ frame->subframes[1].obits++;
+ } else {
+ for(i=0; i<n; i++) {
+ left[i] -= right[i];
+ }
+ frame->subframes[0].obits++;
+ }
+}
+
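+/* write a signed value as a two's-complement field of 'bits' bits */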
+static void put_sbits(PutBitContext *pb, int bits, int32_t val)
+{
+ assert(bits >= 0 && bits <= 31);
+
+ put_bits(pb, bits, val & ((1<<bits)-1));
+}
+
+static void write_utf8(PutBitContext *pb, uint32_t val)
+{
+ uint8_t tmp;
+ PUT_UTF8(val, tmp, put_bits(pb, 8, tmp);)
+}
+
+static void output_frame_header(FlacEncodeContext *s)
+{
+ FlacFrame *frame;
+ int crc;
+
+ frame = &s->frame;
+
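+ /* 0xFFF8: the 14-bit frame sync code (11111111111110) followed by two
+ zero bits */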
+ put_bits(&s->pb, 16, 0xFFF8);
+ put_bits(&s->pb, 4, frame->bs_code[0]);
+ put_bits(&s->pb, 4, s->sr_code[0]);
+ if(frame->ch_mode == FLAC_CHMODE_NOT_STEREO) {
+ put_bits(&s->pb, 4, s->ch_code);
+ } else {
+ put_bits(&s->pb, 4, frame->ch_mode);
+ }
+ put_bits(&s->pb, 3, 4); /* bits-per-sample code */
+ put_bits(&s->pb, 1, 0);
+ write_utf8(&s->pb, s->frame_count);
+ if(frame->bs_code[0] == 6) {
+ put_bits(&s->pb, 8, frame->bs_code[1]);
+ } else if(frame->bs_code[0] == 7) {
+ put_bits(&s->pb, 16, frame->bs_code[1]);
+ }
+ if(s->sr_code[0] == 12) {
+ put_bits(&s->pb, 8, s->sr_code[1]);
+ } else if(s->sr_code[0] > 12) {
+ put_bits(&s->pb, 16, s->sr_code[1]);
+ }
+ flush_put_bits(&s->pb);
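+ /* CRC-8 (polynomial 0x07) over the header bytes written so far */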
+ crc = av_crc(av_crc07, 0, s->pb.buf, put_bits_count(&s->pb)>>3);
+ put_bits(&s->pb, 8, crc);
+}
+
+static void output_subframe_constant(FlacEncodeContext *s, int ch)
+{
+ FlacSubframe *sub;
+ int32_t res;
+
+ sub = &s->frame.subframes[ch];
+ res = sub->residual[0];
+ put_sbits(&s->pb, sub->obits, res);
+}
+
+static void output_subframe_verbatim(FlacEncodeContext *s, int ch)
+{
+ int i;
+ FlacFrame *frame;
+ FlacSubframe *sub;
+ int32_t res;
+
+ frame = &s->frame;
+ sub = &frame->subframes[ch];
+
+ for(i=0; i<frame->blocksize; i++) {
+ res = sub->residual[i];
+ put_sbits(&s->pb, sub->obits, res);
+ }
+}
+
+static void output_residual(FlacEncodeContext *ctx, int ch)
+{
+ int i, j, p, n, parts;
+ int k, porder, psize, res_cnt;
+ FlacFrame *frame;
+ FlacSubframe *sub;
+ int32_t *res;
+
+ frame = &ctx->frame;
+ sub = &frame->subframes[ch];
+ res = sub->residual;
+ n = frame->blocksize;
+
+ /* rice-encoded block */
+ put_bits(&ctx->pb, 2, 0);
+
+ /* partition order */
+ porder = sub->rc.porder;
+ psize = n >> porder;
+ parts = (1 << porder);
+ put_bits(&ctx->pb, 4, porder);
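+ /* the first partition is shorter by 'order' samples, since the warm-up
+ samples are written separately and not Rice-coded */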
+ res_cnt = psize - sub->order;
+
+ /* residual */
+ j = sub->order;
+ for(p=0; p<parts; p++) {
+ k = sub->rc.params[p];
+ put_bits(&ctx->pb, 4, k);
+ if(p == 1) res_cnt = psize;
+ for(i=0; i<res_cnt && j<n; i++, j++) {
+ set_sr_golomb_flac(&ctx->pb, res[j], k, INT32_MAX, 0);
+ }
+ }
+}
+
+static void output_subframe_fixed(FlacEncodeContext *ctx, int ch)
+{
+ int i;
+ FlacFrame *frame;
+ FlacSubframe *sub;
+
+ frame = &ctx->frame;
+ sub = &frame->subframes[ch];
+
+ /* warm-up samples */
+ for(i=0; i<sub->order; i++) {
+ put_sbits(&ctx->pb, sub->obits, sub->residual[i]);
+ }
+
+ /* residual */
+ output_residual(ctx, ch);
+}
+
+static void output_subframe_lpc(FlacEncodeContext *ctx, int ch)
+{
+ int i, cbits;
+ FlacFrame *frame;
+ FlacSubframe *sub;
+
+ frame = &ctx->frame;
+ sub = &frame->subframes[ch];
+
+ /* warm-up samples */
+ for(i=0; i<sub->order; i++) {
+ put_sbits(&ctx->pb, sub->obits, sub->residual[i]);
+ }
+
+ /* LPC coefficients */
+ cbits = ctx->options.lpc_coeff_precision;
+ put_bits(&ctx->pb, 4, cbits-1);
+ put_sbits(&ctx->pb, 5, sub->shift);
+ for(i=0; i<sub->order; i++) {
+ put_sbits(&ctx->pb, cbits, sub->coefs[i]);
+ }
+
+ /* residual */
+ output_residual(ctx, ch);
+}
+
+static void output_subframes(FlacEncodeContext *s)
+{
+ FlacFrame *frame;
+ FlacSubframe *sub;
+ int ch;
+
+ frame = &s->frame;
+
+ for(ch=0; ch<s->channels; ch++) {
+ sub = &frame->subframes[ch];
+
+ /* subframe header */
+ put_bits(&s->pb, 1, 0);
+ put_bits(&s->pb, 6, sub->type_code);
+ put_bits(&s->pb, 1, 0); /* no wasted bits */
+
+ /* subframe */
+ if(sub->type == FLAC_SUBFRAME_CONSTANT) {
+ output_subframe_constant(s, ch);
+ } else if(sub->type == FLAC_SUBFRAME_VERBATIM) {
+ output_subframe_verbatim(s, ch);
+ } else if(sub->type == FLAC_SUBFRAME_FIXED) {
+ output_subframe_fixed(s, ch);
+ } else if(sub->type == FLAC_SUBFRAME_LPC) {
+ output_subframe_lpc(s, ch);
+ }
+ }
+}
+
+static void output_frame_footer(FlacEncodeContext *s)
+{
+ int crc;
+ flush_put_bits(&s->pb);
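+ /* 16-bit frame CRC (polynomial 0x8005) over all frame bytes written so far */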
+ crc = bswap_16(av_crc(av_crc8005, 0, s->pb.buf, put_bits_count(&s->pb)>>3));
+ put_bits(&s->pb, 16, crc);
+ flush_put_bits(&s->pb);
+}
+
+static int flac_encode_frame(AVCodecContext *avctx, uint8_t *frame,
+ int buf_size, void *data)
+{
+ int ch;
+ FlacEncodeContext *s;
+ int16_t *samples = data;
+ int out_bytes;
+
+ s = avctx->priv_data;
+
+ s->blocksize = avctx->frame_size;
+ init_frame(s);
+
+ copy_samples(s, samples);
+
+ channel_decorrelation(s);
+
+ for(ch=0; ch<s->channels; ch++) {
+ encode_residual(s, ch);
+ }
+ init_put_bits(&s->pb, frame, buf_size);
+ output_frame_header(s);
+ output_subframes(s);
+ output_frame_footer(s);
+ out_bytes = put_bits_count(&s->pb) >> 3;
+
+ if(out_bytes > s->max_framesize || out_bytes >= buf_size) {
+ /* frame too large. use verbatim mode */
+ for(ch=0; ch<s->channels; ch++) {
+ encode_residual_v(s, ch);
+ }
+ init_put_bits(&s->pb, frame, buf_size);
+ output_frame_header(s);
+ output_subframes(s);
+ output_frame_footer(s);
+ out_bytes = put_bits_count(&s->pb) >> 3;
+
+ if(out_bytes > s->max_framesize || out_bytes >= buf_size) {
+ /* still too large. must be an error. */
+ av_log(avctx, AV_LOG_ERROR, "error encoding frame\n");
+ return -1;
+ }
+ }
+
+ s->frame_count++;
+ return out_bytes;
+}
+
+static int flac_encode_close(AVCodecContext *avctx)
+{
+ av_freep(&avctx->extradata);
+ avctx->extradata_size = 0;
+ av_freep(&avctx->coded_frame);
+ return 0;
+}
+
+AVCodec flac_encoder = {
+ "flac",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_FLAC,
+ sizeof(FlacEncodeContext),
+ flac_encode_init,
+ flac_encode_frame,
+ flac_encode_close,
+ NULL,
+ .capabilities = CODEC_CAP_SMALL_LAST_FRAME,
+};
diff --git a/src/libffmpeg/libavcodec/flashsv.c b/contrib/ffmpeg/libavcodec/flashsv.c
index 3214d1860..fea8e2224 100644
--- a/src/libffmpeg/libavcodec/flashsv.c
+++ b/contrib/ffmpeg/libavcodec/flashsv.c
@@ -3,18 +3,20 @@
* Copyright (C) 2004 Alex Beregszaszi
* Copyright (C) 2006 Benjamin Larsson
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/flicvideo.c b/contrib/ffmpeg/libavcodec/flicvideo.c
index fa128d0d3..95cb26ce4 100644
--- a/src/libffmpeg/libavcodec/flicvideo.c
+++ b/contrib/ffmpeg/libavcodec/flicvideo.c
@@ -2,18 +2,20 @@
* FLI/FLC Animation Video Decoder
* Copyright (C) 2003, 2004 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -154,7 +156,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
int starting_line;
signed short line_packets;
int y_ptr;
- signed char byte_run;
+ int byte_run;
int pixel_skip;
int pixel_countdown;
unsigned char *pixels;
@@ -188,7 +190,6 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
case FLI_256_COLOR:
case FLI_COLOR:
stream_ptr_after_color_chunk = stream_ptr + chunk_size - 6;
- s->new_palette = 1;
/* check special case: If this file is from the Magic Carpet
* game and uses 6-bit colors even though it reports 256-color
@@ -214,6 +215,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
color_changes = 256;
for (j = 0; j < color_changes; j++) {
+ unsigned int entry;
/* wrap around, for good measure */
if ((unsigned)palette_ptr >= 256)
@@ -222,7 +224,10 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
r = buf[stream_ptr++] << color_shift;
g = buf[stream_ptr++] << color_shift;
b = buf[stream_ptr++] << color_shift;
- s->palette[palette_ptr++] = (r << 16) | (g << 8) | b;
+ entry = (r << 16) | (g << 8) | b;
+ if (s->palette[palette_ptr] != entry)
+ s->new_palette = 1;
+ s->palette[palette_ptr++] = entry;
}
}
@@ -241,9 +246,15 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
while (compressed_lines > 0) {
line_packets = LE_16(&buf[stream_ptr]);
stream_ptr += 2;
- if (line_packets < 0) {
+ if ((line_packets & 0xC000) == 0xC000) {
+ // line skip opcode
line_packets = -line_packets;
y_ptr += line_packets * s->frame.linesize[0];
+ } else if ((line_packets & 0xC000) == 0x4000) {
+ av_log(avctx, AV_LOG_ERROR, "Undefined opcode (%x) in DELTA_FLI\n", line_packets);
+ } else if ((line_packets & 0xC000) == 0x8000) {
+ // "last byte" opcode
+ pixels[y_ptr + s->frame.linesize[0] - 1] = line_packets & 0xff;
} else {
compressed_lines--;
pixel_ptr = y_ptr;
@@ -253,7 +264,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
pixel_skip = buf[stream_ptr++];
pixel_ptr += pixel_skip;
pixel_countdown -= pixel_skip;
- byte_run = buf[stream_ptr++];
+ byte_run = (signed char)(buf[stream_ptr++]);
if (byte_run < 0) {
byte_run = -byte_run;
palette_idx1 = buf[stream_ptr++];
@@ -296,14 +307,14 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
pixel_skip = buf[stream_ptr++];
pixel_ptr += pixel_skip;
pixel_countdown -= pixel_skip;
- byte_run = buf[stream_ptr++];
+ byte_run = (signed char)(buf[stream_ptr++]);
if (byte_run > 0) {
CHECK_PIXEL_PTR(byte_run);
for (j = 0; j < byte_run; j++, pixel_countdown--) {
palette_idx1 = buf[stream_ptr++];
pixels[pixel_ptr++] = palette_idx1;
}
- } else {
+ } else if (byte_run < 0) {
byte_run = -byte_run;
palette_idx1 = buf[stream_ptr++];
CHECK_PIXEL_PTR(byte_run);
@@ -336,7 +347,7 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
stream_ptr++;
pixel_countdown = s->avctx->width;
while (pixel_countdown > 0) {
- byte_run = buf[stream_ptr++];
+ byte_run = (signed char)(buf[stream_ptr++]);
if (byte_run > 0) {
palette_idx1 = buf[stream_ptr++];
CHECK_PIXEL_PTR(byte_run);
@@ -402,9 +413,8 @@ static int flic_decode_frame_8BPP(AVCodecContext *avctx,
"and final chunk ptr = %d\n", buf_size, stream_ptr);
/* make the palette available on the way out */
-// if (s->new_palette) {
- if (1) {
- memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
+ if (s->new_palette) {
s->frame.palette_has_changed = 1;
s->new_palette = 0;
}
@@ -439,7 +449,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
int compressed_lines;
signed short line_packets;
int y_ptr;
- signed char byte_run;
+ int byte_run;
int pixel_skip;
int pixel_countdown;
unsigned char *pixels;
@@ -499,7 +509,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
pixel_skip = buf[stream_ptr++];
pixel_ptr += (pixel_skip*2); /* Pixel is 2 bytes wide */
pixel_countdown -= pixel_skip;
- byte_run = buf[stream_ptr++];
+ byte_run = (signed char)(buf[stream_ptr++]);
if (byte_run < 0) {
byte_run = -byte_run;
pixel = LE_16(&buf[stream_ptr]);
@@ -530,9 +540,9 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
break;
case FLI_BLACK:
- /* set the whole frame to 0x0000 which is balck in both 15Bpp and 16Bpp modes. */
+ /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. */
memset(pixels, 0x0000,
- s->frame.linesize[0] * s->avctx->height * 2);
+ s->frame.linesize[0] * s->avctx->height);
break;
case FLI_BRUN:
@@ -545,7 +555,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
pixel_countdown = (s->avctx->width * 2);
while (pixel_countdown > 0) {
- byte_run = buf[stream_ptr++];
+ byte_run = (signed char)(buf[stream_ptr++]);
if (byte_run > 0) {
palette_idx1 = buf[stream_ptr++];
CHECK_PIXEL_PTR(byte_run);
@@ -599,7 +609,7 @@ static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */
while (pixel_countdown > 0) {
- byte_run = buf[stream_ptr++];
+ byte_run = (signed char)(buf[stream_ptr++]);
if (byte_run > 0) {
pixel = LE_16(&buf[stream_ptr]);
stream_ptr += 2;
diff --git a/src/libffmpeg/libavcodec/fraps.c b/contrib/ffmpeg/libavcodec/fraps.c
index d107e47b1..18d270049 100644
--- a/src/libffmpeg/libavcodec/fraps.c
+++ b/contrib/ffmpeg/libavcodec/fraps.c
@@ -1,19 +1,22 @@
/*
* Fraps FPS1 decoder
* Copyright (c) 2005 Roine Gustafsson
+ * Copyright (c) 2006 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -22,24 +25,41 @@
* @file fraps.c
* Lossless Fraps 'FPS1' decoder
* @author Roine Gustafsson <roine at users sf net>
+ * @author Konstantin Shishkov
*
- * Only decodes version 0 and 1 files.
* Codec algorithm for version 0 is taken from Transcode <www.transcoding.org>
*
- * Version 2 files, which are the most commonly found Fraps files, cannot be
- * decoded yet.
+ * Version 2 files support by Konstantin Shishkov
*/
#include "avcodec.h"
+#include "bitstream.h"
+#include "dsputil.h"
#define FPS_TAG MKTAG('F', 'P', 'S', 'x')
+/* symbol for Huffman tree node */
+#define HNODE -1
+
+/**
+ * Huffman node
+ * FIXME one day this should belong to one general framework
+ */
+typedef struct Node{
+ int16_t sym;
+ int16_t n0;
+ int count;
+}Node;
+
/**
* local variable storage
*/
typedef struct FrapsContext{
AVCodecContext *avctx;
AVFrame frame;
+ Node nodes[512];
+ uint8_t *tmpbuf;
+ DSPContext dsp;
} FrapsContext;
@@ -58,10 +78,117 @@ static int decode_init(AVCodecContext *avctx)
s->avctx = avctx;
s->frame.data[0] = NULL;
+ s->tmpbuf = NULL;
+
+ dsputil_init(&s->dsp, avctx);
return 0;
}
+/**
+ * Comparator - our nodes should ascend by count
+ * but with preserved symbol order
+ */
+static int huff_cmp(const Node *a, const Node *b){
+ return (a->count - b->count)*256 + a->sym - b->sym;
+}
+
+static void get_tree_codes(uint32_t *bits, int16_t *lens, uint8_t *xlat, Node *nodes, int node, uint32_t pfx, int pl, int *pos)
+{
+ int s;
+
+ s = nodes[node].sym;
+ if(s != HNODE || !nodes[node].count){
+ bits[*pos] = pfx;
+ lens[*pos] = pl;
+ xlat[*pos] = s;
+ (*pos)++;
+ }else{
+ pfx <<= 1;
+ pl++;
+ get_tree_codes(bits, lens, xlat, nodes, nodes[node].n0, pfx, pl, pos);
+ pfx |= 1;
+ get_tree_codes(bits, lens, xlat, nodes, nodes[node].n0+1, pfx, pl, pos);
+ }
+}
+
+static int build_huff_tree(VLC *vlc, Node *nodes, uint8_t *xlat)
+{
+ uint32_t bits[256];
+ int16_t lens[256];
+ int pos = 0;
+
+ get_tree_codes(bits, lens, xlat, nodes, 510, 0, 0, &pos);
+ return init_vlc(vlc, 9, pos, lens, 2, 2, bits, 4, 4, 0);
+}
+
+
+/**
+ * decode Fraps v2 packed plane
+ */
+static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
+ int h, uint8_t *src, int size, int Uoff)
+{
+ int i, j;
+ int cur_node;
+ GetBitContext gb;
+ VLC vlc;
+ int64_t sum = 0;
+ uint8_t recode[256];
+
+ for(i = 0; i < 256; i++){
+ s->nodes[i].sym = i;
+ s->nodes[i].count = LE_32(src);
+ s->nodes[i].n0 = -2;
+ if(s->nodes[i].count < 0) {
+ av_log(s->avctx, AV_LOG_ERROR, "Symbol count < 0\n");
+ return -1;
+ }
+ src += 4;
+ sum += s->nodes[i].count;
+ }
+ size -= 1024;
+
+ if(sum >> 31) {
+ av_log(s->avctx, AV_LOG_ERROR, "Too high symbol frequencies. Tree construction is not possible\n");
+ return -1;
+ }
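+ /* leaves 0..255 are sorted by count; repeatedly merge the two smallest
+ remaining nodes into a new internal node (indices 256..510) and insert it
+ in sorted position, so node 510 becomes the root of the tree */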
+ qsort(s->nodes, 256, sizeof(Node), huff_cmp);
+ cur_node = 256;
+ for(i = 0; i < 511; i += 2){
+ s->nodes[cur_node].sym = HNODE;
+ s->nodes[cur_node].count = s->nodes[i].count + s->nodes[i+1].count;
+ s->nodes[cur_node].n0 = i;
+ for(j = cur_node; j > 0; j--){
+ if(s->nodes[j].count >= s->nodes[j - 1].count) break;
+ FFSWAP(Node, s->nodes[j], s->nodes[j - 1]);
+ }
+ cur_node++;
+ }
+ if(build_huff_tree(&vlc, s->nodes, recode) < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "Error building tree\n");
+ return -1;
+ }
+ /* we have built Huffman table and are ready to decode plane */
+
+ /* convert bits so they may be used by standard bitreader */
+ s->dsp.bswap_buf(s->tmpbuf, src, size >> 2);
+
+ init_get_bits(&gb, s->tmpbuf, size * 8);
+ for(j = 0; j < h; j++){
+ for(i = 0; i < w; i++){
+ dst[i] = recode[get_vlc2(&gb, vlc.table, 9, 3)];
+ /* lines are stored as deltas between previous lines
+ * and we need to add 0x80 to the first lines of chroma planes
+ */
+ if(j) dst[i] += dst[i - stride];
+ else if(Uoff) dst[i] += 0x80;
+ }
+ dst += stride;
+ }
+ free_vlc(&vlc);
+ return 0;
+}
/**
* decode a frame
@@ -84,16 +211,18 @@ static int decode_frame(AVCodecContext *avctx,
unsigned int x, y;
uint32_t *buf32;
uint32_t *luma1,*luma2,*cb,*cr;
+ uint32_t offs[4];
+ int i, is_chroma, planes;
header = LE_32(buf);
version = header & 0xff;
header_size = (header & (1<<30))? 8 : 4; /* bit 30 means pad to 8 bytes */
- if (version > 1) {
+ if (version > 2 && version != 4) {
av_log(avctx, AV_LOG_ERROR,
"This file is encoded with Fraps version %d. " \
- "This codec can only decode version 0 and 1.\n", version);
+ "This codec can only decode version 0, 1, 2 and 4.\n", version);
return -1;
}
@@ -185,30 +314,50 @@ static int decode_frame(AVCodecContext *avctx,
break;
case 2:
+ case 4:
/**
- * Fraps v2 sub-header description. All numbers are little-endian:
- * (this is all guesswork)
- *
- * 0: DWORD 'FPSx'
- * 4: DWORD 0x00000010 unknown, perhaps flags
- * 8: DWORD off_2 offset to plane 2
- * 12: DWORD off_3 offset to plane 3
- * 16: 256xDWORD freqtbl_1 frequency table for plane 1
- * 1040: plane_1
- * ...
- * off_2: 256xDWORD freqtbl_2 frequency table for plane 2
- * plane_2
- * ...
- * off_3: 256xDWORD freqtbl_3 frequency table for plane 3
- * plane_3
+ * Fraps v2 is Huffman-coded YUV420 planes
+ * Fraps v4 is virtually the same
*/
- if ((BE_32(buf) != FPS_TAG)||(buf_size < (3*1024 + 8))) {
+ avctx->pix_fmt = PIX_FMT_YUV420P;
+ planes = 3;
+ f->reference = 1;
+ f->buffer_hints = FF_BUFFER_HINTS_VALID |
+ FF_BUFFER_HINTS_PRESERVE |
+ FF_BUFFER_HINTS_REUSABLE;
+ if (avctx->reget_buffer(avctx, f)) {
+ av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
+ return -1;
+ }
+ /* skip frame */
+ if(buf_size == 8) {
+ f->pict_type = FF_P_TYPE;
+ f->key_frame = 0;
+ break;
+ }
+ f->pict_type = FF_I_TYPE;
+ f->key_frame = 1;
+ if ((LE_32(buf) != FPS_TAG)||(buf_size < (planes*1024 + 24))) {
av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
return -1;
}
-
- /* NOT FINISHED */
-
+ for(i = 0; i < planes; i++) {
+ offs[i] = LE_32(buf + 4 + i * 4);
+ if(offs[i] >= buf_size || (i && offs[i] <= offs[i - 1] + 1024)) {
+ av_log(avctx, AV_LOG_ERROR, "Fraps: plane %i offset is out of bounds\n", i);
+ return -1;
+ }
+ }
+ offs[planes] = buf_size;
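+ /* each plane starts with a 256-entry table of 32-bit symbol frequencies
+ (1024 bytes) followed by the Huffman-coded plane data */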
+ for(i = 0; i < planes; i++){
+ is_chroma = !!i;
+ s->tmpbuf = av_realloc(s->tmpbuf, offs[i + 1] - offs[i] - 1024 + FF_INPUT_BUFFER_PADDING_SIZE);
+ if(fraps2_decode_plane(s, f->data[i], f->linesize[i], avctx->width >> is_chroma,
+ avctx->height >> is_chroma, buf + offs[i], offs[i + 1] - offs[i], is_chroma) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
+ return -1;
+ }
+ }
break;
}
@@ -231,6 +380,7 @@ static int decode_end(AVCodecContext *avctx)
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
+ av_freep(&s->tmpbuf);
return 0;
}
diff --git a/src/libffmpeg/libavcodec/g726.c b/contrib/ffmpeg/libavcodec/g726.c
index 8114fe0f3..c509292b6 100644
--- a/src/libffmpeg/libavcodec/g726.c
+++ b/contrib/ffmpeg/libavcodec/g726.c
@@ -5,18 +5,20 @@
* This is a very straightforward rendition of the G.726
* Section 4 "Computational Details".
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <limits.h>
@@ -31,20 +33,19 @@
* instead of simply using 32bit integer arithmetic.
*/
typedef struct Float11 {
- int sign; /**< 1bit sign */
- int exp; /**< 4bit exponent */
- int mant; /**< 6bit mantissa */
+ int sign; /**< 1bit sign */
+ int exp; /**< 4bit exponent */
+ int mant; /**< 6bit mantissa */
} Float11;
static inline Float11* i2f(int16_t i, Float11* f)
{
- f->sign = (i < 0);
- if (f->sign)
- i = -i;
- f->exp = av_log2_16bit(i) + !!i;
- f->mant = i? (i<<6) >> f->exp :
- 1<<5;
- return f;
+ f->sign = (i < 0);
+ if (f->sign)
+ i = -i;
+ f->exp = av_log2_16bit(i) + !!i;
+ f->mant = i? (i<<6) >> f->exp : 1<<5;
+ return f;
}
static inline int16_t mult(Float11* f1, Float11* f2)
@@ -59,39 +60,39 @@ static inline int16_t mult(Float11* f1, Float11* f2)
static inline int sgn(int value)
{
- return (value < 0) ? -1 : 1;
+ return (value < 0) ? -1 : 1;
}
typedef struct G726Tables {
- int bits; /**< bits per sample */
- int* quant; /**< quantization table */
- int* iquant; /**< inverse quantization table */
- int* W; /**< special table #1 ;-) */
- int* F; /**< special table #2 */
+ int bits; /**< bits per sample */
+ int* quant; /**< quantization table */
+ int* iquant; /**< inverse quantization table */
+ int* W; /**< special table #1 ;-) */
+ int* F; /**< special table #2 */
} G726Tables;
typedef struct G726Context {
- G726Tables* tbls; /**< static tables needed for computation */
-
- Float11 sr[2]; /**< prev. reconstructed samples */
- Float11 dq[6]; /**< prev. difference */
- int a[2]; /**< second order predictor coeffs */
- int b[6]; /**< sixth order predictor coeffs */
- int pk[2]; /**< signs of prev. 2 sez + dq */
-
- int ap; /**< scale factor control */
- int yu; /**< fast scale factor */
- int yl; /**< slow scale factor */
- int dms; /**< short average magnitude of F[i] */
- int dml; /**< long average magnitude of F[i] */
- int td; /**< tone detect */
-
- int se; /**< estimated signal for the next iteration */
- int sez; /**< estimated second order prediction */
- int y; /**< quantizer scaling factor for the next iteration */
+ G726Tables* tbls; /**< static tables needed for computation */
+
+ Float11 sr[2]; /**< prev. reconstructed samples */
+ Float11 dq[6]; /**< prev. difference */
+ int a[2]; /**< second order predictor coeffs */
+ int b[6]; /**< sixth order predictor coeffs */
+ int pk[2]; /**< signs of prev. 2 sez + dq */
+
+ int ap; /**< scale factor control */
+ int yu; /**< fast scale factor */
+ int yl; /**< slow scale factor */
+ int dms; /**< short average magnitude of F[i] */
+ int dml; /**< long average magnitude of F[i] */
+ int td; /**< tone detect */
+
+ int se; /**< estimated signal for the next iteration */
+ int sez; /**< estimated second order prediction */
+ int y; /**< quantizer scaling factor for the next iteration */
} G726Context;
-static int quant_tbl16[] = /**< 16kbit/s 2bits per sample */
+static int quant_tbl16[] = /**< 16kbit/s 2bits per sample */
{ 260, INT_MAX };
static int iquant_tbl16[] =
{ 116, 365, 365, 116 };
@@ -100,7 +101,7 @@ static int W_tbl16[] =
static int F_tbl16[] =
{ 0, 7, 7, 0 };
-static int quant_tbl24[] = /**< 24kbit/s 3bits per sample */
+static int quant_tbl24[] = /**< 24kbit/s 3bits per sample */
{ 7, 217, 330, INT_MAX };
static int iquant_tbl24[] =
{ INT_MIN, 135, 273, 373, 373, 273, 135, INT_MIN };
@@ -109,7 +110,7 @@ static int W_tbl24[] =
static int F_tbl24[] =
{ 0, 1, 2, 7, 7, 2, 1, 0 };
-static int quant_tbl32[] = /**< 32kbit/s 4bits per sample */
+static int quant_tbl32[] = /**< 32kbit/s 4bits per sample */
{ -125, 79, 177, 245, 299, 348, 399, INT_MAX };
static int iquant_tbl32[] =
{ INT_MIN, 4, 135, 213, 273, 323, 373, 425,
@@ -120,7 +121,7 @@ static int W_tbl32[] =
static int F_tbl32[] =
{ 0, 0, 0, 1, 1, 1, 3, 7, 7, 3, 1, 1, 1, 0, 0, 0 };
-static int quant_tbl40[] = /**< 40kbit/s 5bits per sample */
+static int quant_tbl40[] = /**< 40kbit/s 5bits per sample */
{ -122, -16, 67, 138, 197, 249, 297, 338,
377, 412, 444, 474, 501, 527, 552, INT_MAX };
static int iquant_tbl40[] =
@@ -149,25 +150,25 @@ static G726Tables G726Tables_pool[] =
*/
static inline uint8_t quant(G726Context* c, int d)
{
- int sign, exp, i, dln;
+ int sign, exp, i, dln;
- sign = i = 0;
- if (d < 0) {
- sign = 1;
- d = -d;
- }
- exp = av_log2_16bit(d);
- dln = ((exp<<7) + (((d<<7)>>exp)&0x7f)) - (c->y>>2);
+ sign = i = 0;
+ if (d < 0) {
+ sign = 1;
+ d = -d;
+ }
+ exp = av_log2_16bit(d);
+ dln = ((exp<<7) + (((d<<7)>>exp)&0x7f)) - (c->y>>2);
- while (c->tbls->quant[i] < INT_MAX && c->tbls->quant[i] < dln)
+ while (c->tbls->quant[i] < INT_MAX && c->tbls->quant[i] < dln)
++i;
- if (sign)
- i = ~i;
- if (c->tbls->bits != 2 && i == 0) /* I'm not sure this is a good idea */
- i = 0xff;
+ if (sign)
+ i = ~i;
+ if (c->tbls->bits != 2 && i == 0) /* I'm not sure this is a good idea */
+ i = 0xff;
- return i;
+ return i;
}
/**
@@ -209,7 +210,7 @@ static inline int16_t g726_iterate(G726Context* c, int16_t I)
c->a[0] = 0;
c->a[1] = 0;
for (i=0; i<6; i++)
- c->b[i] = 0;
+ c->b[i] = 0;
} else {
/* This is a bit crazy, but it really is +255 not +256 */
fa1 = clip((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);
@@ -220,7 +221,7 @@ static inline int16_t g726_iterate(G726Context* c, int16_t I)
c->a[0] = clip(c->a[0], -(15360 - c->a[1]), 15360 - c->a[1]);
for (i=0; i<6; i++)
- c->b[i] += 128*dq0*sgn(-c->dq[i].sign) - (c->b[i]>>8);
+ c->b[i] += 128*dq0*sgn(-c->dq[i].sign) - (c->b[i]>>8);
}
/* Update Dq and Sr and Pk */
@@ -229,7 +230,7 @@ static inline int16_t g726_iterate(G726Context* c, int16_t I)
c->sr[1] = c->sr[0];
i2f(re_signal, &c->sr[0]);
for (i=5; i>0; i--)
- c->dq[i] = c->dq[i-1];
+ c->dq[i] = c->dq[i-1];
i2f(dq, &c->dq[0]);
c->dq[0].sign = I >> (c->tbls->bits - 1); /* Isn't it crazy ?!?! */
@@ -240,11 +241,11 @@ static inline int16_t g726_iterate(G726Context* c, int16_t I)
c->dms += ((c->tbls->F[I]<<9) - c->dms) >> 5;
c->dml += ((c->tbls->F[I]<<11) - c->dml) >> 7;
if (tr)
- c->ap = 256;
+ c->ap = 256;
else if (c->y > 1535 && !c->td && (abs((c->dms << 2) - c->dml) < (c->dml >> 3)))
- c->ap += (-c->ap) >> 4;
+ c->ap += (-c->ap) >> 4;
else
- c->ap += (0x200 - c->ap) >> 4;
+ c->ap += (0x200 - c->ap) >> 4;
/* Update Yu and Yl */
c->yu = clip(c->y + (((c->tbls->W[I] << 5) - c->y) >> 5), 544, 5120);
@@ -257,10 +258,10 @@ static inline int16_t g726_iterate(G726Context* c, int16_t I)
/* Next iteration for SE and SEZ */
c->se = 0;
for (i=0; i<6; i++)
- c->se += mult(i2f(c->b[i] >> 2, &f), &c->dq[i]);
+ c->se += mult(i2f(c->b[i] >> 2, &f), &c->dq[i]);
c->sez = c->se >> 1;
for (i=0; i<2; i++)
- c->se += mult(i2f(c->a[i] >> 2, &f), &c->sr[i]);
+ c->se += mult(i2f(c->a[i] >> 2, &f), &c->sr[i]);
c->se >>= 1;
return clip(re_signal << 2, -0xffff, 0xffff);
@@ -272,13 +273,13 @@ static int g726_reset(G726Context* c, int bit_rate)
c->tbls = &G726Tables_pool[bit_rate/8000 - 2];
for (i=0; i<2; i++) {
- i2f(0, &c->sr[i]);
- c->a[i] = 0;
- c->pk[i] = 1;
+ i2f(0, &c->sr[i]);
+ c->a[i] = 0;
+ c->pk[i] = 1;
}
for (i=0; i<6; i++) {
- i2f(0, &c->dq[i]);
- c->b[i] = 0;
+ i2f(0, &c->dq[i]);
+ c->b[i] = 0;
}
c->ap = 0;
c->dms = 0;
@@ -299,22 +300,24 @@ static int16_t g726_decode(G726Context* c, int16_t i)
return g726_iterate(c, i);
}
+#ifdef CONFIG_ENCODERS
static int16_t g726_encode(G726Context* c, int16_t sig)
{
- uint8_t i;
+ uint8_t i;
- i = quant(c, sig/4 - c->se) & ((1<<c->tbls->bits) - 1);
- g726_iterate(c, i);
- return i;
+ i = quant(c, sig/4 - c->se) & ((1<<c->tbls->bits) - 1);
+ g726_iterate(c, i);
+ return i;
}
+#endif
/* Interfacing to the libavcodec */
typedef struct AVG726Context {
- G726Context c;
- int bits_left;
- int bit_buffer;
- int code_size;
+ G726Context c;
+ int bits_left;
+ int bit_buffer;
+ int code_size;
} AVG726Context;
static int g726_init(AVCodecContext * avctx)
@@ -350,6 +353,7 @@ static int g726_close(AVCodecContext *avctx)
return 0;
}
+#ifdef CONFIG_ENCODERS
static int g726_encode_frame(AVCodecContext *avctx,
uint8_t *dst, int buf_size, void *data)
{
@@ -360,12 +364,13 @@ static int g726_encode_frame(AVCodecContext *avctx,
init_put_bits(&pb, dst, 1024*1024);
for (; buf_size; buf_size--)
- put_bits(&pb, c->code_size, g726_encode(&c->c, *samples++));
+ put_bits(&pb, c->code_size, g726_encode(&c->c, *samples++));
flush_put_bits(&pb);
return put_bits_count(&pb)>>3;
}
+#endif
static int g726_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
diff --git a/contrib/ffmpeg/libavcodec/gif.c b/contrib/ffmpeg/libavcodec/gif.c
new file mode 100644
index 000000000..f67ab52c2
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/gif.c
@@ -0,0 +1,350 @@
+/*
+ * GIF encoder.
+ * Copyright (c) 2000 Fabrice Bellard.
+ * Copyright (c) 2002 Francois Revol.
+ * Copyright (c) 2006 Baptiste Coudurier.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * First version by Francois Revol revol@free.fr
+ *
+ * Features and limitations:
+ * - currently no compression is performed,
+ * in fact the size of the data is 9/8 the size of the image in 8bpp
+ * - uses only a global standard palette
+ * - tested with IE 5.0, Opera for BeOS, NetPositive (BeOS), and Mozilla (BeOS).
+ *
+ * Reference documents:
+ * http://www.goice.co.jp/member/mo/formats/gif.html
+ * http://astronomy.swin.edu.au/pbourke/dataformats/gif/
+ * http://www.dcs.ed.ac.uk/home/mxr/gfx/2d/GIF89a.txt
+ *
+ * this url claims to have an LZW algorithm not covered by Unisys patent:
+ * http://www.msg.net/utility/whirlgif/gifencod.html
+ * could help reduce the size of the files _a lot_...
+ * some sites also mention an RLE-type compression.
+ */
+
+#include "avcodec.h"
+#include "bytestream.h"
+#include "bitstream.h"
+
+/* bitstream minipacket size */
+#define GIF_CHUNKS 100
+
+/* slows down the decoding (and some browsers don't like it) */
+/* update on the 'some browsers don't like it' issue from above: this was probably due to a missing 'Data Sub-block Terminator' (byte 19) in the app_header */
+#define GIF_ADD_APP_HEADER // required to enable looping of animated gif
+
+typedef struct {
+ unsigned char r;
+ unsigned char g;
+ unsigned char b;
+} rgb_triplet;
+
+/* we use the standard 216 color palette */
+
+/* this script was used to create the palette:
+ * for r in 00 33 66 99 cc ff; do for g in 00 33 66 99 cc ff; do echo -n " "; for b in 00 33 66 99 cc ff; do
+ * echo -n "{ 0x$r, 0x$g, 0x$b }, "; done; echo ""; done; done
+ */
+
+static const rgb_triplet gif_clut[216] = {
+ { 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x33 }, { 0x00, 0x00, 0x66 }, { 0x00, 0x00, 0x99 }, { 0x00, 0x00, 0xcc }, { 0x00, 0x00, 0xff },
+ { 0x00, 0x33, 0x00 }, { 0x00, 0x33, 0x33 }, { 0x00, 0x33, 0x66 }, { 0x00, 0x33, 0x99 }, { 0x00, 0x33, 0xcc }, { 0x00, 0x33, 0xff },
+ { 0x00, 0x66, 0x00 }, { 0x00, 0x66, 0x33 }, { 0x00, 0x66, 0x66 }, { 0x00, 0x66, 0x99 }, { 0x00, 0x66, 0xcc }, { 0x00, 0x66, 0xff },
+ { 0x00, 0x99, 0x00 }, { 0x00, 0x99, 0x33 }, { 0x00, 0x99, 0x66 }, { 0x00, 0x99, 0x99 }, { 0x00, 0x99, 0xcc }, { 0x00, 0x99, 0xff },
+ { 0x00, 0xcc, 0x00 }, { 0x00, 0xcc, 0x33 }, { 0x00, 0xcc, 0x66 }, { 0x00, 0xcc, 0x99 }, { 0x00, 0xcc, 0xcc }, { 0x00, 0xcc, 0xff },
+ { 0x00, 0xff, 0x00 }, { 0x00, 0xff, 0x33 }, { 0x00, 0xff, 0x66 }, { 0x00, 0xff, 0x99 }, { 0x00, 0xff, 0xcc }, { 0x00, 0xff, 0xff },
+ { 0x33, 0x00, 0x00 }, { 0x33, 0x00, 0x33 }, { 0x33, 0x00, 0x66 }, { 0x33, 0x00, 0x99 }, { 0x33, 0x00, 0xcc }, { 0x33, 0x00, 0xff },
+ { 0x33, 0x33, 0x00 }, { 0x33, 0x33, 0x33 }, { 0x33, 0x33, 0x66 }, { 0x33, 0x33, 0x99 }, { 0x33, 0x33, 0xcc }, { 0x33, 0x33, 0xff },
+ { 0x33, 0x66, 0x00 }, { 0x33, 0x66, 0x33 }, { 0x33, 0x66, 0x66 }, { 0x33, 0x66, 0x99 }, { 0x33, 0x66, 0xcc }, { 0x33, 0x66, 0xff },
+ { 0x33, 0x99, 0x00 }, { 0x33, 0x99, 0x33 }, { 0x33, 0x99, 0x66 }, { 0x33, 0x99, 0x99 }, { 0x33, 0x99, 0xcc }, { 0x33, 0x99, 0xff },
+ { 0x33, 0xcc, 0x00 }, { 0x33, 0xcc, 0x33 }, { 0x33, 0xcc, 0x66 }, { 0x33, 0xcc, 0x99 }, { 0x33, 0xcc, 0xcc }, { 0x33, 0xcc, 0xff },
+ { 0x33, 0xff, 0x00 }, { 0x33, 0xff, 0x33 }, { 0x33, 0xff, 0x66 }, { 0x33, 0xff, 0x99 }, { 0x33, 0xff, 0xcc }, { 0x33, 0xff, 0xff },
+ { 0x66, 0x00, 0x00 }, { 0x66, 0x00, 0x33 }, { 0x66, 0x00, 0x66 }, { 0x66, 0x00, 0x99 }, { 0x66, 0x00, 0xcc }, { 0x66, 0x00, 0xff },
+ { 0x66, 0x33, 0x00 }, { 0x66, 0x33, 0x33 }, { 0x66, 0x33, 0x66 }, { 0x66, 0x33, 0x99 }, { 0x66, 0x33, 0xcc }, { 0x66, 0x33, 0xff },
+ { 0x66, 0x66, 0x00 }, { 0x66, 0x66, 0x33 }, { 0x66, 0x66, 0x66 }, { 0x66, 0x66, 0x99 }, { 0x66, 0x66, 0xcc }, { 0x66, 0x66, 0xff },
+ { 0x66, 0x99, 0x00 }, { 0x66, 0x99, 0x33 }, { 0x66, 0x99, 0x66 }, { 0x66, 0x99, 0x99 }, { 0x66, 0x99, 0xcc }, { 0x66, 0x99, 0xff },
+ { 0x66, 0xcc, 0x00 }, { 0x66, 0xcc, 0x33 }, { 0x66, 0xcc, 0x66 }, { 0x66, 0xcc, 0x99 }, { 0x66, 0xcc, 0xcc }, { 0x66, 0xcc, 0xff },
+ { 0x66, 0xff, 0x00 }, { 0x66, 0xff, 0x33 }, { 0x66, 0xff, 0x66 }, { 0x66, 0xff, 0x99 }, { 0x66, 0xff, 0xcc }, { 0x66, 0xff, 0xff },
+ { 0x99, 0x00, 0x00 }, { 0x99, 0x00, 0x33 }, { 0x99, 0x00, 0x66 }, { 0x99, 0x00, 0x99 }, { 0x99, 0x00, 0xcc }, { 0x99, 0x00, 0xff },
+ { 0x99, 0x33, 0x00 }, { 0x99, 0x33, 0x33 }, { 0x99, 0x33, 0x66 }, { 0x99, 0x33, 0x99 }, { 0x99, 0x33, 0xcc }, { 0x99, 0x33, 0xff },
+ { 0x99, 0x66, 0x00 }, { 0x99, 0x66, 0x33 }, { 0x99, 0x66, 0x66 }, { 0x99, 0x66, 0x99 }, { 0x99, 0x66, 0xcc }, { 0x99, 0x66, 0xff },
+ { 0x99, 0x99, 0x00 }, { 0x99, 0x99, 0x33 }, { 0x99, 0x99, 0x66 }, { 0x99, 0x99, 0x99 }, { 0x99, 0x99, 0xcc }, { 0x99, 0x99, 0xff },
+ { 0x99, 0xcc, 0x00 }, { 0x99, 0xcc, 0x33 }, { 0x99, 0xcc, 0x66 }, { 0x99, 0xcc, 0x99 }, { 0x99, 0xcc, 0xcc }, { 0x99, 0xcc, 0xff },
+ { 0x99, 0xff, 0x00 }, { 0x99, 0xff, 0x33 }, { 0x99, 0xff, 0x66 }, { 0x99, 0xff, 0x99 }, { 0x99, 0xff, 0xcc }, { 0x99, 0xff, 0xff },
+ { 0xcc, 0x00, 0x00 }, { 0xcc, 0x00, 0x33 }, { 0xcc, 0x00, 0x66 }, { 0xcc, 0x00, 0x99 }, { 0xcc, 0x00, 0xcc }, { 0xcc, 0x00, 0xff },
+ { 0xcc, 0x33, 0x00 }, { 0xcc, 0x33, 0x33 }, { 0xcc, 0x33, 0x66 }, { 0xcc, 0x33, 0x99 }, { 0xcc, 0x33, 0xcc }, { 0xcc, 0x33, 0xff },
+ { 0xcc, 0x66, 0x00 }, { 0xcc, 0x66, 0x33 }, { 0xcc, 0x66, 0x66 }, { 0xcc, 0x66, 0x99 }, { 0xcc, 0x66, 0xcc }, { 0xcc, 0x66, 0xff },
+ { 0xcc, 0x99, 0x00 }, { 0xcc, 0x99, 0x33 }, { 0xcc, 0x99, 0x66 }, { 0xcc, 0x99, 0x99 }, { 0xcc, 0x99, 0xcc }, { 0xcc, 0x99, 0xff },
+ { 0xcc, 0xcc, 0x00 }, { 0xcc, 0xcc, 0x33 }, { 0xcc, 0xcc, 0x66 }, { 0xcc, 0xcc, 0x99 }, { 0xcc, 0xcc, 0xcc }, { 0xcc, 0xcc, 0xff },
+ { 0xcc, 0xff, 0x00 }, { 0xcc, 0xff, 0x33 }, { 0xcc, 0xff, 0x66 }, { 0xcc, 0xff, 0x99 }, { 0xcc, 0xff, 0xcc }, { 0xcc, 0xff, 0xff },
+ { 0xff, 0x00, 0x00 }, { 0xff, 0x00, 0x33 }, { 0xff, 0x00, 0x66 }, { 0xff, 0x00, 0x99 }, { 0xff, 0x00, 0xcc }, { 0xff, 0x00, 0xff },
+ { 0xff, 0x33, 0x00 }, { 0xff, 0x33, 0x33 }, { 0xff, 0x33, 0x66 }, { 0xff, 0x33, 0x99 }, { 0xff, 0x33, 0xcc }, { 0xff, 0x33, 0xff },
+ { 0xff, 0x66, 0x00 }, { 0xff, 0x66, 0x33 }, { 0xff, 0x66, 0x66 }, { 0xff, 0x66, 0x99 }, { 0xff, 0x66, 0xcc }, { 0xff, 0x66, 0xff },
+ { 0xff, 0x99, 0x00 }, { 0xff, 0x99, 0x33 }, { 0xff, 0x99, 0x66 }, { 0xff, 0x99, 0x99 }, { 0xff, 0x99, 0xcc }, { 0xff, 0x99, 0xff },
+ { 0xff, 0xcc, 0x00 }, { 0xff, 0xcc, 0x33 }, { 0xff, 0xcc, 0x66 }, { 0xff, 0xcc, 0x99 }, { 0xff, 0xcc, 0xcc }, { 0xff, 0xcc, 0xff },
+ { 0xff, 0xff, 0x00 }, { 0xff, 0xff, 0x33 }, { 0xff, 0xff, 0x66 }, { 0xff, 0xff, 0x99 }, { 0xff, 0xff, 0xcc }, { 0xff, 0xff, 0xff },
+};
+
+/* The GIF format uses reversed order for bitstreams... */
+/* at least they don't use PDP_ENDIAN :) */
+/* so we 'extend' PutBitContext. hmmm, OOP :) */
+/* seems this thing changed slightly since I wrote it... */
+
+#ifdef ALT_BITSTREAM_WRITER
+# error no ALT_BITSTREAM_WRITER support for now
+#endif
+
+static void gif_put_bits_rev(PutBitContext *s, int n, unsigned int value)
+{
+ unsigned int bit_buf;
+ int bit_cnt;
+
+ // printf("put_bits=%d %x\n", n, value);
+ assert(n == 32 || value < (1U << n));
+
+ bit_buf = s->bit_buf;
+ bit_cnt = 32 - s->bit_left; /* XXX: laziness... was = s->bit_cnt; */
+
+ // printf("n=%d value=%x cnt=%d buf=%x\n", n, value, bit_cnt, bit_buf);
+ /* XXX: optimize */
+ if (n < (32-bit_cnt)) {
+ bit_buf |= value << (bit_cnt);
+ bit_cnt+=n;
+ } else {
+ bit_buf |= value << (bit_cnt);
+
+ *s->buf_ptr = bit_buf & 0xff;
+ s->buf_ptr[1] = (bit_buf >> 8) & 0xff;
+ s->buf_ptr[2] = (bit_buf >> 16) & 0xff;
+ s->buf_ptr[3] = (bit_buf >> 24) & 0xff;
+
+ //printf("bitbuf = %08x\n", bit_buf);
+ s->buf_ptr+=4;
+ if (s->buf_ptr >= s->buf_end)
+ puts("bit buffer overflow !!"); // should never happen ! who got rid of the callback ???
+// flush_buffer_rev(s);
+ bit_cnt=bit_cnt + n - 32;
+ if (bit_cnt == 0) {
+ bit_buf = 0;
+ } else {
+ bit_buf = value >> (n - bit_cnt);
+ }
+ }
+
+ s->bit_buf = bit_buf;
+ s->bit_left = 32 - bit_cnt;
+}
+
+/* pad the end of the output stream with zeros */
+static void gif_flush_put_bits_rev(PutBitContext *s)
+{
+ while (s->bit_left < 32) {
+ /* XXX: should test end of buffer */
+ *s->buf_ptr++=s->bit_buf & 0xff;
+ s->bit_buf>>=8;
+ s->bit_left+=8;
+ }
+// flush_buffer_rev(s);
+ s->bit_left=32;
+ s->bit_buf=0;
+}
+
+/* !RevPutBitContext */
+
+/* GIF header */
+static int gif_image_write_header(uint8_t **bytestream,
+ int width, int height, int loop_count,
+ uint32_t *palette)
+{
+ int i;
+ unsigned int v;
+
+ bytestream_put_buffer(bytestream, "GIF", 3);
+ bytestream_put_buffer(bytestream, "89a", 3);
+ bytestream_put_le16(bytestream, width);
+ bytestream_put_le16(bytestream, height);
+
+ bytestream_put_byte(bytestream, 0xf7); /* flags: global clut, 256 entries */
+ bytestream_put_byte(bytestream, 0x1f); /* background color index */
+ bytestream_put_byte(bytestream, 0); /* aspect ratio */
+
+ /* the global palette */
+ if (!palette) {
+ bytestream_put_buffer(bytestream, (const unsigned char *)gif_clut, 216*3);
+ for(i=0;i<((256-216)*3);i++)
+ bytestream_put_byte(bytestream, 0);
+ } else {
+ for(i=0;i<256;i++) {
+ v = palette[i];
+ bytestream_put_byte(bytestream, (v >> 16) & 0xff);
+ bytestream_put_byte(bytestream, (v >> 8) & 0xff);
+ bytestream_put_byte(bytestream, (v) & 0xff);
+ }
+ }
+
+ /* update: this is the 'NETSCAPE EXTENSION' that allows for looped animated gif
+ see http://members.aol.com/royalef/gifabout.htm#net-extension
+
+ byte 1 : 33 (hex 0x21) GIF Extension code
+ byte 2 : 255 (hex 0xFF) Application Extension Label
+ byte 3 : 11 (hex 0x0B) Length of Application Block
+ (eleven bytes of data to follow)
+ bytes 4 to 11 : "NETSCAPE"
+ bytes 12 to 14 : "2.0"
+ byte 15 : 3 (hex 0x03) Length of Data Sub-Block
+ (three bytes of data to follow)
+ byte 16 : 1 (hex 0x01)
+ bytes 17 to 18 : 0 to 65535, an unsigned integer in
+ lo-hi byte format. This indicates the
+ number of times the loop should
+ be executed.
+ byte 19 : 0 (hex 0x00) a Data Sub-block Terminator
+ */
+
+ /* application extension header */
+#ifdef GIF_ADD_APP_HEADER
+ if (loop_count >= 0 && loop_count <= 65535) {
+ bytestream_put_byte(bytestream, 0x21);
+ bytestream_put_byte(bytestream, 0xff);
+ bytestream_put_byte(bytestream, 0x0b);
+ bytestream_put_buffer(bytestream, "NETSCAPE2.0", 11); // bytes 4 to 14
+ bytestream_put_byte(bytestream, 0x03); // byte 15
+ bytestream_put_byte(bytestream, 0x01); // byte 16
+ bytestream_put_le16(bytestream, (uint16_t)loop_count);
+ bytestream_put_byte(bytestream, 0x00); // byte 19
+ }
+#endif
+ return 0;
+}
+
+/* this may be slow, but it allows for extensions */
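+/* Dividing each channel by 47 maps 0..255 onto six levels (the web-safe     */
+/* values 0x00, 0x33, ..., 0xff), so the result is the index r*36 + g*6 + b  */
+/* into the 216-entry color cube stored in gif_clut above.                   */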
+static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
+{
+ return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
+}
+
+
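+/* Note that no LZW string table is ever built here: every pixel is emitted  */
+/* as a literal 9-bit code, and a clear code is sent before each chunk so    */
+/* the decoder's string table never grows. The output is therefore roughly   */
+/* 9/8 of the raw indexed data, plus the sub-block framing.                  */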
+static int gif_image_write_image(uint8_t **bytestream,
+ int x1, int y1, int width, int height,
+ const uint8_t *buf, int linesize, int pix_fmt)
+{
+ PutBitContext p;
+ uint8_t buffer[200]; /* 100 * 9 / 8 = 113 */
+ int i, left, w, v;
+ const uint8_t *ptr;
+ /* image block */
+
+ bytestream_put_byte(bytestream, 0x2c);
+ bytestream_put_le16(bytestream, x1);
+ bytestream_put_le16(bytestream, y1);
+ bytestream_put_le16(bytestream, width);
+ bytestream_put_le16(bytestream, height);
+ bytestream_put_byte(bytestream, 0x00); /* flags */
+ /* no local clut */
+
+ bytestream_put_byte(bytestream, 0x08);
+
+ left= width * height;
+
+ init_put_bits(&p, buffer, 130);
+
+/*
+ * the bitstream is written as small packets, each preceded by a size byte,
+ * but it is still one single bitstream across packets (no flush !)
+ */
+ ptr = buf;
+ w = width;
+ while(left>0) {
+
+ gif_put_bits_rev(&p, 9, 0x0100); /* clear code */
+
+ for(i=(left<GIF_CHUNKS)?left:GIF_CHUNKS;i;i--) {
+ if (pix_fmt == PIX_FMT_RGB24) {
+ v = gif_clut_index(ptr[0], ptr[1], ptr[2]);
+ ptr+=3;
+ } else {
+ v = *ptr++;
+ }
+ gif_put_bits_rev(&p, 9, v);
+ if (--w == 0) {
+ w = width;
+ buf += linesize;
+ ptr = buf;
+ }
+ }
+
+ if(left<=GIF_CHUNKS) {
+ gif_put_bits_rev(&p, 9, 0x101); /* end of stream */
+ gif_flush_put_bits_rev(&p);
+ }
+ if(pbBufPtr(&p) - p.buf > 0) {
+ bytestream_put_byte(bytestream, pbBufPtr(&p) - p.buf); /* byte count of the packet */
+ bytestream_put_buffer(bytestream, p.buf, pbBufPtr(&p) - p.buf); /* the actual buffer */
+ p.buf_ptr = p.buf; /* dequeue the bytes off the bitstream */
+ }
+ left-=GIF_CHUNKS;
+ }
+ bytestream_put_byte(bytestream, 0x00); /* end of image block */
+ bytestream_put_byte(bytestream, 0x3b);
+ return 0;
+}
+
+typedef struct {
+ int64_t time, file_time;
+ uint8_t buffer[100]; /* data chunks */
+ AVFrame picture;
+} GIFContext;
+
+static int gif_encode_init(AVCodecContext *avctx)
+{
+ GIFContext *s = avctx->priv_data;
+
+ avctx->coded_frame = &s->picture;
+ return 0;
+}
+
+/* better than nothing gif encoder */
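+/* Only PIX_FMT_PAL8 input is accepted (see .pix_fmts below): data[0] holds  */
+/* the palette indices and data[1] the 256-entry 32-bit palette, which is    */
+/* written out as the global color table.                                    */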
+static int gif_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data)
+{
+ GIFContext *s = avctx->priv_data;
+ AVFrame *pict = data;
+ AVFrame *const p = (AVFrame *)&s->picture;
+ uint8_t *outbuf_ptr = outbuf;
+
+ *p = *pict;
+ p->pict_type = FF_I_TYPE;
+ p->key_frame = 1;
+ gif_image_write_header(&outbuf_ptr, avctx->width, avctx->height, -1, (uint32_t *)pict->data[1]);
+ gif_image_write_image(&outbuf_ptr, 0, 0, avctx->width, avctx->height, pict->data[0], pict->linesize[0], PIX_FMT_PAL8);
+ return outbuf_ptr - outbuf;
+}
+
+AVCodec gif_encoder = {
+ "gif",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_GIF,
+ sizeof(GIFContext),
+ gif_encode_init,
+ gif_encode_frame,
+ NULL, //encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_PAL8, -1},
+};
diff --git a/contrib/ffmpeg/libavcodec/gifdec.c b/contrib/ffmpeg/libavcodec/gifdec.c
new file mode 100644
index 000000000..5a5712299
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/gifdec.c
@@ -0,0 +1,339 @@
+/*
+ * GIF decoder
+ * Copyright (c) 2003 Fabrice Bellard.
+ * Copyright (c) 2006 Baptiste Coudurier.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+//#define DEBUG
+
+#include "avcodec.h"
+#include "bytestream.h"
+#include "lzw.h"
+
+#define GCE_DISPOSAL_NONE 0
+#define GCE_DISPOSAL_INPLACE 1
+#define GCE_DISPOSAL_BACKGROUND 2
+#define GCE_DISPOSAL_RESTORE 3
+
+typedef struct GifState {
+ AVFrame picture;
+ int screen_width;
+ int screen_height;
+ int bits_per_pixel;
+ int background_color_index;
+ int transparent_color_index;
+ int color_resolution;
+ uint32_t *image_palette;
+
+ /* disposal method to apply after the frame has been displayed */
+ int gce_disposal;
+ /* delay during which the frame is shown */
+ int gce_delay;
+
+ /* LZW compatible decoder */
+ uint8_t *bytestream;
+ LZWState *lzw;
+
+ /* aux buffers */
+ uint8_t global_palette[256 * 3];
+ uint8_t local_palette[256 * 3];
+} GifState;
+
+static const uint8_t gif87a_sig[6] = "GIF87a";
+static const uint8_t gif89a_sig[6] = "GIF89a";
+
+static int gif_read_image(GifState *s)
+{
+ int left, top, width, height, bits_per_pixel, code_size, flags;
+ int is_interleaved, has_local_palette, y, pass, y1, linesize, n, i;
+ uint8_t *ptr, *spal, *palette, *ptr1;
+
+ left = bytestream_get_le16(&s->bytestream);
+ top = bytestream_get_le16(&s->bytestream);
+ width = bytestream_get_le16(&s->bytestream);
+ height = bytestream_get_le16(&s->bytestream);
+ flags = bytestream_get_byte(&s->bytestream);
+ is_interleaved = flags & 0x40;
+ has_local_palette = flags & 0x80;
+ bits_per_pixel = (flags & 0x07) + 1;
+#ifdef DEBUG
+ dprintf("gif: image x=%d y=%d w=%d h=%d\n", left, top, width, height);
+#endif
+
+ if (has_local_palette) {
+ bytestream_get_buffer(&s->bytestream, s->local_palette, 3 * (1 << bits_per_pixel));
+ palette = s->local_palette;
+ } else {
+ palette = s->global_palette;
+ bits_per_pixel = s->bits_per_pixel;
+ }
+
+ /* verify that the whole image fits inside the screen dimensions */
+ if (left + width > s->screen_width ||
+ top + height > s->screen_height)
+ return -EINVAL;
+
+ /* build the palette */
+ n = (1 << bits_per_pixel);
+ spal = palette;
+ for(i = 0; i < n; i++) {
+ s->image_palette[i] = (0xff << 24) |
+ (spal[0] << 16) | (spal[1] << 8) | (spal[2]);
+ spal += 3;
+ }
+ for(; i < 256; i++)
+ s->image_palette[i] = (0xff << 24);
+ /* handle transparency */
+ if (s->transparent_color_index >= 0)
+ s->image_palette[s->transparent_color_index] = 0;
+
+ /* now get the image data */
+ code_size = bytestream_get_byte(&s->bytestream);
+ //TODO: add proper data size
+ ff_lzw_decode_init(s->lzw, code_size, s->bytestream, 0, FF_LZW_GIF);
+
+ /* read the whole image */
+ linesize = s->picture.linesize[0];
+ ptr1 = s->picture.data[0] + top * linesize + left;
+ ptr = ptr1;
+ pass = 0;
+ y1 = 0;
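+ /* interlaced images store rows in four passes: every 8th row starting at */
+ /* row 0, every 8th row starting at row 4, every 4th row starting at row 2, */
+ /* and finally every odd row */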
+ for (y = 0; y < height; y++) {
+ ff_lzw_decode(s->lzw, ptr, width);
+ if (is_interleaved) {
+ switch(pass) {
+ default:
+ case 0:
+ case 1:
+ y1 += 8;
+ ptr += linesize * 8;
+ if (y1 >= height) {
+ y1 = 4;
+ if (pass == 0)
+ ptr = ptr1 + linesize * 4;
+ else
+ ptr = ptr1 + linesize * 2;
+ pass++;
+ }
+ break;
+ case 2:
+ y1 += 4;
+ ptr += linesize * 4;
+ if (y1 >= height) {
+ y1 = 1;
+ ptr = ptr1 + linesize;
+ pass++;
+ }
+ break;
+ case 3:
+ y1 += 2;
+ ptr += linesize * 2;
+ break;
+ }
+ } else {
+ ptr += linesize;
+ }
+ }
+ /* skip any trailing data until the end marker is found */
+ ff_lzw_decode_tail(s->lzw);
+ s->bytestream = ff_lzw_cur_ptr(s->lzw);
+ return 0;
+}
+
+static int gif_read_extension(GifState *s)
+{
+ int ext_code, ext_len, i, gce_flags, gce_transparent_index;
+
+ /* extension */
+ ext_code = bytestream_get_byte(&s->bytestream);
+ ext_len = bytestream_get_byte(&s->bytestream);
+#ifdef DEBUG
+ dprintf("gif: ext_code=0x%x len=%d\n", ext_code, ext_len);
+#endif
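+ /* only the Graphic Control Extension (0xf9) is parsed; all other extension */
+ /* types fall through to discard_ext below and are skipped */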
+ switch(ext_code) {
+ case 0xf9:
+ if (ext_len != 4)
+ goto discard_ext;
+ s->transparent_color_index = -1;
+ gce_flags = bytestream_get_byte(&s->bytestream);
+ s->gce_delay = bytestream_get_le16(&s->bytestream);
+ gce_transparent_index = bytestream_get_byte(&s->bytestream);
+ if (gce_flags & 0x01)
+ s->transparent_color_index = gce_transparent_index;
+ else
+ s->transparent_color_index = -1;
+ s->gce_disposal = (gce_flags >> 2) & 0x7;
+#ifdef DEBUG
+ dprintf("gif: gce_flags=%x delay=%d tcolor=%d disposal=%d\n",
+ gce_flags, s->gce_delay,
+ s->transparent_color_index, s->gce_disposal);
+#endif
+ ext_len = bytestream_get_byte(&s->bytestream);
+ break;
+ }
+
+ /* NOTE: many data sub-blocks can follow; skip them all */
+ discard_ext:
+ while (ext_len != 0) {
+ for (i = 0; i < ext_len; i++)
+ bytestream_get_byte(&s->bytestream);
+ ext_len = bytestream_get_byte(&s->bytestream);
+#ifdef DEBUG
+ dprintf("gif: ext_len1=%d\n", ext_len);
+#endif
+ }
+ return 0;
+}
+
+static int gif_read_header1(GifState *s)
+{
+ uint8_t sig[6];
+ int v, n;
+ int has_global_palette;
+
+ /* read gif signature */
+ bytestream_get_buffer(&s->bytestream, sig, 6);
+ if (memcmp(sig, gif87a_sig, 6) != 0 &&
+ memcmp(sig, gif89a_sig, 6) != 0)
+ return -1;
+
+ /* read screen header */
+ s->transparent_color_index = -1;
+ s->screen_width = bytestream_get_le16(&s->bytestream);
+ s->screen_height = bytestream_get_le16(&s->bytestream);
+ if( (unsigned)s->screen_width > 32767
+ || (unsigned)s->screen_height > 32767){
+ av_log(NULL, AV_LOG_ERROR, "picture size too large\n");
+ return -1;
+ }
+
+ v = bytestream_get_byte(&s->bytestream);
+ s->color_resolution = ((v & 0x70) >> 4) + 1;
+ has_global_palette = (v & 0x80);
+ s->bits_per_pixel = (v & 0x07) + 1;
+ s->background_color_index = bytestream_get_byte(&s->bytestream);
+ bytestream_get_byte(&s->bytestream); /* ignored */
+#ifdef DEBUG
+ dprintf("gif: screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
+ s->screen_width, s->screen_height, s->bits_per_pixel,
+ has_global_palette);
+#endif
+ if (has_global_palette) {
+ n = 1 << s->bits_per_pixel;
+ bytestream_get_buffer(&s->bytestream, s->global_palette, n * 3);
+ }
+ return 0;
+}
+
+static int gif_parse_next_image(GifState *s)
+{
+ int ret, code;
+
+ for (;;) {
+ code = bytestream_get_byte(&s->bytestream);
+#ifdef DEBUG
+ dprintf("gif: code=%02x '%c'\n", code, code);
+#endif
+ switch (code) {
+ case ',':
+ if (gif_read_image(s) < 0)
+ return -1;
+ ret = 0;
+ goto the_end;
+ case ';':
+ /* end of image */
+ ret = -1;
+ goto the_end;
+ case '!':
+ if (gif_read_extension(s) < 0)
+ return -1;
+ break;
+ case EOF:
+ default:
+ /* error or erroneous EOF */
+ ret = -1;
+ goto the_end;
+ }
+ }
+ the_end:
+ return ret;
+}
+
+static int gif_decode_init(AVCodecContext *avctx)
+{
+ GifState *s = avctx->priv_data;
+
+ avcodec_get_frame_defaults(&s->picture);
+ avctx->coded_frame= &s->picture;
+ s->picture.data[0] = NULL;
+ ff_lzw_decode_open(&s->lzw);
+ return 0;
+}
+
+static int gif_decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size)
+{
+ GifState *s = avctx->priv_data;
+ AVFrame *picture = data;
+ int ret;
+
+ s->bytestream = buf;
+ if (gif_read_header1(s) < 0)
+ return -1;
+
+ avctx->pix_fmt = PIX_FMT_PAL8;
+ if (avcodec_check_dimensions(avctx, s->screen_width, s->screen_height))
+ return -1;
+ avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);
+
+ if (s->picture.data[0])
+ avctx->release_buffer(avctx, &s->picture);
+ if (avctx->get_buffer(avctx, &s->picture) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+ s->image_palette = (uint32_t *)s->picture.data[1];
+ ret = gif_parse_next_image(s);
+ if (ret < 0)
+ return ret;
+
+ *picture = s->picture;
+ *data_size = sizeof(AVPicture);
+ return 0;
+}
+
+static int gif_decode_close(AVCodecContext *avctx)
+{
+ GifState *s = avctx->priv_data;
+
+ ff_lzw_decode_close(&s->lzw);
+ if(s->picture.data[0])
+ avctx->release_buffer(avctx, &s->picture);
+ return 0;
+}
+
+AVCodec gif_decoder = {
+ "gif",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_GIF,
+ sizeof(GifState),
+ gif_decode_init,
+ NULL,
+ gif_decode_close,
+ gif_decode_frame,
+};
diff --git a/src/libffmpeg/libavcodec/golomb.c b/contrib/ffmpeg/libavcodec/golomb.c
index c140b8b07..50df4fc40 100644
--- a/src/libffmpeg/libavcodec/golomb.c
+++ b/contrib/ffmpeg/libavcodec/golomb.c
@@ -2,18 +2,20 @@
* exp golomb vlc stuff
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/golomb.h b/contrib/ffmpeg/libavcodec/golomb.h
index a8221ec29..9bf7aec46 100644
--- a/src/libffmpeg/libavcodec/golomb.h
+++ b/contrib/ffmpeg/libavcodec/golomb.h
@@ -3,18 +3,20 @@
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2004 Alex Beregszaszi
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -443,6 +445,10 @@ static inline void set_ur_golomb_jpegls(PutBitContext *pb, int i, int k, int lim
if(k)
put_bits(pb, k, i&((1<<k)-1));
}else{
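+ /* put_bits() cannot write more than 31 bits at once, so emit long zero runs in pieces */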
+ while(limit > 31) {
+ put_bits(pb, 31, 0);
+ limit -= 31;
+ }
put_bits(pb, limit , 1);
put_bits(pb, esc_len, i - 1);
}
diff --git a/src/libffmpeg/libavcodec/h261.c b/contrib/ffmpeg/libavcodec/h261.c
index e56978e61..8d4ca08cd 100644
--- a/src/libffmpeg/libavcodec/h261.c
+++ b/contrib/ffmpeg/libavcodec/h261.c
@@ -3,18 +3,20 @@
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2004 Maarten Daniels
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -75,7 +77,7 @@ void ff_h261_loop_filter(MpegEncContext *s){
s->dsp.h261_loop_filter(dest_cr, uvlinesize);
}
-static int ff_h261_get_picture_format(int width, int height){
+int ff_h261_get_picture_format(int width, int height){
// QCIF
if (width == 176 && height == 144)
return 0;
@@ -373,8 +375,6 @@ static VLC h261_mtype_vlc;
static VLC h261_mv_vlc;
static VLC h261_cbp_vlc;
-void init_vlc_rl(RLTable *rl, int use_static);
-
static void h261_decode_init_vlc(H261Context *h){
static int done = 0;
@@ -781,7 +781,14 @@ static int h261_decode_picture_header(H261Context *h){
}
/* temporal reference */
- s->picture_number = get_bits(&s->gb, 5); /* picture timestamp */
+ i= get_bits(&s->gb, 5); /* picture timestamp */
+ if(i < (s->picture_number&31))
+ i += 32;
+ s->picture_number = (s->picture_number&~31) + i;
+
+ s->avctx->time_base= (AVRational){1001, 30000};
+ s->current_picture.pts= s->picture_number;
+
/* PTYPE starts here */
skip_bits1(&s->gb); /* split screen off */
@@ -859,7 +866,6 @@ static int h261_find_frame_end(ParseContext *pc, AVCodecContext* avctx, const ui
state= (state<<8) | buf[i];
for(j=0; j<8; j++){
if(((state>>j)&0xFFFFF) == 0x00010){
- i++;
vop_found=1;
break;
}
@@ -999,10 +1005,6 @@ assert(s->current_picture.pict_type == s->pict_type);
*pict= *(AVFrame*)s->current_picture_ptr;
ff_print_debug_info(s, pict);
- /* Return the Picture timestamp as the frame number */
- /* we substract 1 because it is added on utils.c */
- avctx->frame_number = s->picture_number - 1;
-
*data_size = sizeof(AVFrame);
return get_consumed_bytes(s, buf_size);
@@ -1026,6 +1028,7 @@ AVCodec h261_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif
diff --git a/src/libffmpeg/libavcodec/h261data.h b/contrib/ffmpeg/libavcodec/h261data.h
index 9ea991b23..2a93b73e3 100755..100644
--- a/src/libffmpeg/libavcodec/h261data.h
+++ b/contrib/ffmpeg/libavcodec/h261data.h
@@ -1,3 +1,24 @@
+/*
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ * copyright (c) 2004 Maarten Daniels
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file h261data.h
* H.261 tables.
@@ -5,7 +26,7 @@
#define MB_TYPE_H261_FIL 0x800000
// H.261 VLC table for macroblock addressing
-const uint8_t h261_mba_code[35] = {
+static const uint8_t h261_mba_code[35] = {
1, 3, 2, 3,
2, 3, 2, 7,
6, 11, 10, 9,
@@ -19,7 +40,7 @@ const uint8_t h261_mba_code[35] = {
1 //(start code)
};
-const uint8_t h261_mba_bits[35] = {
+static const uint8_t h261_mba_bits[35] = {
1, 3, 3, 4,
4, 5, 5, 7,
7, 8, 8, 8,
@@ -34,13 +55,13 @@ const uint8_t h261_mba_bits[35] = {
};
//H.261 VLC table for macroblock type
-const uint8_t h261_mtype_code[10] = {
+static const uint8_t h261_mtype_code[10] = {
1, 1, 1, 1,
1, 1, 1, 1,
1, 1
};
-const uint8_t h261_mtype_bits[10] = {
+static const uint8_t h261_mtype_bits[10] = {
4, 7, 1, 5,
9, 8, 10, 3,
2, 6
@@ -60,7 +81,7 @@ static const int h261_mtype_map[10]= {
};
//H.261 VLC table for motion vectors
-const uint8_t h261_mv_tab[17][2] = {
+static const uint8_t h261_mv_tab[17][2] = {
{1,1}, {1,2}, {1,3}, {1,4}, {3,6}, {5,7}, {4,7}, {3,7},
{11,9}, {10,9}, {9,9}, {17,10}, {16,10}, {15,10}, {14,10}, {13,10}, {12,10}
};
@@ -71,7 +92,7 @@ static const int mvmap[17] =
};
//H.261 VLC table for coded block pattern
-const uint8_t h261_cbp_tab[63][2] =
+static const uint8_t h261_cbp_tab[63][2] =
{
{11,5}, {9,5}, {13,6}, {13,4}, {23,7}, {19,7}, {31,8}, {12,4},
{22,7}, {18,7}, {30,8}, {19,5}, {27,8}, {23,8}, {19,8}, {11,4},
@@ -84,7 +105,7 @@ const uint8_t h261_cbp_tab[63][2] =
};
//H.261 VLC table for transform coefficients
-const uint16_t h261_tcoeff_vlc[65][2] = {
+static const uint16_t h261_tcoeff_vlc[65][2] = {
{ 0x2, 2 }, { 0x3, 2 },{ 0x4, 4 },{ 0x5, 5 },
{ 0x6, 7 },{ 0x26, 8 },{ 0x21, 8 },{ 0xa, 10 },
{ 0x1d, 12 },{ 0x18, 12 },{ 0x13, 12 },{ 0x10 , 12 },
@@ -104,7 +125,7 @@ const uint16_t h261_tcoeff_vlc[65][2] = {
{ 0x1, 6 } //escape
};
-const int8_t h261_tcoeff_level[64] = {
+static const int8_t h261_tcoeff_level[64] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
1, 2, 3, 4, 5, 6, 7, 1,
@@ -115,7 +136,7 @@ const int8_t h261_tcoeff_level[64] = {
1, 1, 1, 1, 1, 1, 1, 1
};
-const int8_t h261_tcoeff_run[64] = {
+static const int8_t h261_tcoeff_run[64] = {
0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1,
diff --git a/src/libffmpeg/libavcodec/h263.c b/contrib/ffmpeg/libavcodec/h263.c
index f88114f70..ba51c245a 100644
--- a/src/libffmpeg/libavcodec/h263.c
+++ b/contrib/ffmpeg/libavcodec/h263.c
@@ -5,18 +5,20 @@
* Copyright (c) 2001 Juan J. Sierralta P.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* ac prediction encoding, b-frame support, error resilience, optimizations,
@@ -60,6 +62,8 @@ static void h263p_encode_umotion(MpegEncContext * s, int val);
static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block,
int n, int dc, uint8_t *scan_table,
PutBitContext *dc_pb, PutBitContext *ac_pb);
+static int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
+ uint8_t *scan_table);
#endif
static int h263_decode_motion(MpegEncContext * s, int pred, int fcode);
@@ -69,10 +73,8 @@ static int h263_decode_block(MpegEncContext * s, DCTELEM * block,
static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr);
static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
int n, int coded, int intra, int rvlc);
-static int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
- uint8_t *scan_table);
-static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr);
#ifdef CONFIG_ENCODERS
+static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
static void mpeg4_encode_visual_object_header(MpegEncContext * s);
static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_number);
#endif //CONFIG_ENCODERS
@@ -111,7 +113,7 @@ max run: 29/41
#endif
#if 0 //3IV1 is quite rare and it slows things down a tiny bit
-#define IS_3IV1 s->avctx->codec_tag == ff_get_fourcc("3IV1")
+#define IS_3IV1 s->codec_tag == ff_get_fourcc("3IV1")
#else
#define IS_3IV1 0
#endif
@@ -210,7 +212,7 @@ void h263_encode_picture_header(MpegEncContext * s, int picture_number)
int div, error;
div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den);
div= clip(1, div, 127);
- error= ABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);
+ error= FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);
if(error < best_error){
best_error= error;
best_divisor= div;
@@ -556,7 +558,7 @@ void ff_clean_mpeg4_qscales(MpegEncContext *s){
#define tab_size ((signed)(sizeof(s->direct_scale_mv[0])/sizeof(int16_t)))
#define tab_bias (tab_size/2)
-static void ff_mpeg4_init_direct_mv(MpegEncContext *s){
+void ff_mpeg4_init_direct_mv(MpegEncContext *s){
int i;
for(i=0; i<tab_size; i++){
s->direct_scale_mv[0][i] = (i-tab_bias)*s->pb_time/s->pp_time;
@@ -1231,7 +1233,7 @@ void h263_encode_mb(MpegEncContext * s,
int cbpc, cbpy, i, cbp, pred_x, pred_y;
int16_t pred_dc;
int16_t rec_intradc[6];
- uint16_t *dc_ptr[6];
+ int16_t *dc_ptr[6];
const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1);
const int dquant_code[5]= {1,0,9,2,3};
@@ -1515,7 +1517,8 @@ void ff_h263_loop_filter(MpegEncContext * s){
}
}
-static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr)
+#ifdef CONFIG_ENCODERS
+static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
int x, y, wrap, a, c, pred_dc, scale;
int16_t *dc_val;
@@ -1559,6 +1562,7 @@ static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr)
*dc_val_ptr = &dc_val[x + y * wrap];
return pred_dc;
}
+#endif /* CONFIG_ENCODERS */
static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
{
@@ -2527,7 +2531,7 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
static inline int ff_mpeg4_pred_dc(MpegEncContext * s, int n, int level, int *dir_ptr, int encoding)
{
int a, b, c, wrap, pred, scale, ret;
- uint16_t *dc_val;
+ int16_t *dc_val;
/* find prediction */
if (n < 4) {
@@ -3184,20 +3188,29 @@ void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
* @return 0 if not
*/
static inline int mpeg4_is_resync(MpegEncContext *s){
- const int bits_count= get_bits_count(&s->gb);
+ int bits_count= get_bits_count(&s->gb);
+ int v= show_bits(&s->gb, 16);
if(s->workaround_bugs&FF_BUG_NO_PADDING){
return 0;
}
+ while(v<=0xFF){
+ if(s->pict_type==B_TYPE || (v>>(8-s->pict_type)!=1) || s->partitioned_frame)
+ break;
+ skip_bits(&s->gb, 8+s->pict_type);
+ bits_count+= 8+s->pict_type;
+ v= show_bits(&s->gb, 16);
+ }
+
if(bits_count + 8 >= s->gb.size_in_bits){
- int v= show_bits(&s->gb, 8);
+ v>>=8;
v|= 0x7F >> (7-(bits_count&7));
if(v==0x7F)
return 1;
}else{
- if(show_bits(&s->gb, 16) == ff_mpeg4_resync_prefix[bits_count&7]){
+ if(v == ff_mpeg4_resync_prefix[bits_count&7]){
int len;
GetBitContext gb= s->gb;
@@ -4516,12 +4529,6 @@ end:
/* per-MB end of slice check */
if(s->codec_id==CODEC_ID_MPEG4){
-#if 0 //http://standards.iso.org/ittf/PubliclyAvailableStandards/ISO_IEC_14496-4_2004_Conformance_Testing/video_conformance/version_1/simple/ERROR.ZIP/mit025.m4v needs this but its unclear if the mpeg4 standard allows this at all (MN)
- if(s->pict_type != B_TYPE){
- while(show_bits(&s->gb, 9 + (s->pict_type == P_TYPE)) == 1)
- skip_bits(&s->gb, 9 + (s->pict_type == P_TYPE));
- }
-#endif
if(mpeg4_is_resync(s)){
const int delta= s->mb_x + 1 == s->mb_width ? 2 : 1;
if(s->pict_type==B_TYPE && s->next_picture.mbskip_table[xy + delta])
@@ -4929,7 +4936,7 @@ static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
#if 0
if(s->error_resilience >= FF_ER_COMPLIANT){
- const int abs_level= ABS(level);
+ const int abs_level= FFABS(level);
if(abs_level<=MAX_LEVEL && run<=MAX_RUN){
const int run1= run - rl->max_run[last][abs_level] - 1;
if(abs_level <= rl->max_level[last][run]){
@@ -5290,7 +5297,7 @@ int h263_decode_picture_header(MpegEncContext *s)
);
}
#if 1
- if (s->pict_type == I_TYPE && s->avctx->codec_tag == ff_get_fourcc("ZYGO")){
+ if (s->pict_type == I_TYPE && s->codec_tag == ff_get_fourcc("ZYGO")){
int i,j;
for(i=0; i<85; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
av_log(s->avctx, AV_LOG_DEBUG, "\n");
@@ -5615,7 +5622,7 @@ static int decode_vol_header(MpegEncContext *s, GetBitContext *gb){
skip_bits1(gb); /* marker */
height = get_bits(gb, 13);
skip_bits1(gb); /* marker */
- if(width && height && !(s->width && s->avctx->codec_tag == ff_get_fourcc("MP4S"))){ /* they should be non zero but who knows ... */
+ if(width && height && !(s->width && s->codec_tag == ff_get_fourcc("MP4S"))){ /* they should be non zero but who knows ... */
s->width = width;
s->height = height;
// printf("width/height: %d %d\n", width, height);
@@ -5799,7 +5806,7 @@ static int decode_user_data(MpegEncContext *s, GetBitContext *gb){
int ver = 0, build = 0, ver2 = 0, ver3 = 0;
char last;
- for(i=0; i<255 && gb->index < gb->size_in_bits; i++){
+ for(i=0; i<255 && get_bits_count(gb) < gb->size_in_bits; i++){
if(show_bits(gb, 23) == 0) break;
buf[i]= get_bits(gb, 8);
}
@@ -5908,7 +5915,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
s->pb_field_time= ( ROUNDED_DIV(s->time, s->t_frame)
- ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2;
}
-//av_log(s->avctx, AV_LOG_DEBUG, "last nonb %Ld last_base %d time %Ld pp %d pb %d t %d ppf %d pbf %d\n", s->last_non_b_time, s->last_time_base, s->time, s->pp_time, s->pb_time, s->t_frame, s->pp_field_time, s->pb_field_time);
+//av_log(s->avctx, AV_LOG_DEBUG, "last nonb %"PRId64" last_base %d time %"PRId64" pp %d pb %d t %d ppf %d pbf %d\n", s->last_non_b_time, s->last_time_base, s->time, s->pp_time, s->pb_time, s->t_frame, s->pp_field_time, s->pb_field_time);
if(s->avctx->time_base.num)
s->current_picture_ptr->pts= (s->time + s->avctx->time_base.num/2) / s->avctx->time_base.num;
@@ -5925,7 +5932,7 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
av_log(s->avctx, AV_LOG_ERROR, "vop not coded\n");
return FRAME_SKIPPED;
}
-//printf("time %d %d %d || %Ld %Ld %Ld\n", s->time_increment_bits, s->avctx->time_base.den, s->time_base,
+//printf("time %d %d %d || %"PRId64" %"PRId64" %"PRId64"\n", s->time_increment_bits, s->avctx->time_base.den, s->time_base,
//s->time, s->last_non_b_time, s->last_non_b_time - s->pp_time);
if (s->shape != BIN_ONLY_SHAPE && ( s->pict_type == P_TYPE
|| (s->pict_type == S_TYPE && s->vol_sprite_usage==GMC_SPRITE))) {
@@ -6059,7 +6066,7 @@ int ff_mpeg4_decode_picture_header(MpegEncContext * s, GetBitContext *gb)
/* search next start code */
align_get_bits(gb);
- if(s->avctx->codec_tag == ff_get_fourcc("WV1F") && show_bits(gb, 24) == 0x575630){
+ if(s->codec_tag == ff_get_fourcc("WV1F") && show_bits(gb, 24) == 0x575630){
skip_bits(gb, 24);
if(get_bits(gb, 8) == 0xF0)
return decode_vop_header(s, gb);
diff --git a/src/libffmpeg/libavcodec/h263data.h b/contrib/ffmpeg/libavcodec/h263data.h
index 01bcaedb4..5eddc3b54 100644
--- a/src/libffmpeg/libavcodec/h263data.h
+++ b/contrib/ffmpeg/libavcodec/h263data.h
@@ -1,3 +1,26 @@
+/*
+ * copyright (c) 2000,2001 Fabrice Bellard
+ * H263+ support
+ * copyright (c) 2001 Juan J. Sierralta P.
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file h263data.h
* H.263 tables.
@@ -65,7 +88,7 @@ static const int h263_mb_type_b_map[15]= {
MB_TYPE_INTRA4x4 | MB_TYPE_CBP | MB_TYPE_QUANT,
};
-const uint8_t cbpc_b_tab[4][2] = {
+static const uint8_t cbpc_b_tab[4][2] = {
{0, 1},
{2, 2},
{7, 3},
@@ -157,7 +180,7 @@ static RLTable rl_inter = {
inter_level,
};
-const uint16_t intra_vlc_aic[103][2] = {
+static const uint16_t intra_vlc_aic[103][2] = {
{ 0x2, 2 }, { 0x6, 3 }, { 0xe, 4 }, { 0xc, 5 },
{ 0xd, 5 }, { 0x10, 6 }, { 0x11, 6 }, { 0x12, 6 },
{ 0x16, 7 }, { 0x1b, 8 }, { 0x20, 9 }, { 0x21, 9 },
@@ -186,7 +209,7 @@ const uint16_t intra_vlc_aic[103][2] = {
{ 0x59, 12 }, { 0x5a, 12 }, { 0x3, 7 },
};
-const int8_t intra_run_aic[102] = {
+static const int8_t intra_run_aic[102] = {
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
@@ -202,7 +225,7 @@ const int8_t intra_run_aic[102] = {
18, 19, 20, 21, 22, 23,
};
-const int8_t intra_level_aic[102] = {
+static const int8_t intra_level_aic[102] = {
1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24,
diff --git a/src/libffmpeg/libavcodec/h263dec.c b/contrib/ffmpeg/libavcodec/h263dec.c
index b53192d74..66370c179 100644
--- a/src/libffmpeg/libavcodec/h263dec.c
+++ b/contrib/ffmpeg/libavcodec/h263dec.c
@@ -3,18 +3,20 @@
* Copyright (c) 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -516,19 +518,19 @@ retry:
avctx->has_b_frames= !s->low_delay;
if(s->xvid_build==0 && s->divx_version==0 && s->lavc_build==0){
- if(s->avctx->stream_codec_tag == ff_get_fourcc("XVID") ||
- s->avctx->codec_tag == ff_get_fourcc("XVID") || s->avctx->codec_tag == ff_get_fourcc("XVIX") ||
- s->avctx->codec_tag == ff_get_fourcc("RMP4"))
+ if(s->stream_codec_tag == ff_get_fourcc("XVID") ||
+ s->codec_tag == ff_get_fourcc("XVID") || s->codec_tag == ff_get_fourcc("XVIX") ||
+ s->codec_tag == ff_get_fourcc("RMP4"))
s->xvid_build= -1;
#if 0
- if(s->avctx->codec_tag == ff_get_fourcc("DIVX") && s->vo_type==0 && s->vol_control_parameters==1
+ if(s->codec_tag == ff_get_fourcc("DIVX") && s->vo_type==0 && s->vol_control_parameters==1
&& s->padding_bug_score > 0 && s->low_delay) // XVID with modified fourcc
s->xvid_build= -1;
#endif
}
if(s->xvid_build==0 && s->divx_version==0 && s->lavc_build==0){
- if(s->avctx->codec_tag == ff_get_fourcc("DIVX") && s->vo_type==0 && s->vol_control_parameters==0)
+ if(s->codec_tag == ff_get_fourcc("DIVX") && s->vo_type==0 && s->vol_control_parameters==0)
s->divx_version= 400; //divx 4
}
@@ -538,10 +540,10 @@ retry:
}
if(s->workaround_bugs&FF_BUG_AUTODETECT){
- if(s->avctx->codec_tag == ff_get_fourcc("XVIX"))
+ if(s->codec_tag == ff_get_fourcc("XVIX"))
s->workaround_bugs|= FF_BUG_XVID_ILACE;
- if(s->avctx->codec_tag == ff_get_fourcc("UMP4")){
+ if(s->codec_tag == ff_get_fourcc("UMP4")){
s->workaround_bugs|= FF_BUG_UMP4;
}
@@ -693,6 +695,17 @@ retry:
s->next_p_frame_damaged=0;
}
+ if((s->avctx->flags2 & CODEC_FLAG2_FAST) && s->pict_type==B_TYPE){
+ s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
+ }else if((!s->no_rounding) || s->pict_type==B_TYPE){
+ s->me.qpel_put= s->dsp.put_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
+ }else{
+ s->me.qpel_put= s->dsp.put_no_rnd_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
+ }
+
if(MPV_frame_start(s, avctx) < 0)
return -1;
@@ -785,7 +798,7 @@ assert(s->current_picture.pict_type == s->pict_type);
avctx->frame_number = s->picture_number - 1;
#ifdef PRINT_FRAME_TIME
-av_log(avctx, AV_LOG_DEBUG, "%Ld\n", rdtsc()-time);
+av_log(avctx, AV_LOG_DEBUG, "%"PRId64"\n", rdtsc()-time);
#endif
return get_consumed_bytes(s, buf_size);
diff --git a/src/libffmpeg/libavcodec/h264.c b/contrib/ffmpeg/libavcodec/h264.c
index 1a7fb76b4..ad23ae120 100644
--- a/src/libffmpeg/libavcodec/h264.c
+++ b/contrib/ffmpeg/libavcodec/h264.c
@@ -2,18 +2,20 @@
* H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -371,6 +373,7 @@ typedef struct H264Context{
/* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0,1,2), 0x0? luma_cbp */
uint16_t *cbp_table;
+ int cbp;
int top_cbp;
int left_cbp;
/* chroma_pred_mode for i4x4 or i16x16, else 0 */
@@ -409,6 +412,7 @@ static VLC run7_vlc;
static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp);
static void svq3_add_idct_c(uint8_t *dst, DCTELEM *block, int stride, int qp, int dc);
static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
+static void filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
static always_inline uint32_t pack16to32(int a, int b){
#ifdef WORDS_BIGENDIAN
@@ -617,7 +621,7 @@ static void fill_caches(H264Context *h, int mb_type, int for_deblock){
if(USES_LIST(mb_type,list)){
uint32_t *src = (uint32_t*)s->current_picture.motion_val[list][h->mb2b_xy[mb_xy]];
uint32_t *dst = (uint32_t*)h->mv_cache[list][scan8[0]];
- uint8_t *ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
+ int8_t *ref = &s->current_picture.ref_index[list][h->mb2b8_xy[mb_xy]];
for(i=0; i<4; i++, dst+=8, src+=h->b_stride){
dst[0] = src[0];
dst[1] = src[1];
@@ -1131,7 +1135,7 @@ static inline int fetch_diagonal_mv(H264Context *h, const int16_t **C, int i, in
* make mbaff happy, so we can't move all this logic to fill_caches */
if(FRAME_MBAFF){
MpegEncContext *s = &h->s;
- const int *mb_types = s->current_picture_ptr->mb_type;
+ const uint32_t *mb_types = s->current_picture_ptr->mb_type;
const int16_t *mv;
*(uint32_t*)h->mv_cache[list][scan8[0]-2] = 0;
*C = h->mv_cache[list][scan8[0]-2];
@@ -1339,7 +1343,7 @@ static inline void direct_dist_scale_factor(H264Context * const h){
h->dist_scale_factor[i] = 256;
}else{
int tb = clip(poc - poc0, -128, 127);
- int tx = (16384 + (ABS(td) >> 1)) / td;
+ int tx = (16384 + (FFABS(td) >> 1)) / td;
h->dist_scale_factor[i] = clip((tb*tx + 32) >> 6, -1024, 1023);
}
}
@@ -1470,8 +1474,8 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
if(!IS_INTRA(mb_type_col)
- && ( (l1ref0[0] == 0 && ABS(l1mv0[0][0]) <= 1 && ABS(l1mv0[0][1]) <= 1)
- || (l1ref0[0] < 0 && l1ref1[0] == 0 && ABS(l1mv1[0][0]) <= 1 && ABS(l1mv1[0][1]) <= 1
+ && ( (l1ref0[0] == 0 && FFABS(l1mv0[0][0]) <= 1 && FFABS(l1mv0[0][1]) <= 1)
+ || (l1ref0[0] < 0 && l1ref1[0] == 0 && FFABS(l1mv1[0][0]) <= 1 && FFABS(l1mv1[0][1]) <= 1
&& (h->x264_build>33 || !h->x264_build)))){
if(ref[0] > 0)
fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4);
@@ -1506,7 +1510,7 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
const int16_t (*l1mv)[2]= l1ref0[x8 + y8*h->b8_stride] == 0 ? l1mv0 : l1mv1;
if(IS_SUB_8X8(sub_mb_type)){
const int16_t *mv_col = l1mv[x8*3 + y8*3*h->b_stride];
- if(ABS(mv_col[0]) <= 1 && ABS(mv_col[1]) <= 1){
+ if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
if(ref[0] == 0)
fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
if(ref[1] == 0)
@@ -1515,7 +1519,7 @@ static inline void pred_direct_motion(H264Context * const h, int *mb_type){
}else
for(i4=0; i4<4; i4++){
const int16_t *mv_col = l1mv[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
- if(ABS(mv_col[0]) <= 1 && ABS(mv_col[1]) <= 1){
+ if(FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1){
if(ref[0] == 0)
*(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0;
if(ref[1] == 0)
@@ -1712,6 +1716,9 @@ static inline void write_back_motion(H264Context *h, int mb_type){
*(uint64_t*)s->current_picture.motion_val[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mv_cache[list][scan8[0]+2 + 8*y];
}
if( h->pps.cabac ) {
+ if(IS_SKIP(mb_type))
+ fill_rectangle(h->mvd_table[list][b_xy], 4, 4, h->b_stride, 0, 4);
+ else
for(y=0; y<4; y++){
*(uint64_t*)h->mvd_table[list][b_xy + 0 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+0 + 8*y];
*(uint64_t*)h->mvd_table[list][b_xy + 2 + y*h->b_stride]= *(uint64_t*)h->mvd_cache[list][scan8[0]+2 + 8*y];
@@ -1719,7 +1726,7 @@ static inline void write_back_motion(H264Context *h, int mb_type){
}
{
- uint8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
+ int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
ref_index[0+0*h->b8_stride]= h->ref_cache[list][scan8[0]];
ref_index[1+0*h->b8_stride]= h->ref_cache[list][scan8[4]];
ref_index[0+1*h->b8_stride]= h->ref_cache[list][scan8[8]];
@@ -2444,7 +2451,7 @@ static void pred16x16_128_dc_c(uint8_t *src, int stride){
static inline void pred16x16_plane_compat_c(uint8_t *src, int stride, const int svq3){
int i, j, k;
int a;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
const uint8_t * const src0 = src+7-stride;
const uint8_t *src1 = src+8*stride-1;
const uint8_t *src2 = src1-2*stride; // == src+6*stride-1;
@@ -2587,7 +2594,7 @@ static void pred8x8_dc_c(uint8_t *src, int stride){
static void pred8x8_plane_c(uint8_t *src, int stride){
int j, k;
int a;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
const uint8_t * const src0 = src+3-stride;
const uint8_t *src1 = src+4*stride-1;
const uint8_t *src2 = src1-2*stride; // == src+2*stride-1;
@@ -3142,7 +3149,7 @@ static void hl_motion(H264Context *h, uint8_t *dest_y, uint8_t *dest_cb, uint8_t
prefetch_motion(h, 1);
}
-static void decode_init_vlc(H264Context *h){
+static void decode_init_vlc(){
static int done = 0;
if (!done) {
@@ -3399,7 +3406,7 @@ static int decode_init(AVCodecContext *avctx){
s->low_delay= 1;
avctx->pix_fmt= PIX_FMT_YUV420P;
- decode_init_vlc(h);
+ decode_init_vlc();
if(avctx->extradata_size > 0 && avctx->extradata &&
*(char *)avctx->extradata == 1){
@@ -3632,6 +3639,9 @@ static void hl_decode_mb(H264Context *h){
dest_cb = s->current_picture.data[1] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
dest_cr = s->current_picture.data[2] + (mb_y * 8 * s->uvlinesize) + mb_x * 8;
+ s->dsp.prefetch(dest_y + (s->mb_x&3)*4*s->linesize + 64, s->linesize, 4);
+ s->dsp.prefetch(dest_cb + (s->mb_x&7)*s->uvlinesize + 64, dest_cr - dest_cb, 2);
+
if (MB_FIELD) {
linesize = h->mb_linesize = s->linesize * 2;
uvlinesize = h->mb_uvlinesize = s->uvlinesize * 2;
@@ -3780,8 +3790,8 @@ static void hl_decode_mb(H264Context *h){
xchg_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize, 0);
}else if(s->codec_id == CODEC_ID_H264){
hl_motion(h, dest_y, dest_cb, dest_cr,
- s->dsp.put_h264_qpel_pixels_tab, s->dsp.put_h264_chroma_pixels_tab,
- s->dsp.avg_h264_qpel_pixels_tab, s->dsp.avg_h264_chroma_pixels_tab,
+ s->me.qpel_put, s->dsp.put_h264_chroma_pixels_tab,
+ s->me.qpel_avg, s->dsp.avg_h264_chroma_pixels_tab,
s->dsp.weight_h264_pixels_tab, s->dsp.biweight_h264_pixels_tab);
}
@@ -3879,7 +3889,7 @@ static void hl_decode_mb(H264Context *h){
tprintf("call filter_mb\n");
backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
fill_caches(h, mb_type, 1); //FIXME don't fill stuff which isn't used by filter_mb
- filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
+ filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb, dest_cr, linesize, uvlinesize);
}
}
}
@@ -4203,7 +4213,7 @@ static void implicit_weight_table(H264Context *h){
int td = clip(poc1 - poc0, -128, 127);
if(td){
int tb = clip(cur_poc - poc0, -128, 127);
- int tx = (16384 + (ABS(td) >> 1)) / td;
+ int tx = (16384 + (FFABS(td) >> 1)) / td;
int dist_scale_factor = clip((tb*tx + 32) >> 6, -1024, 1023) >> 2;
if(dist_scale_factor < -64 || dist_scale_factor > 128)
h->implicit_weight[ref0][ref1] = 32;
@@ -4883,6 +4893,14 @@ static int decode_slice_header(H264Context *h){
);
}
+ if((s->avctx->flags2 & CODEC_FLAG2_FAST) && !s->current_picture.reference){
+ s->me.qpel_put= s->dsp.put_2tap_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_2tap_qpel_pixels_tab;
+ }else{
+ s->me.qpel_put= s->dsp.put_h264_qpel_pixels_tab;
+ s->me.qpel_avg= s->dsp.avg_h264_qpel_pixels_tab;
+ }
+
return 0;
}
@@ -5100,10 +5118,7 @@ static void decode_mb_skip(H264Context *h){
fill_caches(h, mb_type, 0); //FIXME check what is needed and what not ...
pred_direct_motion(h, &mb_type);
- if(h->pps.cabac){
- fill_rectangle(h->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 4);
- fill_rectangle(h->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 4);
- }
+ mb_type|= MB_TYPE_SKIP;
}
else
{
@@ -5114,12 +5129,10 @@ static void decode_mb_skip(H264Context *h){
pred_pskip_motion(h, &mx, &my);
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
fill_rectangle( h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mx,my), 4);
- if(h->pps.cabac)
- fill_rectangle(h->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 4);
}
write_back_motion(h, mb_type);
- s->current_picture.mb_type[mb_xy]= mb_type|MB_TYPE_SKIP;
+ s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.qscale_table[mb_xy]= s->qscale;
h->slice_table[ mb_xy ]= h->slice_num;
h->prev_mb_skipped= 1;
@@ -5184,7 +5197,7 @@ static int decode_mb_cavlc(H264Context *h){
assert(h->slice_type == I_TYPE);
decode_intra_mb:
if(mb_type > 25){
- av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice to large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
+ av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
return -1;
}
partition_count=0;
@@ -5478,6 +5491,7 @@ decode_intra_mb:
else
cbp= golomb_to_inter_cbp[cbp];
}
+ h->cbp = cbp;
if(dct8x8_allowed && (cbp&15) && !IS_INTRA(mb_type)){
if(get_bits1(&s->gb))
@@ -5619,7 +5633,7 @@ static int decode_cabac_field_decoding_flag(H264Context *h) {
ctx += 1;
}
- return get_cabac( &h->cabac, &h->cabac_state[70 + ctx] );
+ return get_cabac_noinline( &h->cabac, &h->cabac_state[70 + ctx] );
}
static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_slice) {
@@ -5635,11 +5649,11 @@ static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_sl
ctx++;
if( h->slice_table[mbb_xy] == h->slice_num && !IS_INTRA4x4( s->current_picture.mb_type[mbb_xy] ) )
ctx++;
- if( get_cabac( &h->cabac, &state[ctx] ) == 0 )
+ if( get_cabac_noinline( &h->cabac, &state[ctx] ) == 0 )
return 0; /* I4x4 */
state += 2;
}else{
- if( get_cabac( &h->cabac, &state[0] ) == 0 )
+ if( get_cabac_noinline( &h->cabac, &state[0] ) == 0 )
return 0; /* I4x4 */
}
@@ -5647,11 +5661,11 @@ static int decode_cabac_intra_mb_type(H264Context *h, int ctx_base, int intra_sl
return 25; /* PCM */
mb_type = 1; /* I16x16 */
- mb_type += 12 * get_cabac( &h->cabac, &state[1] ); /* cbp_luma != 0 */
- if( get_cabac( &h->cabac, &state[2] ) ) /* cbp_chroma */
- mb_type += 4 + 4 * get_cabac( &h->cabac, &state[2+intra_slice] );
- mb_type += 2 * get_cabac( &h->cabac, &state[3+intra_slice] );
- mb_type += 1 * get_cabac( &h->cabac, &state[3+2*intra_slice] );
+ mb_type += 12 * get_cabac_noinline( &h->cabac, &state[1] ); /* cbp_luma != 0 */
+ if( get_cabac_noinline( &h->cabac, &state[2] ) ) /* cbp_chroma */
+ mb_type += 4 + 4 * get_cabac_noinline( &h->cabac, &state[2+intra_slice] );
+ mb_type += 2 * get_cabac_noinline( &h->cabac, &state[3+intra_slice] );
+ mb_type += 1 * get_cabac_noinline( &h->cabac, &state[3+2*intra_slice] );
return mb_type;
}
@@ -5661,14 +5675,14 @@ static int decode_cabac_mb_type( H264Context *h ) {
if( h->slice_type == I_TYPE ) {
return decode_cabac_intra_mb_type(h, 3, 1);
} else if( h->slice_type == P_TYPE ) {
- if( get_cabac( &h->cabac, &h->cabac_state[14] ) == 0 ) {
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[14] ) == 0 ) {
/* P-type */
- if( get_cabac( &h->cabac, &h->cabac_state[15] ) == 0 ) {
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[15] ) == 0 ) {
/* P_L0_D16x16, P_8x8 */
- return 3 * get_cabac( &h->cabac, &h->cabac_state[16] );
+ return 3 * get_cabac_noinline( &h->cabac, &h->cabac_state[16] );
} else {
/* P_L0_D8x16, P_L0_D16x8 */
- return 2 - get_cabac( &h->cabac, &h->cabac_state[17] );
+ return 2 - get_cabac_noinline( &h->cabac, &h->cabac_state[17] );
}
} else {
return decode_cabac_intra_mb_type(h, 17, 0) + 5;
@@ -5684,17 +5698,17 @@ static int decode_cabac_mb_type( H264Context *h ) {
if( h->slice_table[mbb_xy] == h->slice_num && !IS_DIRECT( s->current_picture.mb_type[mbb_xy] ) )
ctx++;
- if( !get_cabac( &h->cabac, &h->cabac_state[27+ctx] ) )
+ if( !get_cabac_noinline( &h->cabac, &h->cabac_state[27+ctx] ) )
return 0; /* B_Direct_16x16 */
- if( !get_cabac( &h->cabac, &h->cabac_state[27+3] ) ) {
- return 1 + get_cabac( &h->cabac, &h->cabac_state[27+5] ); /* B_L[01]_16x16 */
+ if( !get_cabac_noinline( &h->cabac, &h->cabac_state[27+3] ) ) {
+ return 1 + get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ); /* B_L[01]_16x16 */
}
- bits = get_cabac( &h->cabac, &h->cabac_state[27+4] ) << 3;
- bits|= get_cabac( &h->cabac, &h->cabac_state[27+5] ) << 2;
- bits|= get_cabac( &h->cabac, &h->cabac_state[27+5] ) << 1;
- bits|= get_cabac( &h->cabac, &h->cabac_state[27+5] );
+ bits = get_cabac_noinline( &h->cabac, &h->cabac_state[27+4] ) << 3;
+ bits|= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ) << 2;
+ bits|= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] ) << 1;
+ bits|= get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] );
if( bits < 8 )
return bits + 3; /* B_Bi_16x16 through B_L1_L0_16x8 */
else if( bits == 13 ) {
@@ -5704,7 +5718,7 @@ static int decode_cabac_mb_type( H264Context *h ) {
else if( bits == 15 )
return 22; /* B_8x8 */
- bits= ( bits<<1 ) | get_cabac( &h->cabac, &h->cabac_state[27+5] );
+ bits= ( bits<<1 ) | get_cabac_noinline( &h->cabac, &h->cabac_state[27+5] );
return bits - 4; /* B_L0_Bi_* through B_Bi_Bi_* */
} else {
/* TODO SI/SP frames? */
@@ -5745,7 +5759,7 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
if( h->slice_type == B_TYPE )
ctx += 13;
- return get_cabac( &h->cabac, &h->cabac_state[11+ctx] );
+ return get_cabac_noinline( &h->cabac, &h->cabac_state[11+ctx] );
}
static int decode_cabac_mb_intra4x4_pred_mode( H264Context *h, int pred_mode ) {
@@ -5777,12 +5791,12 @@ static int decode_cabac_mb_chroma_pre_mode( H264Context *h) {
if( h->slice_table[mbb_xy] == h->slice_num && h->chroma_pred_mode_table[mbb_xy] != 0 )
ctx++;
- if( get_cabac( &h->cabac, &h->cabac_state[64+ctx] ) == 0 )
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+ctx] ) == 0 )
return 0;
- if( get_cabac( &h->cabac, &h->cabac_state[64+3] ) == 0 )
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+3] ) == 0 )
return 1;
- if( get_cabac( &h->cabac, &h->cabac_state[64+3] ) == 0 )
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[64+3] ) == 0 )
return 2;
else
return 3;
@@ -5859,13 +5873,13 @@ static int decode_cabac_mb_cbp_chroma( H264Context *h) {
ctx = 0;
if( cbp_a > 0 ) ctx++;
if( cbp_b > 0 ) ctx += 2;
- if( get_cabac( &h->cabac, &h->cabac_state[77 + ctx] ) == 0 )
+ if( get_cabac_noinline( &h->cabac, &h->cabac_state[77 + ctx] ) == 0 )
return 0;
ctx = 4;
if( cbp_a == 2 ) ctx++;
if( cbp_b == 2 ) ctx += 2;
- return 1 + get_cabac( &h->cabac, &h->cabac_state[77 + ctx] );
+ return 1 + get_cabac_noinline( &h->cabac, &h->cabac_state[77 + ctx] );
}
static int decode_cabac_mb_dqp( H264Context *h) {
MpegEncContext * const s = &h->s;
@@ -5881,7 +5895,7 @@ static int decode_cabac_mb_dqp( H264Context *h) {
if( h->last_qscale_diff != 0 )
ctx++;
- while( get_cabac( &h->cabac, &h->cabac_state[60 + ctx] ) ) {
+ while( get_cabac_noinline( &h->cabac, &h->cabac_state[60 + ctx] ) ) {
if( ctx < 2 )
ctx = 2;
else
@@ -5923,7 +5937,7 @@ static int decode_cabac_b_mb_sub_type( H264Context *h ) {
}
static inline int decode_cabac_mb_transform_size( H264Context *h ) {
- return get_cabac( &h->cabac, &h->cabac_state[399 + h->neighbor_transform_size] );
+ return get_cabac_noinline( &h->cabac, &h->cabac_state[399 + h->neighbor_transform_size] );
}
static int decode_cabac_mb_ref( H264Context *h, int list, int n ) {
@@ -5989,8 +6003,7 @@ static int decode_cabac_mb_mvd( H264Context *h, int list, int n, int l ) {
mvd += 1 << k;
}
}
- if( get_cabac_bypass( &h->cabac ) ) return -mvd;
- else return mvd;
+ return get_cabac_bypass_sign( &h->cabac, -mvd );
}
static int inline get_cabac_cbf_ctx( H264Context *h, int cat, int idx ) {
@@ -6021,6 +6034,13 @@ static int inline get_cabac_cbf_ctx( H264Context *h, int cat, int idx ) {
return ctx + 4 * cat;
}
+static const __attribute((used)) uint8_t last_coeff_flag_offset_8x8[63] = {
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8
+};
+
static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n, const uint8_t *scantable, const uint32_t *qmul, int max_coeff) {
const int mb_xy = h->s.mb_x + h->s.mb_y*h->s.mb_stride;
static const int significant_coeff_flag_offset[2][6] = {
@@ -6034,7 +6054,7 @@ static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n
static const int coeff_abs_level_m1_offset[6] = {
227+0, 227+10, 227+20, 227+30, 227+39, 426
};
- static const int significant_coeff_flag_offset_8x8[2][63] = {
+ static const uint8_t significant_coeff_flag_offset_8x8[2][63] = {
{ 0, 1, 2, 3, 4, 5, 5, 4, 4, 3, 3, 4, 4, 4, 5, 5,
4, 4, 4, 4, 3, 3, 6, 7, 7, 7, 8, 9,10, 9, 8, 7,
7, 6,11,12,13,11, 6, 7, 8, 9,14,10, 9, 8, 6,11,
@@ -6044,16 +6064,10 @@ static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n
9, 9,10,10, 8,11,12,11, 9, 9,10,10, 8,13,13, 9,
9,10,10, 8,13,13, 9, 9,10,10,14,14,14,14,14 }
};
- static const int last_coeff_flag_offset_8x8[63] = {
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4,
- 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8
- };
int index[64];
- int i, last;
+ int last;
int coeff_count = 0;
int abslevel1 = 1;
@@ -6063,6 +6077,20 @@ static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n
uint8_t *last_coeff_ctx_base;
uint8_t *abs_level_m1_ctx_base;
+#ifndef ARCH_X86
+#define CABAC_ON_STACK
+#endif
+#ifdef CABAC_ON_STACK
+#define CC &cc
+ CABACContext cc;
+ cc.range = h->cabac.range;
+ cc.low = h->cabac.low;
+ cc.bytestream= h->cabac.bytestream;
+#else
+#define CC &h->cabac
+#endif
+
+
/* cat: 0-> DC 16x16 n = 0
* 1-> AC 16x16 n = luma4x4idx
* 2-> Luma4x4 n = luma4x4idx
@@ -6073,12 +6101,16 @@ static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n
/* read coded block flag */
if( cat != 5 ) {
- if( get_cabac( &h->cabac, &h->cabac_state[85 + get_cabac_cbf_ctx( h, cat, n ) ] ) == 0 ) {
+ if( get_cabac( CC, &h->cabac_state[85 + get_cabac_cbf_ctx( h, cat, n ) ] ) == 0 ) {
if( cat == 1 || cat == 2 )
h->non_zero_count_cache[scan8[n]] = 0;
else if( cat == 4 )
h->non_zero_count_cache[scan8[16+n]] = 0;
-
+#ifdef CABAC_ON_STACK
+ h->cabac.range = cc.range ;
+ h->cabac.low = cc.low ;
+ h->cabac.bytestream= cc.bytestream;
+#endif
return 0;
}
}
@@ -6094,22 +6126,28 @@ static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n
#define DECODE_SIGNIFICANCE( coefs, sig_off, last_off ) \
for(last= 0; last < coefs; last++) { \
uint8_t *sig_ctx = significant_coeff_ctx_base + sig_off; \
- if( get_cabac( &h->cabac, sig_ctx )) { \
+ if( get_cabac( CC, sig_ctx )) { \
uint8_t *last_ctx = last_coeff_ctx_base + last_off; \
index[coeff_count++] = last; \
- if( get_cabac( &h->cabac, last_ctx ) ) { \
+ if( get_cabac( CC, last_ctx ) ) { \
last= max_coeff; \
break; \
} \
} \
+ }\
+ if( last == max_coeff -1 ) {\
+ index[coeff_count++] = last;\
}
- const int *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD];
+ const uint8_t *sig_off = significant_coeff_flag_offset_8x8[MB_FIELD];
+#if defined(ARCH_X86) && !(defined(PIC) && defined(__GNUC__))
+ coeff_count= decode_significance_8x8_x86(CC, significant_coeff_ctx_base, index, sig_off);
+ } else {
+ coeff_count= decode_significance_x86(CC, max_coeff, significant_coeff_ctx_base, index);
+#else
DECODE_SIGNIFICANCE( 63, sig_off[last], last_coeff_flag_offset_8x8[last] );
} else {
DECODE_SIGNIFICANCE( max_coeff - 1, last, last );
- }
- if( last == max_coeff -1 ) {
- index[coeff_count++] = last;
+#endif
}
assert(coeff_count > 0);
@@ -6126,51 +6164,54 @@ static int decode_cabac_residual( H264Context *h, DCTELEM *block, int cat, int n
fill_rectangle(&h->non_zero_count_cache[scan8[n]], 2, 2, 8, coeff_count, 1);
}
- for( i = coeff_count - 1; i >= 0; i-- ) {
+ for( coeff_count--; coeff_count >= 0; coeff_count-- ) {
uint8_t *ctx = (abslevelgt1 != 0 ? 0 : FFMIN( 4, abslevel1 )) + abs_level_m1_ctx_base;
- int j= scantable[index[i]];
+ int j= scantable[index[coeff_count]];
- if( get_cabac( &h->cabac, ctx ) == 0 ) {
+ if( get_cabac( CC, ctx ) == 0 ) {
if( !qmul ) {
- if( get_cabac_bypass( &h->cabac ) ) block[j] = -1;
- else block[j] = 1;
+ block[j] = get_cabac_bypass_sign( CC, -1);
}else{
- if( get_cabac_bypass( &h->cabac ) ) block[j] = (-qmul[j] + 32) >> 6;
- else block[j] = ( qmul[j] + 32) >> 6;
+ block[j] = (get_cabac_bypass_sign( CC, -qmul[j]) + 32) >> 6;
}
abslevel1++;
} else {
int coeff_abs = 2;
ctx = 5 + FFMIN( 4, abslevelgt1 ) + abs_level_m1_ctx_base;
- while( coeff_abs < 15 && get_cabac( &h->cabac, ctx ) ) {
+ while( coeff_abs < 15 && get_cabac( CC, ctx ) ) {
coeff_abs++;
}
if( coeff_abs >= 15 ) {
int j = 0;
- while( get_cabac_bypass( &h->cabac ) ) {
- coeff_abs += 1 << j;
+ while( get_cabac_bypass( CC ) ) {
j++;
}
+ coeff_abs=1;
while( j-- ) {
- if( get_cabac_bypass( &h->cabac ) )
- coeff_abs += 1 << j ;
+ coeff_abs += coeff_abs + get_cabac_bypass( CC );
}
+ coeff_abs+= 14;
}
if( !qmul ) {
- if( get_cabac_bypass( &h->cabac ) ) block[j] = -coeff_abs;
+ if( get_cabac_bypass( CC ) ) block[j] = -coeff_abs;
else block[j] = coeff_abs;
}else{
- if( get_cabac_bypass( &h->cabac ) ) block[j] = (-coeff_abs * qmul[j] + 32) >> 6;
+ if( get_cabac_bypass( CC ) ) block[j] = (-coeff_abs * qmul[j] + 32) >> 6;
else block[j] = ( coeff_abs * qmul[j] + 32) >> 6;
}
abslevelgt1++;
}
}
+#ifdef CABAC_ON_STACK
+ h->cabac.range = cc.range ;
+ h->cabac.low = cc.low ;
+ h->cabac.bytestream= cc.bytestream;
+#endif
return 0;
}
@@ -6580,7 +6621,7 @@ decode_intra_mb:
cbp |= decode_cabac_mb_cbp_chroma( h ) << 4;
}
- h->cbp_table[mb_xy] = cbp;
+ h->cbp_table[mb_xy] = h->cbp = cbp;
if( dct8x8_allowed && (cbp&15) && !IS_INTRA( mb_type ) ) {
if( decode_cabac_mb_transform_size( h ) )
@@ -6640,8 +6681,10 @@ decode_intra_mb:
for( i4x4 = 0; i4x4 < 4; i4x4++ ) {
const int index = 4*i8x8 + i4x4;
//av_log( s->avctx, AV_LOG_ERROR, "Luma4x4: %d\n", index );
+//START_TIMER
if( decode_cabac_residual(h, h->mb + 16*index, 2, index, scan, h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale], 16) < 0 )
return -1;
+//STOP_TIMER("decode_residual")
}
} else {
uint8_t * const nnz= &h->non_zero_count_cache[ scan8[4*i8x8] ];
@@ -6694,16 +6737,16 @@ decode_intra_mb:
}
-static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
+static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) {
int i, d;
- const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
- const int alpha = alpha_table[index_a];
- const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
+ const int index_a = qp + h->slice_alpha_c0_offset;
+ const int alpha = (alpha_table+52)[index_a];
+ const int beta = (beta_table+52)[qp + h->slice_beta_offset];
if( bS[0] < 4 ) {
int8_t tc[4];
for(i=0; i<4; i++)
- tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] : -1;
+ tc[i] = bS[i] ? (tc0_table+52)[index_a][bS[i] - 1] : -1;
h->s.dsp.h264_h_loop_filter_luma(pix, stride, alpha, beta, tc);
} else {
/* 16px edge length, because bS=4 is triggered by being at
@@ -6717,12 +6760,12 @@ static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4]
const int q1 = pix[1];
const int q2 = pix[2];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
- if(ABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
- if( ABS( p2 - p0 ) < beta)
+ if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
+ if( FFABS( p2 - p0 ) < beta)
{
const int p3 = pix[-4];
/* p0', p1', p2' */
@@ -6733,7 +6776,7 @@ static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4]
/* p0' */
pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
}
- if( ABS( q2 - q0 ) < beta)
+ if( FFABS( q2 - q0 ) < beta)
{
const int q3 = pix[3];
/* q0', q1', q2' */
@@ -6755,23 +6798,23 @@ static void filter_mb_edgev( H264Context *h, uint8_t *pix, int stride, int bS[4]
}
}
}
-static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
+static void filter_mb_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) {
int i;
- const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
- const int alpha = alpha_table[index_a];
- const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
+ const int index_a = qp + h->slice_alpha_c0_offset;
+ const int alpha = (alpha_table+52)[index_a];
+ const int beta = (beta_table+52)[qp + h->slice_beta_offset];
if( bS[0] < 4 ) {
int8_t tc[4];
for(i=0; i<4; i++)
- tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] + 1 : 0;
+ tc[i] = bS[i] ? (tc0_table+52)[index_a][bS[i] - 1] + 1 : 0;
h->s.dsp.h264_h_loop_filter_chroma(pix, stride, alpha, beta, tc);
} else {
h->s.dsp.h264_h_loop_filter_chroma_intra(pix, stride, alpha, beta);
}
}
-static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int bS[8], int qp[2] ) {
+static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int16_t bS[8], int qp[2] ) {
int i;
for( i = 0; i < 16; i++, pix += stride) {
int index_a;
@@ -6790,12 +6833,12 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
}
qp_index = MB_FIELD ? (i >> 3) : (i & 1);
- index_a = clip( qp[qp_index] + h->slice_alpha_c0_offset, 0, 51 );
- alpha = alpha_table[index_a];
- beta = beta_table[clip( qp[qp_index] + h->slice_beta_offset, 0, 51 )];
+ index_a = qp[qp_index] + h->slice_alpha_c0_offset;
+ alpha = (alpha_table+52)[index_a];
+ beta = (beta_table+52)[qp[qp_index] + h->slice_beta_offset];
if( bS[bS_index] < 4 ) {
- const int tc0 = tc0_table[index_a][bS[bS_index] - 1];
+ const int tc0 = (tc0_table+52)[index_a][bS[bS_index] - 1];
const int p0 = pix[-1];
const int p1 = pix[-2];
const int p2 = pix[-3];
@@ -6803,17 +6846,17 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
const int q1 = pix[1];
const int q2 = pix[2];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
int tc = tc0;
int i_delta;
- if( ABS( p2 - p0 ) < beta ) {
+ if( FFABS( p2 - p0 ) < beta ) {
pix[-2] = p1 + clip( ( p2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( p1 << 1 ) ) >> 1, -tc0, tc0 );
tc++;
}
- if( ABS( q2 - q0 ) < beta ) {
+ if( FFABS( q2 - q0 ) < beta ) {
pix[1] = q1 + clip( ( q2 + ( ( p0 + q0 + 1 ) >> 1 ) - ( q1 << 1 ) ) >> 1, -tc0, tc0 );
tc++;
}
@@ -6832,12 +6875,12 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
const int q1 = pix[1];
const int q2 = pix[2];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
- if(ABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
- if( ABS( p2 - p0 ) < beta)
+ if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
+ if( FFABS( p2 - p0 ) < beta)
{
const int p3 = pix[-4];
/* p0', p1', p2' */
@@ -6848,7 +6891,7 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
/* p0' */
pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
}
- if( ABS( q2 - q0 ) < beta)
+ if( FFABS( q2 - q0 ) < beta)
{
const int q3 = pix[3];
/* q0', q1', q2' */
@@ -6869,7 +6912,7 @@ static void filter_mb_mbaff_edgev( H264Context *h, uint8_t *pix, int stride, int
}
}
}
-static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int bS[8], int qp[2] ) {
+static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[8], int qp[2] ) {
int i;
for( i = 0; i < 8; i++, pix += stride) {
int index_a;
@@ -6884,20 +6927,20 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in
}
qp_index = MB_FIELD ? (i >> 2) : (i & 1);
- index_a = clip( qp[qp_index] + h->slice_alpha_c0_offset, 0, 51 );
- alpha = alpha_table[index_a];
- beta = beta_table[clip( qp[qp_index] + h->slice_beta_offset, 0, 51 )];
+ index_a = qp[qp_index] + h->slice_alpha_c0_offset;
+ alpha = (alpha_table+52)[index_a];
+ beta = (beta_table+52)[qp[qp_index] + h->slice_beta_offset];
if( bS[bS_index] < 4 ) {
- const int tc = tc0_table[index_a][bS[bS_index] - 1] + 1;
+ const int tc = (tc0_table+52)[index_a][bS[bS_index] - 1] + 1;
const int p0 = pix[-1];
const int p1 = pix[-2];
const int q0 = pix[0];
const int q1 = pix[1];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
const int i_delta = clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
pix[-1] = clip_uint8( p0 + i_delta ); /* p0' */
@@ -6910,9 +6953,9 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in
const int q0 = pix[0];
const int q1 = pix[1];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
pix[-1] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */
pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */
@@ -6922,17 +6965,17 @@ static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, in
}
}
-static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
+static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) {
int i, d;
- const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
- const int alpha = alpha_table[index_a];
- const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
+ const int index_a = qp + h->slice_alpha_c0_offset;
+ const int alpha = (alpha_table+52)[index_a];
+ const int beta = (beta_table+52)[qp + h->slice_beta_offset];
const int pix_next = stride;
if( bS[0] < 4 ) {
int8_t tc[4];
for(i=0; i<4; i++)
- tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] : -1;
+ tc[i] = bS[i] ? (tc0_table+52)[index_a][bS[i] - 1] : -1;
h->s.dsp.h264_v_loop_filter_luma(pix, stride, alpha, beta, tc);
} else {
/* 16px edge length, see filter_mb_edgev */
@@ -6944,15 +6987,15 @@ static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4]
const int q1 = pix[1*pix_next];
const int q2 = pix[2*pix_next];
- if( ABS( p0 - q0 ) < alpha &&
- ABS( p1 - p0 ) < beta &&
- ABS( q1 - q0 ) < beta ) {
+ if( FFABS( p0 - q0 ) < alpha &&
+ FFABS( p1 - p0 ) < beta &&
+ FFABS( q1 - q0 ) < beta ) {
const int p3 = pix[-4*pix_next];
const int q3 = pix[ 3*pix_next];
- if(ABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
- if( ABS( p2 - p0 ) < beta) {
+ if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
+ if( FFABS( p2 - p0 ) < beta) {
/* p0', p1', p2' */
pix[-1*pix_next] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
pix[-2*pix_next] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
@@ -6961,7 +7004,7 @@ static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4]
/* p0' */
pix[-1*pix_next] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
}
- if( ABS( q2 - q0 ) < beta) {
+ if( FFABS( q2 - q0 ) < beta) {
/* q0', q1', q2' */
pix[0*pix_next] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
pix[1*pix_next] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
@@ -6982,22 +7025,130 @@ static void filter_mb_edgeh( H264Context *h, uint8_t *pix, int stride, int bS[4]
}
}
-static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int bS[4], int qp ) {
+static void filter_mb_edgech( H264Context *h, uint8_t *pix, int stride, int16_t bS[4], int qp ) {
int i;
- const int index_a = clip( qp + h->slice_alpha_c0_offset, 0, 51 );
- const int alpha = alpha_table[index_a];
- const int beta = beta_table[clip( qp + h->slice_beta_offset, 0, 51 )];
+ const int index_a = qp + h->slice_alpha_c0_offset;
+ const int alpha = (alpha_table+52)[index_a];
+ const int beta = (beta_table+52)[qp + h->slice_beta_offset];
if( bS[0] < 4 ) {
int8_t tc[4];
for(i=0; i<4; i++)
- tc[i] = bS[i] ? tc0_table[index_a][bS[i] - 1] + 1 : 0;
+ tc[i] = bS[i] ? (tc0_table+52)[index_a][bS[i] - 1] + 1 : 0;
h->s.dsp.h264_v_loop_filter_chroma(pix, stride, alpha, beta, tc);
} else {
h->s.dsp.h264_v_loop_filter_chroma_intra(pix, stride, alpha, beta);
}
}
+static void filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
+ MpegEncContext * const s = &h->s;
+ int mb_xy, mb_type;
+ int qp, qp0, qp1, qpc, qpc0, qpc1, qp_thresh;
+
+ if(mb_x==0 || mb_y==0 || !s->dsp.h264_loop_filter_strength) {
+ filter_mb(h, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
+ return;
+ }
+ assert(!FRAME_MBAFF);
+
+ mb_xy = mb_x + mb_y*s->mb_stride;
+ mb_type = s->current_picture.mb_type[mb_xy];
+ qp = s->current_picture.qscale_table[mb_xy];
+ qp0 = s->current_picture.qscale_table[mb_xy-1];
+ qp1 = s->current_picture.qscale_table[h->top_mb_xy];
+ qpc = get_chroma_qp( h->pps.chroma_qp_index_offset, qp );
+ qpc0 = get_chroma_qp( h->pps.chroma_qp_index_offset, qp0 );
+ qpc1 = get_chroma_qp( h->pps.chroma_qp_index_offset, qp1 );
+ qp0 = (qp + qp0 + 1) >> 1;
+ qp1 = (qp + qp1 + 1) >> 1;
+ qpc0 = (qpc + qpc0 + 1) >> 1;
+ qpc1 = (qpc + qpc1 + 1) >> 1;
+ qp_thresh = 15 - h->slice_alpha_c0_offset;
+ if(qp <= qp_thresh && qp0 <= qp_thresh && qp1 <= qp_thresh &&
+ qpc <= qp_thresh && qpc0 <= qp_thresh && qpc1 <= qp_thresh)
+ return;
+
+ if( IS_INTRA(mb_type) ) {
+ int16_t bS4[4] = {4,4,4,4};
+ int16_t bS3[4] = {3,3,3,3};
+ if( IS_8x8DCT(mb_type) ) {
+ filter_mb_edgev( h, &img_y[4*0], linesize, bS4, qp0 );
+ filter_mb_edgev( h, &img_y[4*2], linesize, bS3, qp );
+ filter_mb_edgeh( h, &img_y[4*0*linesize], linesize, bS4, qp1 );
+ filter_mb_edgeh( h, &img_y[4*2*linesize], linesize, bS3, qp );
+ } else {
+ filter_mb_edgev( h, &img_y[4*0], linesize, bS4, qp0 );
+ filter_mb_edgev( h, &img_y[4*1], linesize, bS3, qp );
+ filter_mb_edgev( h, &img_y[4*2], linesize, bS3, qp );
+ filter_mb_edgev( h, &img_y[4*3], linesize, bS3, qp );
+ filter_mb_edgeh( h, &img_y[4*0*linesize], linesize, bS4, qp1 );
+ filter_mb_edgeh( h, &img_y[4*1*linesize], linesize, bS3, qp );
+ filter_mb_edgeh( h, &img_y[4*2*linesize], linesize, bS3, qp );
+ filter_mb_edgeh( h, &img_y[4*3*linesize], linesize, bS3, qp );
+ }
+ filter_mb_edgecv( h, &img_cb[2*0], uvlinesize, bS4, qpc0 );
+ filter_mb_edgecv( h, &img_cb[2*2], uvlinesize, bS3, qpc );
+ filter_mb_edgecv( h, &img_cr[2*0], uvlinesize, bS4, qpc0 );
+ filter_mb_edgecv( h, &img_cr[2*2], uvlinesize, bS3, qpc );
+ filter_mb_edgech( h, &img_cb[2*0*uvlinesize], uvlinesize, bS4, qpc1 );
+ filter_mb_edgech( h, &img_cb[2*2*uvlinesize], uvlinesize, bS3, qpc );
+ filter_mb_edgech( h, &img_cr[2*0*uvlinesize], uvlinesize, bS4, qpc1 );
+ filter_mb_edgech( h, &img_cr[2*2*uvlinesize], uvlinesize, bS3, qpc );
+ return;
+ } else {
+ DECLARE_ALIGNED_8(int16_t, bS[2][4][4]);
+ uint64_t (*bSv)[4] = (uint64_t(*)[4])bS;
+ int edges;
+ if( IS_8x8DCT(mb_type) && (h->cbp&7) == 7 ) {
+ edges = 4;
+ bSv[0][0] = bSv[0][2] = bSv[1][0] = bSv[1][2] = 0x0002000200020002ULL;
+ } else {
+ int mask_edge1 = (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16)) ? 3 :
+ (mb_type & MB_TYPE_16x8) ? 1 : 0;
+ int mask_edge0 = (mb_type & (MB_TYPE_16x16 | MB_TYPE_8x16))
+ && (s->current_picture.mb_type[mb_xy-1] & (MB_TYPE_16x16 | MB_TYPE_8x16))
+ ? 3 : 0;
+ int step = IS_8x8DCT(mb_type) ? 2 : 1;
+ edges = (mb_type & MB_TYPE_16x16) && !(h->cbp & 15) ? 1 : 4;
+ s->dsp.h264_loop_filter_strength( bS, h->non_zero_count_cache, h->ref_cache, h->mv_cache,
+ (h->slice_type == B_TYPE), edges, step, mask_edge0, mask_edge1 );
+ }
+ if( IS_INTRA(s->current_picture.mb_type[mb_xy-1]) )
+ bSv[0][0] = 0x0004000400040004ULL;
+ if( IS_INTRA(s->current_picture.mb_type[h->top_mb_xy]) )
+ bSv[1][0] = 0x0004000400040004ULL;
+
+#define FILTER(hv,dir,edge)\
+ if(bSv[dir][edge]) {\
+ filter_mb_edge##hv( h, &img_y[4*edge*(dir?linesize:1)], linesize, bS[dir][edge], edge ? qp : qp##dir );\
+ if(!(edge&1)) {\
+ filter_mb_edgec##hv( h, &img_cb[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir );\
+ filter_mb_edgec##hv( h, &img_cr[2*edge*(dir?uvlinesize:1)], uvlinesize, bS[dir][edge], edge ? qpc : qpc##dir );\
+ }\
+ }
+ if( edges == 1 ) {
+ FILTER(v,0,0);
+ FILTER(h,1,0);
+ } else if( IS_8x8DCT(mb_type) ) {
+ FILTER(v,0,0);
+ FILTER(v,0,2);
+ FILTER(h,1,0);
+ FILTER(h,1,2);
+ } else {
+ FILTER(v,0,0);
+ FILTER(v,0,1);
+ FILTER(v,0,2);
+ FILTER(v,0,3);
+ FILTER(h,1,0);
+ FILTER(h,1,1);
+ FILTER(h,1,2);
+ FILTER(h,1,3);
+ }
+#undef FILTER
+ }
+}
+
static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
MpegEncContext * const s = &h->s;
const int mb_xy= mb_x + mb_y*s->mb_stride;
@@ -7035,7 +7186,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
*/
const int pair_xy = mb_x + (mb_y&~1)*s->mb_stride;
const int left_mb_xy[2] = { pair_xy-1, pair_xy-1+s->mb_stride };
- int bS[8];
+ int16_t bS[8];
int qp[2];
int chroma_qp[2];
int mb_qp, mbn0_qp, mbn1_qp;
@@ -7114,7 +7265,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
int mbn_xy = mb_xy - 2 * s->mb_stride;
int qp, chroma_qp;
int i, j;
- int bS[4];
+ int16_t bS[4];
for(j=0; j<2; j++, mbn_xy += s->mb_stride){
if( IS_INTRA(mb_type) ||
@@ -7150,7 +7301,7 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
/* mbn_xy: neighbor macroblock */
const int mbn_xy = edge > 0 ? mb_xy : mbm_xy;
const int mbn_type = s->current_picture.mb_type[mbn_xy];
- int bS[4];
+ int16_t bS[4];
int qp;
if( (edge&1) && IS_8x8DCT(mb_type) )
@@ -7189,8 +7340,8 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
int v = 0;
for( l = 0; !v && l < 1 + (h->slice_type == B_TYPE); l++ ) {
v |= ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] ||
- ABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
- ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit;
+ FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
+ FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit;
}
bS[0] = bS[1] = bS[2] = bS[3] = v;
mv_done = 1;
@@ -7213,8 +7364,8 @@ static void filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8
bS[i] = 0;
for( l = 0; l < 1 + (h->slice_type == B_TYPE); l++ ) {
if( ref2frm[h->ref_cache[l][b_idx]+2] != ref2frm[h->ref_cache[l][bn_idx]+2] ||
- ABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
- ABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit ) {
+ FFABS( h->mv_cache[l][b_idx][0] - h->mv_cache[l][bn_idx][0] ) >= 4 ||
+ FFABS( h->mv_cache[l][b_idx][1] - h->mv_cache[l][bn_idx][1] ) >= mvy_limit ) {
bS[i] = 1;
break;
}
@@ -7267,7 +7418,7 @@ static int decode_slice(H264Context *h){
align_get_bits( &s->gb );
/* init cabac */
- ff_init_cabac_states( &h->cabac, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64 );
+ ff_init_cabac_states( &h->cabac);
ff_init_cabac_decoder( &h->cabac,
s->gb.buffer + get_bits_count(&s->gb)/8,
( s->gb.size_in_bits - get_bits_count(&s->gb) + 7)/8);
@@ -7286,8 +7437,10 @@ static int decode_slice(H264Context *h){
}
for(;;){
+//START_TIMER
int ret = decode_mb_cabac(h);
int eos;
+//STOP_TIMER("decode_mb_cabac")
if(ret>=0) hl_decode_mb(h);
@@ -7301,7 +7454,7 @@ static int decode_slice(H264Context *h){
}
eos = get_cabac_terminate( &h->cabac );
- if( ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 1) {
+ if( ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 2) {
av_log(h->s.avctx, AV_LOG_ERROR, "error while decoding MB %d %d, bytestream (%d)\n", s->mb_x, s->mb_y, h->cabac.bytestream_end - h->cabac.bytestream);
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, (AC_ERROR|DC_ERROR|MV_ERROR)&part_mask);
return -1;
@@ -7694,7 +7847,7 @@ static inline int decode_seq_parameter_set(H264Context *h){
#ifndef ALLOW_INTERLACE
if(sps->mb_aff)
- av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF support not included; enable it compilation time\n");
+ av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF support not included; enable it at compile-time.\n");
#endif
if(!sps->direct_8x8_inference_flag && sps->mb_aff)
av_log(h->s.avctx, AV_LOG_ERROR, "MBAFF + !direct_8x8_inference is not implemented\n");
@@ -8381,7 +8534,7 @@ int main(){
printf("\n");*/
for(j=0; j<16; j++){
- int diff= ABS(src[j] - ref[j]);
+ int diff= FFABS(src[j] - ref[j]);
error+= diff*diff;
max_error= FFMAX(max_error, diff);
diff --git a/src/libffmpeg/libavcodec/h264data.h b/contrib/ffmpeg/libavcodec/h264data.h
index 1dd9dafe5..2dea3580f 100644
--- a/src/libffmpeg/libavcodec/h264data.h
+++ b/contrib/ffmpeg/libavcodec/h264data.h
@@ -2,18 +2,20 @@
* H26L/H264/AVC/JVT/14496-10/... encoder/decoder
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -607,23 +609,48 @@ static const int quant_coeff[52][16]={
/* Deblocking filter (p153) */
-static const int alpha_table[52] = {
+static const int alpha_table[52*3] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 4, 4, 5, 6,
7, 8, 9, 10, 12, 13, 15, 17, 20, 22,
25, 28, 32, 36, 40, 45, 50, 56, 63, 71,
80, 90,101,113,127,144,162,182,203,226,
- 255, 255
+ 255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,
+ 255,255,255,255,255,255,255,255,255,255,255,255,255,
};
-static const int beta_table[52] = {
+static const int beta_table[52*3] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 2, 2, 2, 3,
3, 3, 3, 4, 4, 4, 6, 6, 7, 7,
8, 8, 9, 9, 10, 10, 11, 11, 12, 12,
13, 13, 14, 14, 15, 15, 16, 16, 17, 17,
- 18, 18
+ 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
};
-static const int tc0_table[52][3] = {
+static const int tc0_table[52*3][3] = {
+ { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
+ { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
+ { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
+ { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
+ { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
+ { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
+ { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
+ { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
+ { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
{ 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
{ 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
{ 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 1 },
@@ -632,7 +659,16 @@ static const int tc0_table[52][3] = {
{ 1, 1, 2 }, { 1, 2, 3 }, { 1, 2, 3 }, { 2, 2, 3 }, { 2, 2, 4 }, { 2, 3, 4 },
{ 2, 3, 4 }, { 3, 3, 5 }, { 3, 4, 6 }, { 3, 4, 6 }, { 4, 5, 7 }, { 4, 5, 8 },
{ 4, 6, 9 }, { 5, 7,10 }, { 6, 8,11 }, { 6, 8,13 }, { 7,10,14 }, { 8,11,16 },
- { 9,12,18 }, {10,13,20 }, {11,15,23 }, {13,17,25 }
+ { 9,12,18 }, {10,13,20 }, {11,15,23 }, {13,17,25 },
+ {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 },
+ {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 },
+ {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 },
+ {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 },
+ {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 },
+ {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 },
+ {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 },
+ {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 },
+ {13,17,25 }, {13,17,25 }, {13,17,25 }, {13,17,25 },
};
/* Cabac pre state table */
diff --git a/src/libffmpeg/libavcodec/h264idct.c b/contrib/ffmpeg/libavcodec/h264idct.c
old mode 100755
new mode 100644
index 3e44385d5..3506418ad
--- a/src/libffmpeg/libavcodec/h264idct.c
+++ b/contrib/ffmpeg/libavcodec/h264idct.c
@@ -2,18 +2,20 @@
* H.264 IDCT
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -28,7 +30,7 @@
static always_inline void idct_internal(uint8_t *dst, DCTELEM *block, int stride, int block_stride, int shift, int add){
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
block[0] += 1<<(shift-1);
@@ -72,7 +74,7 @@ void ff_h264_lowres_idct_put_c(uint8_t *dst, int stride, DCTELEM *block){
void ff_h264_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride){
int i;
DCTELEM (*src)[8] = (DCTELEM(*)[8])block;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
block[0] += 32;
@@ -143,7 +145,7 @@ void ff_h264_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride){
// assumes all AC coefs are 0
void ff_h264_idct_dc_add_c(uint8_t *dst, DCTELEM *block, int stride){
int i, j;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int dc = (block[0] + 32) >> 6;
for( j = 0; j < 4; j++ )
{
@@ -155,7 +157,7 @@ void ff_h264_idct_dc_add_c(uint8_t *dst, DCTELEM *block, int stride){
void ff_h264_idct8_dc_add_c(uint8_t *dst, DCTELEM *block, int stride){
int i, j;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int dc = (block[0] + 32) >> 6;
for( j = 0; j < 8; j++ )
{
diff --git a/src/libffmpeg/libavcodec/huffyuv.c b/contrib/ffmpeg/libavcodec/huffyuv.c
index d65943fcc..0aefd6d72 100644
--- a/src/libffmpeg/libavcodec/huffyuv.c
+++ b/contrib/ffmpeg/libavcodec/huffyuv.c
@@ -3,18 +3,20 @@
*
* Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
@@ -236,6 +238,7 @@ static int generate_bits_table(uint32_t *dst, uint8_t *len_table){
return 0;
}
+#ifdef CONFIG_ENCODERS
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
uint64_t counts[2*size];
int up[2*size];
@@ -291,6 +294,7 @@ static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
if(i==size) break;
}
}
+#endif /* CONFIG_ENCODERS */
static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
GetBitContext gb;
@@ -375,6 +379,7 @@ static int common_init(AVCodecContext *avctx){
return 0;
}
+#ifdef CONFIG_DECODERS
static int decode_init(AVCodecContext *avctx)
{
HYuvContext *s = avctx->priv_data;
@@ -470,7 +475,9 @@ s->bgr32=1;
return 0;
}
+#endif
+#ifdef CONFIG_ENCODERS
static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
int i;
int index= 0;
@@ -612,6 +619,7 @@ static int encode_init(AVCodecContext *avctx)
return 0;
}
+#endif /* CONFIG_ENCODERS */
static void decode_422_bitstream(HYuvContext *s, int count){
int i;
@@ -637,6 +645,7 @@ static void decode_gray_bitstream(HYuvContext *s, int count){
}
}
+#ifdef CONFIG_ENCODERS
static int encode_422_bitstream(HYuvContext *s, int count){
int i;
@@ -711,6 +720,7 @@ static int encode_gray_bitstream(HYuvContext *s, int count){
}
return 0;
}
+#endif /* CONFIG_ENCODERS */
static void decode_bgr_bitstream(HYuvContext *s, int count){
int i;
@@ -748,6 +758,7 @@ static void decode_bgr_bitstream(HYuvContext *s, int count){
}
}
+#ifdef CONFIG_DECODERS
static void draw_slice(HYuvContext *s, int y){
int h, cy;
int offset[4];
@@ -1014,6 +1025,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
+#endif
static int common_end(HYuvContext *s){
int i;
@@ -1024,6 +1036,7 @@ static int common_end(HYuvContext *s){
return 0;
}
+#ifdef CONFIG_DECODERS
static int decode_end(AVCodecContext *avctx)
{
HYuvContext *s = avctx->priv_data;
@@ -1038,7 +1051,9 @@ static int decode_end(AVCodecContext *avctx)
return 0;
}
+#endif
+#ifdef CONFIG_ENCODERS
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
HYuvContext *s = avctx->priv_data;
AVFrame *pict = data;
@@ -1218,7 +1233,9 @@ static int encode_end(AVCodecContext *avctx)
return 0;
}
+#endif /* CONFIG_ENCODERS */
+#ifdef CONFIG_DECODERS
AVCodec huffyuv_decoder = {
"huffyuv",
CODEC_TYPE_VIDEO,
@@ -1244,6 +1261,7 @@ AVCodec ffvhuff_decoder = {
CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
NULL
};
+#endif
#ifdef CONFIG_ENCODERS
@@ -1255,6 +1273,7 @@ AVCodec huffyuv_encoder = {
encode_init,
encode_frame,
encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV422P, -1},
};
AVCodec ffvhuff_encoder = {
@@ -1265,6 +1284,7 @@ AVCodec ffvhuff_encoder = {
encode_init,
encode_frame,
encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, -1},
};
#endif //CONFIG_ENCODERS
diff --git a/contrib/ffmpeg/libavcodec/i386/cavsdsp_mmx.c b/contrib/ffmpeg/libavcodec/i386/cavsdsp_mmx.c
new file mode 100644
index 000000000..51d519a5c
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/i386/cavsdsp_mmx.c
@@ -0,0 +1,518 @@
+/*
+ * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
+ * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
+ *
+ * MMX optimised DSP functions, based on H.264 optimisations by
+ * Michael Niedermayer and Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+#include "common.h"
+
+DECLARE_ALIGNED_8(static const uint64_t,ff_pw_4 ) = 0x0004000400040004ULL;
+DECLARE_ALIGNED_8(static const uint64_t,ff_pw_5 ) = 0x0005000500050005ULL;
+DECLARE_ALIGNED_8(static const uint64_t,ff_pw_7 ) = 0x0007000700070007ULL;
+DECLARE_ALIGNED_8(static const uint64_t,ff_pw_42) = 0x002A002A002A002AULL;
+DECLARE_ALIGNED_8(static const uint64_t,ff_pw_64) = 0x0040004000400040ULL;
+DECLARE_ALIGNED_8(static const uint64_t,ff_pw_96) = 0x0060006000600060ULL;
+
+/*****************************************************************************
+ *
+ * inverse transform
+ *
+ ****************************************************************************/
+
+#define SUMSUB_BA( a, b ) \
+ "paddw "#b", "#a" \n\t"\
+ "paddw "#b", "#b" \n\t"\
+ "psubw "#a", "#b" \n\t"
+
+#define SBUTTERFLY(a,b,t,n)\
+ "movq " #a ", " #t " \n\t" /* abcd */\
+ "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
+ "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */
+
+#define TRANSPOSE4(a,b,c,d,t)\
+ SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
+ SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
+ SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
+ SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
+
+static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
+{
+ asm volatile(
+ "movq 112(%0), %%mm4 \n\t" /* mm4 = src7 */
+ "movq 16(%0), %%mm5 \n\t" /* mm5 = src1 */
+ "movq 80(%0), %%mm2 \n\t" /* mm2 = src5 */
+ "movq 48(%0), %%mm7 \n\t" /* mm7 = src3 */
+ "movq %%mm4, %%mm0 \n\t"
+ "movq %%mm5, %%mm3 \n\t"
+ "movq %%mm2, %%mm6 \n\t"
+ "movq %%mm7, %%mm1 \n\t"
+
+ "paddw %%mm4, %%mm4 \n\t" /* mm4 = 2*src7 */
+ "paddw %%mm3, %%mm3 \n\t" /* mm3 = 2*src1 */
+ "paddw %%mm6, %%mm6 \n\t" /* mm6 = 2*src5 */
+ "paddw %%mm1, %%mm1 \n\t" /* mm1 = 2*src3 */
+ "paddw %%mm4, %%mm0 \n\t" /* mm0 = 3*src7 */
+ "paddw %%mm3, %%mm5 \n\t" /* mm5 = 3*src1 */
+ "paddw %%mm6, %%mm2 \n\t" /* mm2 = 3*src5 */
+ "paddw %%mm1, %%mm7 \n\t" /* mm7 = 3*src3 */
+ "psubw %%mm4, %%mm5 \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
+ "paddw %%mm6, %%mm7 \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
+ "psubw %%mm2, %%mm1 \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
+ "paddw %%mm0, %%mm3 \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */
+
+ "movq %%mm5, %%mm4 \n\t"
+ "movq %%mm7, %%mm6 \n\t"
+ "movq %%mm3, %%mm0 \n\t"
+ "movq %%mm1, %%mm2 \n\t"
+ SUMSUB_BA( %%mm7, %%mm5 ) /* mm7 = a0 + a1 mm5 = a0 - a1 */
+ "paddw %%mm3, %%mm7 \n\t" /* mm7 = a0 + a1 + a3 */
+ "paddw %%mm1, %%mm5 \n\t" /* mm5 = a0 - a1 + a2 */
+ "paddw %%mm7, %%mm7 \n\t"
+ "paddw %%mm5, %%mm5 \n\t"
+ "paddw %%mm6, %%mm7 \n\t" /* mm7 = b4 */
+ "paddw %%mm4, %%mm5 \n\t" /* mm5 = b5 */
+
+ SUMSUB_BA( %%mm1, %%mm3 ) /* mm1 = a3 + a2 mm3 = a3 - a2 */
+ "psubw %%mm1, %%mm4 \n\t" /* mm4 = a0 - a2 - a3 */
+ "movq %%mm4, %%mm1 \n\t" /* mm1 = a0 - a2 - a3 */
+ "psubw %%mm6, %%mm3 \n\t" /* mm3 = a3 - a2 - a1 */
+ "paddw %%mm1, %%mm1 \n\t"
+ "paddw %%mm3, %%mm3 \n\t"
+ "psubw %%mm2, %%mm1 \n\t" /* mm1 = b7 */
+ "paddw %%mm0, %%mm3 \n\t" /* mm3 = b6 */
+
+ "movq 32(%0), %%mm2 \n\t" /* mm2 = src2 */
+ "movq 96(%0), %%mm6 \n\t" /* mm6 = src6 */
+ "movq %%mm2, %%mm4 \n\t"
+ "movq %%mm6, %%mm0 \n\t"
+ "psllw $2, %%mm4 \n\t" /* mm4 = 4*src2 */
+ "psllw $2, %%mm6 \n\t" /* mm6 = 4*src6 */
+ "paddw %%mm4, %%mm2 \n\t" /* mm2 = 5*src2 */
+ "paddw %%mm6, %%mm0 \n\t" /* mm0 = 5*src6 */
+ "paddw %%mm2, %%mm2 \n\t"
+ "paddw %%mm0, %%mm0 \n\t"
+ "psubw %%mm0, %%mm4 \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
+ "paddw %%mm2, %%mm6 \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */
+
+ "movq (%0), %%mm2 \n\t" /* mm2 = src0 */
+ "movq 64(%0), %%mm0 \n\t" /* mm0 = src4 */
+ SUMSUB_BA( %%mm0, %%mm2 ) /* mm0 = src0+src4 mm2 = src0-src4 */
+ "psllw $3, %%mm0 \n\t"
+ "psllw $3, %%mm2 \n\t"
+ "paddw %1, %%mm0 \n\t" /* add rounding bias */
+ "paddw %1, %%mm2 \n\t" /* add rounding bias */
+
+ SUMSUB_BA( %%mm6, %%mm0 ) /* mm6 = a4 + a6 mm0 = a4 - a6 */
+ SUMSUB_BA( %%mm4, %%mm2 ) /* mm4 = a5 + a7 mm2 = a5 - a7 */
+ SUMSUB_BA( %%mm7, %%mm6 ) /* mm7 = dst0 mm6 = dst7 */
+ SUMSUB_BA( %%mm5, %%mm4 ) /* mm5 = dst1 mm4 = dst6 */
+ SUMSUB_BA( %%mm3, %%mm2 ) /* mm3 = dst2 mm2 = dst5 */
+ SUMSUB_BA( %%mm1, %%mm0 ) /* mm1 = dst3 mm0 = dst4 */
+ :: "r"(block), "m"(bias)
+ );
+}
+
+static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
+{
+ int i;
+ DECLARE_ALIGNED_8(int16_t, b2[64]);
+
+ for(i=0; i<2; i++){
+ DECLARE_ALIGNED_8(uint64_t, tmp);
+
+ cavs_idct8_1d(block+4*i, ff_pw_4);
+
+ asm volatile(
+ "psraw $3, %%mm7 \n\t"
+ "psraw $3, %%mm6 \n\t"
+ "psraw $3, %%mm5 \n\t"
+ "psraw $3, %%mm4 \n\t"
+ "psraw $3, %%mm3 \n\t"
+ "psraw $3, %%mm2 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm0 \n\t"
+ "movq %%mm7, %0 \n\t"
+ TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
+ "movq %%mm0, 8(%1) \n\t"
+ "movq %%mm6, 24(%1) \n\t"
+ "movq %%mm7, 40(%1) \n\t"
+ "movq %%mm4, 56(%1) \n\t"
+ "movq %0, %%mm7 \n\t"
+ TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
+ "movq %%mm7, (%1) \n\t"
+ "movq %%mm1, 16(%1) \n\t"
+ "movq %%mm0, 32(%1) \n\t"
+ "movq %%mm3, 48(%1) \n\t"
+ : "=m"(tmp)
+ : "r"(b2+32*i)
+ : "memory"
+ );
+ }
+
+ for(i=0; i<2; i++){
+ cavs_idct8_1d(b2+4*i, ff_pw_64);
+
+ asm volatile(
+ "psraw $7, %%mm7 \n\t"
+ "psraw $7, %%mm6 \n\t"
+ "psraw $7, %%mm5 \n\t"
+ "psraw $7, %%mm4 \n\t"
+ "psraw $7, %%mm3 \n\t"
+ "psraw $7, %%mm2 \n\t"
+ "psraw $7, %%mm1 \n\t"
+ "psraw $7, %%mm0 \n\t"
+ "movq %%mm7, (%0) \n\t"
+ "movq %%mm5, 16(%0) \n\t"
+ "movq %%mm3, 32(%0) \n\t"
+ "movq %%mm1, 48(%0) \n\t"
+ "movq %%mm0, 64(%0) \n\t"
+ "movq %%mm2, 80(%0) \n\t"
+ "movq %%mm4, 96(%0) \n\t"
+ "movq %%mm6, 112(%0) \n\t"
+ :: "r"(b2+4*i)
+ : "memory"
+ );
+ }
+
+ add_pixels_clamped_mmx(b2, dst, stride);
+
+ /* clear block */
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq %%mm7, (%0) \n\t"
+ "movq %%mm7, 8(%0) \n\t"
+ "movq %%mm7, 16(%0) \n\t"
+ "movq %%mm7, 24(%0) \n\t"
+ "movq %%mm7, 32(%0) \n\t"
+ "movq %%mm7, 40(%0) \n\t"
+ "movq %%mm7, 48(%0) \n\t"
+ "movq %%mm7, 56(%0) \n\t"
+ "movq %%mm7, 64(%0) \n\t"
+ "movq %%mm7, 72(%0) \n\t"
+ "movq %%mm7, 80(%0) \n\t"
+ "movq %%mm7, 88(%0) \n\t"
+ "movq %%mm7, 96(%0) \n\t"
+ "movq %%mm7, 104(%0) \n\t"
+ "movq %%mm7, 112(%0) \n\t"
+ "movq %%mm7, 120(%0) \n\t"
+ :: "r" (block)
+ );
+}
+
+/*****************************************************************************
+ *
+ * motion compensation
+ *
+ ****************************************************************************/
+
+/* vertical filter [-1 -2 96 42 -7 0] */
+#define QPEL_CAVSV1(A,B,C,D,E,F,OP) \
+ "movd (%0), "#F" \n\t"\
+ "movq "#C", %%mm6 \n\t"\
+ "pmullw %5, %%mm6 \n\t"\
+ "movq "#D", %%mm7 \n\t"\
+ "pmullw %6, %%mm7 \n\t"\
+ "psllw $3, "#E" \n\t"\
+ "psubw "#E", %%mm6 \n\t"\
+ "psraw $3, "#E" \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw "#E", %%mm6 \n\t"\
+ "paddw "#B", "#B" \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%mm7, "#F" \n\t"\
+ "psubw "#B", %%mm6 \n\t"\
+ "psraw $1, "#B" \n\t"\
+ "psubw "#A", %%mm6 \n\t"\
+ "paddw %4, %%mm6 \n\t"\
+ "psraw $7, %%mm6 \n\t"\
+ "packuswb %%mm6, %%mm6 \n\t"\
+ OP(%%mm6, (%1), A, d) \
+ "add %3, %1 \n\t"
+
+/* vertical filter [ 0 -1 5 5 -1 0] */
+#define QPEL_CAVSV2(A,B,C,D,E,F,OP) \
+ "movd (%0), "#F" \n\t"\
+ "movq "#C", %%mm6 \n\t"\
+ "paddw "#D", %%mm6 \n\t"\
+ "pmullw %5, %%mm6 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%mm7, "#F" \n\t"\
+ "psubw "#B", %%mm6 \n\t"\
+ "psubw "#E", %%mm6 \n\t"\
+ "paddw %4, %%mm6 \n\t"\
+ "psraw $3, %%mm6 \n\t"\
+ "packuswb %%mm6, %%mm6 \n\t"\
+ OP(%%mm6, (%1), A, d) \
+ "add %3, %1 \n\t"
+
+/* vertical filter [ 0 -7 42 96 -2 -1] */
+#define QPEL_CAVSV3(A,B,C,D,E,F,OP) \
+ "movd (%0), "#F" \n\t"\
+ "movq "#C", %%mm6 \n\t"\
+ "pmullw %6, %%mm6 \n\t"\
+ "movq "#D", %%mm7 \n\t"\
+ "pmullw %5, %%mm7 \n\t"\
+ "psllw $3, "#B" \n\t"\
+ "psubw "#B", %%mm6 \n\t"\
+ "psraw $3, "#B" \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw "#B", %%mm6 \n\t"\
+ "paddw "#E", "#E" \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%mm7, "#F" \n\t"\
+ "psubw "#E", %%mm6 \n\t"\
+ "psraw $1, "#E" \n\t"\
+ "psubw "#F", %%mm6 \n\t"\
+ "paddw %4, %%mm6 \n\t"\
+ "psraw $7, %%mm6 \n\t"\
+ "packuswb %%mm6, %%mm6 \n\t"\
+ OP(%%mm6, (%1), A, d) \
+ "add %3, %1 \n\t"
+
+
+#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
+ int w= 2;\
+ src -= 2*srcStride;\
+ \
+ while(w--){\
+ asm volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movd (%0), %%mm0 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm1 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm2 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm3 \n\t"\
+ "add %2, %0 \n\t"\
+ "movd (%0), %%mm4 \n\t"\
+ "add %2, %0 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
+ VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
+ VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
+ VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
+ VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
+ VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
+ VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
+ VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
+ \
+ : "+a"(src), "+c"(dst)\
+ : "S"((long)srcStride), "D"((long)dstStride), "m"(ADD), "m"(MUL1), "m"(MUL2)\
+ : "memory"\
+ );\
+ if(h==16){\
+ asm volatile(\
+ VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
+ VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
+ VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
+ VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
+ VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
+ VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
+ VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
+ VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
+ \
+ : "+a"(src), "+c"(dst)\
+ : "S"((long)srcStride), "D"((long)dstStride), "m"(ADD), "m"(MUL1), "m"(MUL2)\
+ : "memory"\
+ );\
+ }\
+ src += 4-(h+5)*srcStride;\
+ dst += 4-h*dstStride;\
+ }
+
+#define QPEL_CAVS(OPNAME, OP, MMX)\
+static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ int h=8;\
+ asm volatile(\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movq %5, %%mm6 \n\t"\
+ "1: \n\t"\
+ "movq (%0), %%mm0 \n\t"\
+ "movq 1(%0), %%mm2 \n\t"\
+ "movq %%mm0, %%mm1 \n\t"\
+ "movq %%mm2, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpckhbw %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "paddw %%mm3, %%mm1 \n\t"\
+ "pmullw %%mm6, %%mm0 \n\t"\
+ "pmullw %%mm6, %%mm1 \n\t"\
+ "movq -1(%0), %%mm2 \n\t"\
+ "movq 2(%0), %%mm4 \n\t"\
+ "movq %%mm2, %%mm3 \n\t"\
+ "movq %%mm4, %%mm5 \n\t"\
+ "punpcklbw %%mm7, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm7, %%mm5 \n\t"\
+ "paddw %%mm4, %%mm2 \n\t"\
+ "paddw %%mm3, %%mm5 \n\t"\
+ "psubw %%mm2, %%mm0 \n\t"\
+ "psubw %%mm5, %%mm1 \n\t"\
+ "movq %6, %%mm5 \n\t"\
+ "paddw %%mm5, %%mm0 \n\t"\
+ "paddw %%mm5, %%mm1 \n\t"\
+ "psraw $3, %%mm0 \n\t"\
+ "psraw $3, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ OP(%%mm0, (%1),%%mm5, q) \
+ "add %3, %0 \n\t"\
+ "add %4, %1 \n\t"\
+ "decl %2 \n\t"\
+ " jnz 1b \n\t"\
+ : "+a"(src), "+c"(dst), "+m"(h)\
+ : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_4)\
+ : "memory"\
+ );\
+}\
+\
+static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42) \
+}\
+\
+static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_5) \
+}\
+\
+static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42) \
+}\
+\
+static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst , src , dstStride, srcStride, 8);\
+}\
+static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst , src , dstStride, srcStride, 16);\
+ OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
+}\
+\
+static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst , src , dstStride, srcStride, 8);\
+}\
+static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst , src , dstStride, srcStride, 16);\
+ OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
+}\
+\
+static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst , src , dstStride, srcStride, 8);\
+}\
+static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst , src , dstStride, srcStride, 16);\
+ OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
+}\
+\
+static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## cavs_qpel8_h_ ## MMX(dst , src , dstStride, srcStride);\
+ OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
+ src += 8*srcStride;\
+ dst += 8*dstStride;\
+ OPNAME ## cavs_qpel8_h_ ## MMX(dst , src , dstStride, srcStride);\
+ OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
+}\
+
+#define CAVS_MC(OPNAME, SIZE, MMX) \
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
+}\
+\
+static void ff_ ## OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
+}\
+
+#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
+#define AVG_3DNOW_OP(a,b,temp, size) \
+"mov" #size " " #b ", " #temp " \n\t"\
+"pavgusb " #temp ", " #a " \n\t"\
+"mov" #size " " #a ", " #b " \n\t"
+#define AVG_MMX2_OP(a,b,temp, size) \
+"mov" #size " " #b ", " #temp " \n\t"\
+"pavgb " #temp ", " #a " \n\t"\
+"mov" #size " " #a ", " #b " \n\t"
+
+QPEL_CAVS(put_, PUT_OP, 3dnow)
+QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)
+QPEL_CAVS(put_, PUT_OP, mmx2)
+QPEL_CAVS(avg_, AVG_MMX2_OP, mmx2)
+
+CAVS_MC(put_, 8, 3dnow)
+CAVS_MC(put_, 16,3dnow)
+CAVS_MC(avg_, 8, 3dnow)
+CAVS_MC(avg_, 16,3dnow)
+CAVS_MC(put_, 8, mmx2)
+CAVS_MC(put_, 16,mmx2)
+CAVS_MC(avg_, 8, mmx2)
+CAVS_MC(avg_, 16,mmx2)
+
+void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
+void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
+void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
+void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
+
+void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx) {
+#define dspfunc(PFX, IDX, NUM) \
+ c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \
+ c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_mmx2; \
+ c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_mmx2; \
+ c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_mmx2; \
+ c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_mmx2; \
+
+ dspfunc(put_cavs_qpel, 0, 16);
+ dspfunc(put_cavs_qpel, 1, 8);
+ dspfunc(avg_cavs_qpel, 0, 16);
+ dspfunc(avg_cavs_qpel, 1, 8);
+#undef dspfunc
+ c->cavs_idct8_add = cavs_idct8_add_mmx;
+}
+
+void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx) {
+#define dspfunc(PFX, IDX, NUM) \
+ c->PFX ## _pixels_tab[IDX][ 0] = ff_ ## PFX ## NUM ## _mc00_mmx2; \
+ c->PFX ## _pixels_tab[IDX][ 2] = ff_ ## PFX ## NUM ## _mc20_3dnow; \
+ c->PFX ## _pixels_tab[IDX][ 4] = ff_ ## PFX ## NUM ## _mc01_3dnow; \
+ c->PFX ## _pixels_tab[IDX][ 8] = ff_ ## PFX ## NUM ## _mc02_3dnow; \
+ c->PFX ## _pixels_tab[IDX][12] = ff_ ## PFX ## NUM ## _mc03_3dnow; \
+
+ dspfunc(put_cavs_qpel, 0, 16);
+ dspfunc(put_cavs_qpel, 1, 8);
+ dspfunc(avg_cavs_qpel, 0, 16);
+ dspfunc(avg_cavs_qpel, 1, 8);
+#undef dspfunc
+ c->cavs_idct8_add = cavs_idct8_add_mmx;
+}
diff --git a/src/libffmpeg/libavcodec/i386/cputest.c b/contrib/ffmpeg/libavcodec/i386/cputest.c
index a66bdbe98..262786b71 100644
--- a/src/libffmpeg/libavcodec/i386/cputest.c
+++ b/contrib/ffmpeg/libavcodec/i386/cputest.c
@@ -1,9 +1,30 @@
-/* Cpu detection code, extracted from mmx.h ((c)1997-99 by H. Dietz
- and R. Fisher). Converted to C and improved by Fabrice Bellard */
+/*
+ * CPU detection code, extracted from mmx.h
+ * (c)1997-99 by H. Dietz and R. Fisher
+ * Converted to C and improved by Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
#include <stdlib.h>
#include "../dsputil.h"
+#undef printf
+
#ifdef ARCH_X86_64
# define REG_b "rbx"
# define REG_S "rsi"
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c b/contrib/ffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c
index b49c880a7..e09a1007e 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c
+++ b/contrib/ffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c
@@ -2,18 +2,20 @@
* Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
* Loren Merritt
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c b/contrib/ffmpeg/libavcodec/i386/dsputil_mmx.c
index a2cbab8ce..5675828a4 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c
+++ b/contrib/ffmpeg/libavcodec/i386/dsputil_mmx.c
@@ -3,18 +3,20 @@
* Copyright (c) 2000, 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
@@ -29,7 +31,6 @@
//#undef NDEBUG
//#include <assert.h>
-extern const uint8_t ff_h263_loop_filter_strength[32];
extern void ff_idct_xvid_mmx(short *block);
extern void ff_idct_xvid_mmx2(short *block);
@@ -40,6 +41,9 @@ static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x01
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;
+static const uint64_t ff_pdw_80000000[2] attribute_used __attribute__ ((aligned(16))) =
+{0x8000000080000000ULL, 0x8000000080000000ULL};
+
static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_4 attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
@@ -50,10 +54,15 @@ static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0
static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;
+static const uint64_t ff_pb_1 attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
+static const uint64_t ff_pb_3 attribute_used __attribute__ ((aligned(8))) = 0x0303030303030303ULL;
+static const uint64_t ff_pb_7 attribute_used __attribute__ ((aligned(8))) = 0x0707070707070707ULL;
static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
+static const uint64_t ff_pb_A1 attribute_used __attribute__ ((aligned(8))) = 0xA1A1A1A1A1A1A1A1ULL;
+static const uint64_t ff_pb_5F attribute_used __attribute__ ((aligned(8))) = 0x5F5F5F5F5F5F5F5FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;
-#define JUMPALIGN() __asm __volatile (".balign 8"::)
+#define JUMPALIGN() __asm __volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)
#define MOVQ_WONE(regd) \
@@ -201,7 +210,7 @@ static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
asm volatile(
"mov $-128, %%"REG_a" \n\t"
"pxor %%mm7, %%mm7 \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%0), %%mm0 \n\t"
"movq (%0, %2), %%mm2 \n\t"
@@ -229,7 +238,7 @@ static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"mov $-128, %%"REG_a" \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%0), %%mm0 \n\t"
"movq (%1), %%mm2 \n\t"
@@ -372,7 +381,7 @@ static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size
{
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movd (%1), %%mm0 \n\t"
"movd (%1, %3), %%mm1 \n\t"
@@ -398,7 +407,7 @@ static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size
{
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq (%1, %3), %%mm1 \n\t"
@@ -424,7 +433,7 @@ static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_siz
{
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq 8(%1), %%mm4 \n\t"
@@ -625,22 +634,10 @@ static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
- "movd %0, %%mm0 \n\t"
- "movd %1, %%mm1 \n\t"
- "movd %2, %%mm2 \n\t"
-
- :
- : "m" (*(uint32_t*)(src + 0*src_stride)),
- "m" (*(uint32_t*)(src + 1*src_stride)),
- "m" (*(uint32_t*)(src + 2*src_stride))
- );
- asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
- "movd %0, %%mm3 \n\t"
-
- :
- : "m" (*(uint32_t*)(src + 3*src_stride))
- );
- asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
+ "movd %4, %%mm0 \n\t"
+ "movd %5, %%mm1 \n\t"
+ "movd %6, %%mm2 \n\t"
+ "movd %7, %%mm3 \n\t"
"punpcklbw %%mm1, %%mm0 \n\t"
"punpcklbw %%mm3, %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
@@ -657,6 +654,10 @@ static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int
"=m" (*(uint32_t*)(dst + 1*dst_stride)),
"=m" (*(uint32_t*)(dst + 2*dst_stride)),
"=m" (*(uint32_t*)(dst + 3*dst_stride))
+ : "m" (*(uint32_t*)(src + 0*src_stride)),
+ "m" (*(uint32_t*)(src + 1*src_stride)),
+ "m" (*(uint32_t*)(src + 2*src_stride)),
+ "m" (*(uint32_t*)(src + 3*src_stride))
);
}
@@ -1185,8 +1186,8 @@ static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, in
else score1 = sse16_mmx(c, pix1, pix2, line_size, h);
score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
- if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
- else return score1 + ABS(score2)*8;
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
}
static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
@@ -1194,8 +1195,8 @@ static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int
int score1= sse8_mmx(c, pix1, pix2, line_size, h);
int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
- if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
- else return score1 + ABS(score2)*8;
+ if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight;
+ else return score1 + FFABS(score2)*8;
}
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
@@ -2403,6 +2404,53 @@ QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
+/***********************************/
+/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */
+
+#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
+static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
+}
+#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
+static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
+}
+
+#define QPEL_2TAP(OPNAME, SIZE, MMX)\
+QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
+QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
+QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
+static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
+ OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
+static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
+ OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
+static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
+ OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
+static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
+}\
+static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
+}\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\
+QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
+
+QPEL_2TAP(put_, 16, mmx2)
+QPEL_2TAP(avg_, 16, mmx2)
+QPEL_2TAP(put_, 8, mmx2)
+QPEL_2TAP(avg_, 8, mmx2)
+QPEL_2TAP(put_, 16, 3dnow)
+QPEL_2TAP(avg_, 16, 3dnow)
+QPEL_2TAP(put_, 8, 3dnow)
+QPEL_2TAP(avg_, 8, 3dnow)
+
+
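
[Editor's note] As the comment says, these 2-tap variants trade spec compliance for speed: every fractional position is built out of pavgb averages. A hedged scalar picture of a few of the positions wired up by QPEL_2TAP follows; avg2() models pavgb, i.e. (a+b+1)>>1, and the _l3_ helpers are the ones added to dsputil_mmx_avg.h further down in this patch.

    static inline int avg2(int a, int b) { return (a + b + 1) >> 1; }

    /* mc20, horizontal half-pel: the plain x2 average                        */
    /*     dst[x] = avg2(src[x], src[x+1]);                                   */
    /* mc10, horizontal quarter-pel, via _l3_ with offsets (1, 0):            */
    /*     dst[x] = avg2(avg2(src[x+1], src[x]), src[x]);   ~ (3*a + b)/4     */
    /* mc11, diagonal, via _l3_ with offsets (stride, 1):                     */
    /*     dst[x] = avg2(avg2(src[x+stride], src[x+1]), src[x]);              */
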
#if 0
static void just_return() { return; }
#endif
@@ -2497,48 +2545,44 @@ static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int o
"pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
"pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
- "movd %3, %%mm5 \n\t"
- "movd %2, %%mm4 \n\t"
+ "movd %4, %%mm5 \n\t"
+ "movd %3, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
"pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
- "movd %1, %%mm5 \n\t"
- "movd %0, %%mm4 \n\t"
+ "movd %2, %%mm5 \n\t"
+ "movd %1, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
"pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
- "paddw %4, %%mm1 \n\t"
+ "paddw %5, %%mm1 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm2, %%mm0 \n\t"
- "psrlw %5, %%mm0 \n\t"
+ "psrlw %6, %%mm0 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
+ "movd %%mm0, %0 \n\t"
- :
+ : "=m"(dst[x+y*stride])
: "m"(src[0]), "m"(src[1]),
"m"(src[stride]), "m"(src[stride+1]),
"m"(*r4), "m"(shift2)
);
-
- asm volatile(
- "movd %%mm0, %0 \n\t"
- : "=m"(dst[x+y*stride])
- :
- );
src += stride;
}
src += 4-h*stride;
}
}
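
[Editor's note] The register comments above spell out the per-pixel blend; folding the final movd into the same asm block simply lets the store use that block's output operand. A hedged scalar model of one output pixel (s, r and the shift follow the comments; the helper name is illustrative; the clamp mirrors packuswb):

    static uint8_t gmc_pixel_c(const uint8_t *src, int stride,
                               int dx, int dy, int s, int r, int shift)
    {
        int v = ( src[0]          * (s - dx) * (s - dy)
                + src[1]          *      dx  * (s - dy)
                + src[stride]     * (s - dx) *      dy
                + src[stride + 1] *      dx  *      dy
                + r ) >> shift;
        return v < 0 ? 0 : v > 255 ? 255 : v;   /* packuswb saturates to [0,255] */
    }
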
+#ifdef CONFIG_ENCODERS
static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
long i=0;
- assert(ABS(scale) < 256);
+ assert(FFABS(scale) < 256);
scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
asm volatile(
@@ -2586,7 +2630,7 @@ static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[6
static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
long i=0;
- if(ABS(scale) < 256){
+ if(FFABS(scale) < 256){
scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
asm volatile(
"pcmpeqw %%mm6, %%mm6 \n\t" // -1w
@@ -2620,9 +2664,10 @@ static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
}
}
}
+#endif /* CONFIG_ENCODERS */
#define PREFETCH(name, op) \
-void name(void *mem, int stride, int h){\
+static void name(void *mem, int stride, int h){\
const uint8_t *p= mem;\
do{\
asm volatile(#op" %0" :: "m"(*p));\
@@ -2661,6 +2706,7 @@ void ff_vp3_dsp_init_mmx(void);
/* XXX: those functions should be suppressed ASAP when all IDCTs are
converted */
+#ifdef CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_mmx_idct (block);
@@ -2681,6 +2727,7 @@ static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *bloc
ff_mmxext_idct (block);
add_pixels_clamped_mmx(block, dest, line_size);
}
+#endif
static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_vp3_idct_sse2(block);
@@ -2701,7 +2748,6 @@ static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
ff_vp3_idct_mmx(block);
add_pixels_clamped_mmx(block, dest, line_size);
}
-#ifdef CONFIG_GPL
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
ff_idct_xvid_mmx (block);
@@ -2722,7 +2768,274 @@ static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
ff_idct_xvid_mmx2 (block);
add_pixels_clamped_mmx(block, dest, line_size);
}
-#endif
+
+static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
+{
+ int i;
+ asm volatile("pxor %%mm7, %%mm7":);
+ for(i=0; i<blocksize; i+=2) {
+ asm volatile(
+ "movq %0, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
+ "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
+ "pslld $31, %%mm2 \n\t" // keep only the sign bit
+ "pxor %%mm2, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pand %%mm1, %%mm3 \n\t"
+ "pandn %%mm1, %%mm4 \n\t"
+ "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
+ "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
+ "movq %%mm3, %1 \n\t"
+ "movq %%mm0, %0 \n\t"
+ :"+m"(mag[i]), "+m"(ang[i])
+ ::"memory"
+ );
+ }
+ asm volatile("femms");
+}
+static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
+{
+ int i;
+
+ asm volatile(
+ "movaps %0, %%xmm5 \n\t"
+ ::"m"(ff_pdw_80000000[0])
+ );
+ for(i=0; i<blocksize; i+=4) {
+ asm volatile(
+ "movaps %0, %%xmm0 \n\t"
+ "movaps %1, %%xmm1 \n\t"
+ "xorps %%xmm2, %%xmm2 \n\t"
+ "xorps %%xmm3, %%xmm3 \n\t"
+ "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
+ "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
+ "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit
+ "xorps %%xmm2, %%xmm1 \n\t"
+ "movaps %%xmm3, %%xmm4 \n\t"
+ "andps %%xmm1, %%xmm3 \n\t"
+ "andnps %%xmm1, %%xmm4 \n\t"
+ "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
+ "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
+ "movaps %%xmm3, %1 \n\t"
+ "movaps %%xmm0, %0 \n\t"
+ :"+m"(mag[i]), "+m"(ang[i])
+ ::"memory"
+ );
+ }
+}
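
[Editor's note] Both versions implement the sign-juggling trick described in the inline comments. For orientation, a scalar version of what Vorbis inverse coupling does; this is a sketch of the generic C routine these replace, reconstructed from memory rather than from this patch:

    static void vorbis_inverse_coupling_c(float *mag, float *ang, int blocksize)
    {
        int i;
        for (i = 0; i < blocksize; i++) {
            if (mag[i] > 0.0f) {
                if (ang[i] > 0.0f) { ang[i] = mag[i] - ang[i]; }
                else               { float t = ang[i]; ang[i] = mag[i]; mag[i] += t; }
            } else {
                if (ang[i] > 0.0f) { ang[i] += mag[i]; }
                else               { float t = ang[i]; ang[i] = mag[i]; mag[i] -= t; }
            }
        }
    }
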
+
+static void vector_fmul_3dnow(float *dst, const float *src, int len){
+ long i = (len-4)*4;
+ asm volatile(
+ "1: \n\t"
+ "movq (%1,%0), %%mm0 \n\t"
+ "movq 8(%1,%0), %%mm1 \n\t"
+ "pfmul (%2,%0), %%mm0 \n\t"
+ "pfmul 8(%2,%0), %%mm1 \n\t"
+ "movq %%mm0, (%1,%0) \n\t"
+ "movq %%mm1, 8(%1,%0) \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ "femms \n\t"
+ :"+r"(i)
+ :"r"(dst), "r"(src)
+ :"memory"
+ );
+}
+static void vector_fmul_sse(float *dst, const float *src, int len){
+ long i = (len-8)*4;
+ asm volatile(
+ "1: \n\t"
+ "movaps (%1,%0), %%xmm0 \n\t"
+ "movaps 16(%1,%0), %%xmm1 \n\t"
+ "mulps (%2,%0), %%xmm0 \n\t"
+ "mulps 16(%2,%0), %%xmm1 \n\t"
+ "movaps %%xmm0, (%1,%0) \n\t"
+ "movaps %%xmm1, 16(%1,%0) \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i)
+ :"r"(dst), "r"(src)
+ :"memory"
+ );
+}
+
+static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
+ long i = len*4-16;
+ asm volatile(
+ "1: \n\t"
+ "pswapd 8(%1), %%mm0 \n\t"
+ "pswapd (%1), %%mm1 \n\t"
+ "pfmul (%3,%0), %%mm0 \n\t"
+ "pfmul 8(%3,%0), %%mm1 \n\t"
+ "movq %%mm0, (%2,%0) \n\t"
+ "movq %%mm1, 8(%2,%0) \n\t"
+ "add $16, %1 \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i), "+r"(src1)
+ :"r"(dst), "r"(src0)
+ );
+ asm volatile("femms");
+}
+static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
+ long i = len*4-32;
+ asm volatile(
+ "1: \n\t"
+ "movaps 16(%1), %%xmm0 \n\t"
+ "movaps (%1), %%xmm1 \n\t"
+ "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
+ "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
+ "mulps (%3,%0), %%xmm0 \n\t"
+ "mulps 16(%3,%0), %%xmm1 \n\t"
+ "movaps %%xmm0, (%2,%0) \n\t"
+ "movaps %%xmm1, 16(%2,%0) \n\t"
+ "add $32, %1 \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i), "+r"(src1)
+ :"r"(dst), "r"(src0)
+ );
+}
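
[Editor's note] These are straight element-wise loops unrolled over MMX/SSE registers; the reverse variant also walks one operand backwards. Scalar equivalents, as a sketch of the generic C fallbacks the DSPContext would otherwise use:

    static void vector_fmul_c(float *dst, const float *src, int len)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i] *= src[i];
    }

    static void vector_fmul_reverse_c(float *dst, const float *src0,
                                      const float *src1, int len)
    {
        int i;
        src1 += len - 1;
        for (i = 0; i < len; i++)
            dst[i] = src0[i] * src1[-i];
    }
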
+
+static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
+ const float *src2, int src3, int len, int step){
+ long i = (len-4)*4;
+ if(step == 2 && src3 == 0){
+ dst += (len-4)*2;
+ asm volatile(
+ "1: \n\t"
+ "movq (%2,%0), %%mm0 \n\t"
+ "movq 8(%2,%0), %%mm1 \n\t"
+ "pfmul (%3,%0), %%mm0 \n\t"
+ "pfmul 8(%3,%0), %%mm1 \n\t"
+ "pfadd (%4,%0), %%mm0 \n\t"
+ "pfadd 8(%4,%0), %%mm1 \n\t"
+ "movd %%mm0, (%1) \n\t"
+ "movd %%mm1, 16(%1) \n\t"
+ "psrlq $32, %%mm0 \n\t"
+ "psrlq $32, %%mm1 \n\t"
+ "movd %%mm0, 8(%1) \n\t"
+ "movd %%mm1, 24(%1) \n\t"
+ "sub $32, %1 \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i), "+r"(dst)
+ :"r"(src0), "r"(src1), "r"(src2)
+ :"memory"
+ );
+ }
+ else if(step == 1 && src3 == 0){
+ asm volatile(
+ "1: \n\t"
+ "movq (%2,%0), %%mm0 \n\t"
+ "movq 8(%2,%0), %%mm1 \n\t"
+ "pfmul (%3,%0), %%mm0 \n\t"
+ "pfmul 8(%3,%0), %%mm1 \n\t"
+ "pfadd (%4,%0), %%mm0 \n\t"
+ "pfadd 8(%4,%0), %%mm1 \n\t"
+ "movq %%mm0, (%1,%0) \n\t"
+ "movq %%mm1, 8(%1,%0) \n\t"
+ "sub $16, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i)
+ :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+ :"memory"
+ );
+ }
+ else
+ ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
+ asm volatile("femms");
+}
+static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
+ const float *src2, int src3, int len, int step){
+ long i = (len-8)*4;
+ if(step == 2 && src3 == 0){
+ dst += (len-8)*2;
+ asm volatile(
+ "1: \n\t"
+ "movaps (%2,%0), %%xmm0 \n\t"
+ "movaps 16(%2,%0), %%xmm1 \n\t"
+ "mulps (%3,%0), %%xmm0 \n\t"
+ "mulps 16(%3,%0), %%xmm1 \n\t"
+ "addps (%4,%0), %%xmm0 \n\t"
+ "addps 16(%4,%0), %%xmm1 \n\t"
+ "movss %%xmm0, (%1) \n\t"
+ "movss %%xmm1, 32(%1) \n\t"
+ "movhlps %%xmm0, %%xmm2 \n\t"
+ "movhlps %%xmm1, %%xmm3 \n\t"
+ "movss %%xmm2, 16(%1) \n\t"
+ "movss %%xmm3, 48(%1) \n\t"
+ "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
+ "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
+ "movss %%xmm0, 8(%1) \n\t"
+ "movss %%xmm1, 40(%1) \n\t"
+ "movhlps %%xmm0, %%xmm2 \n\t"
+ "movhlps %%xmm1, %%xmm3 \n\t"
+ "movss %%xmm2, 24(%1) \n\t"
+ "movss %%xmm3, 56(%1) \n\t"
+ "sub $64, %1 \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i), "+r"(dst)
+ :"r"(src0), "r"(src1), "r"(src2)
+ :"memory"
+ );
+ }
+ else if(step == 1 && src3 == 0){
+ asm volatile(
+ "1: \n\t"
+ "movaps (%2,%0), %%xmm0 \n\t"
+ "movaps 16(%2,%0), %%xmm1 \n\t"
+ "mulps (%3,%0), %%xmm0 \n\t"
+ "mulps 16(%3,%0), %%xmm1 \n\t"
+ "addps (%4,%0), %%xmm0 \n\t"
+ "addps 16(%4,%0), %%xmm1 \n\t"
+ "movaps %%xmm0, (%1,%0) \n\t"
+ "movaps %%xmm1, 16(%1,%0) \n\t"
+ "sub $32, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(i)
+ :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
+ :"memory"
+ );
+ }
+ else
+ ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
+}
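
[Editor's note] Only the (step==1, src3==0) and (step==2, src3==0) shapes are vectorized; anything else falls back to ff_vector_fmul_add_add_c. A hedged scalar model of the operation (the signature follows the call above; the generic version is assumed, not shown in this patch, to behave like this):

    static void vector_fmul_add_add_sketch(float *dst, const float *src0,
                                           const float *src1, const float *src2,
                                           int src3, int len, int step)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i * step] = src0[i] * src1[i] + src2[i] + src3;
    }
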
+
+static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){
+ // not bit-exact: pf2id uses different rounding than C and SSE
+ int i;
+ for(i=0; i<len; i+=4) {
+ asm volatile(
+ "pf2id %1, %%mm0 \n\t"
+ "pf2id %2, %%mm1 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %0 \n\t"
+ :"=m"(dst[i])
+ :"m"(src[i]), "m"(src[i+2])
+ );
+ }
+ asm volatile("femms");
+}
+static void float_to_int16_sse(int16_t *dst, const float *src, int len){
+ int i;
+ for(i=0; i<len; i+=4) {
+ asm volatile(
+ "cvtps2pi %1, %%mm0 \n\t"
+ "cvtps2pi %2, %%mm1 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %0 \n\t"
+ :"=m"(dst[i])
+ :"m"(src[i]), "m"(src[i+2])
+ );
+ }
+ asm volatile("emms");
+}
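
[Editor's note] Both routines convert four floats at a time and saturate with packssdw; as the comment notes, pf2id truncates while cvtps2pi honours the current SSE rounding mode, hence the "not bit-exact" warning. A scalar sketch (the plain cast below truncates, so it models the 3DNow! path rather than the rounding C/SSE paths):

    static void float_to_int16_sketch(int16_t *dst, const float *src, int len)
    {
        int i;
        for (i = 0; i < len; i++) {
            int v = (int)src[i];                                    /* truncates, like pf2id */
            dst[i] = v < -32768 ? -32768 : v > 32767 ? 32767 : v;   /* packssdw saturation   */
        }
    }
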
#ifdef CONFIG_SNOW_ENCODER
extern void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width);
@@ -2782,6 +3095,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->idct_add= ff_simple_idct_add_mmx;
c->idct = ff_simple_idct_mmx;
c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
+#ifdef CONFIG_GPL
}else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
if(mm_flags & MM_MMXEXT){
c->idct_put= ff_libmpeg2mmx2_idct_put;
@@ -2793,8 +3107,10 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->idct = ff_mmx_idct;
}
c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
-#if 0
- }else if(idct_algo==FF_IDCT_VP3){
+#endif
+ }else if(idct_algo==FF_IDCT_VP3 &&
+ avctx->codec->id!=CODEC_ID_THEORA &&
+ !(avctx->flags & CODEC_FLAG_BITEXACT)){
if(mm_flags & MM_SSE2){
c->idct_put= ff_vp3_idct_put_sse2;
c->idct_add= ff_vp3_idct_add_sse2;
@@ -2807,10 +3123,8 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->idct = ff_vp3_idct_mmx;
c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
}
-#endif
}else if(idct_algo==FF_IDCT_CAVS){
c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
-#ifdef CONFIG_GPL
}else if(idct_algo==FF_IDCT_XVIDMMX){
if(mm_flags & MM_MMXEXT){
c->idct_put= ff_idct_xvid_mmx2_put;
@@ -2821,7 +3135,6 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->idct_add= ff_idct_xvid_mmx_add;
c->idct = ff_idct_xvid_mmx;
}
-#endif
}
}
@@ -3012,6 +3325,11 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
dspfunc(avg_h264_qpel, 0, 16);
dspfunc(avg_h264_qpel, 1, 8);
dspfunc(avg_h264_qpel, 2, 4);
+
+ dspfunc(put_2tap_qpel, 0, 16);
+ dspfunc(put_2tap_qpel, 1, 8);
+ dspfunc(avg_2tap_qpel, 0, 16);
+ dspfunc(avg_2tap_qpel, 1, 8);
#undef dspfunc
c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
@@ -3024,6 +3342,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
+ c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
@@ -3134,6 +3453,11 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
dspfunc(avg_h264_qpel, 1, 8);
dspfunc(avg_h264_qpel, 2, 4);
+ dspfunc(put_2tap_qpel, 0, 16);
+ dspfunc(put_2tap_qpel, 1, 8);
+ dspfunc(avg_2tap_qpel, 0, 16);
+ dspfunc(avg_2tap_qpel, 1, 8);
+
c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
}
@@ -3150,6 +3474,24 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
}
#endif
+
+ if(mm_flags & MM_3DNOW){
+ c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
+ c->vector_fmul = vector_fmul_3dnow;
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT))
+ c->float_to_int16 = float_to_int16_3dnow;
+ }
+ if(mm_flags & MM_3DNOWEXT)
+ c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
+ if(mm_flags & MM_SSE){
+ c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
+ c->vector_fmul = vector_fmul_sse;
+ c->float_to_int16 = float_to_int16_sse;
+ c->vector_fmul_reverse = vector_fmul_reverse_sse;
+ c->vector_fmul_add_add = vector_fmul_add_add_sse;
+ }
+ if(mm_flags & MM_3DNOW)
+ c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
}
#ifdef CONFIG_ENCODERS
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h b/contrib/ffmpeg/libavcodec/i386/dsputil_mmx_avg.h
index 440c5bb9c..b365cea57 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h
+++ b/contrib/ffmpeg/libavcodec/i386/dsputil_mmx_avg.h
@@ -3,18 +3,20 @@
* Copyright (c) 2000, 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
@@ -754,7 +756,7 @@ static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line
"lea (%3, %3), %%"REG_a" \n\t"
"movq (%1), %%mm0 \n\t"
PAVGB" 1(%1), %%mm0 \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm2 \n\t"
"movq (%1, %3), %%mm1 \n\t"
@@ -818,3 +820,51 @@ static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int lin
DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h);
}
+#define QPEL_2TAP_L3(OPNAME) \
+static void DEF(OPNAME ## 2tap_qpel16_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\
+ asm volatile(\
+ "1: \n\t"\
+ "movq (%1,%2), %%mm0 \n\t"\
+ "movq 8(%1,%2), %%mm1 \n\t"\
+ PAVGB" (%1,%3), %%mm0 \n\t"\
+ PAVGB" 8(%1,%3), %%mm1 \n\t"\
+ PAVGB" (%1), %%mm0 \n\t"\
+ PAVGB" 8(%1), %%mm1 \n\t"\
+ STORE_OP( (%1,%4),%%mm0)\
+ STORE_OP(8(%1,%4),%%mm1)\
+ "movq %%mm0, (%1,%4) \n\t"\
+ "movq %%mm1, 8(%1,%4) \n\t"\
+ "add %5, %1 \n\t"\
+ "decl %0 \n\t"\
+ "jnz 1b \n\t"\
+ :"+g"(h), "+r"(src)\
+ :"r"((long)off1), "r"((long)off2),\
+ "r"((long)(dst-src)), "r"((long)stride)\
+ :"memory"\
+ );\
+}\
+static void DEF(OPNAME ## 2tap_qpel8_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\
+ asm volatile(\
+ "1: \n\t"\
+ "movq (%1,%2), %%mm0 \n\t"\
+ PAVGB" (%1,%3), %%mm0 \n\t"\
+ PAVGB" (%1), %%mm0 \n\t"\
+ STORE_OP((%1,%4),%%mm0)\
+ "movq %%mm0, (%1,%4) \n\t"\
+ "add %5, %1 \n\t"\
+ "decl %0 \n\t"\
+ "jnz 1b \n\t"\
+ :"+g"(h), "+r"(src)\
+ :"r"((long)off1), "r"((long)off2),\
+ "r"((long)(dst-src)), "r"((long)stride)\
+ :"memory"\
+ );\
+}
+
+#define STORE_OP(a,b) PAVGB" "#a","#b" \n\t"
+QPEL_2TAP_L3(avg_)
+#undef STORE_OP
+#define STORE_OP(a,b)
+QPEL_2TAP_L3(put_)
+#undef STORE_OP
+#undef QPEL_2TAP_L3
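
[Editor's note] The only difference between the avg_ and put_ instantiations is STORE_OP: for avg_ the freshly computed 3-sample average is pavgb'd with what is already at the destination before the movq store, while for put_ it expands to nothing. In scalar terms (a sketch; helper names are illustrative):

    static inline uint8_t pavgb_c(uint8_t a, uint8_t b) { return (a + b + 1) >> 1; }

    static inline void store_put(uint8_t *dst, uint8_t t) { *dst = t;                }
    static inline void store_avg(uint8_t *dst, uint8_t t) { *dst = pavgb_c(*dst, t); }
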
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h b/contrib/ffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
index 3ecd776b8..f53b34662 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
+++ b/contrib/ffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
@@ -3,18 +3,20 @@
* Copyright (c) 2000, 2001 Fabrice Bellard.
* Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
@@ -28,7 +30,7 @@ static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line
MOVQ_BFE(mm6);
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm1 \n\t"
@@ -69,7 +71,7 @@ static void attribute_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, u
"movq %%mm4, (%3) \n\t"
"add %5, %3 \n\t"
"decl %0 \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq (%2), %%mm1 \n\t"
@@ -110,7 +112,7 @@ static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int lin
MOVQ_BFE(mm6);
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm1 \n\t"
@@ -168,7 +170,7 @@ static void attribute_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1,
"movq %%mm5, 8(%3) \n\t"
"add %5, %3 \n\t"
"decl %0 \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq (%2), %%mm1 \n\t"
@@ -206,7 +208,7 @@ static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
"movq (%1), %%mm0 \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq (%1, %%"REG_a"),%%mm2 \n\t"
@@ -246,7 +248,7 @@ static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int lin
"paddusw %%mm1, %%mm5 \n\t"
"xor %%"REG_a", %%"REG_a" \n\t"
"add %3, %1 \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq 1(%1, %%"REG_a"), %%mm2 \n\t"
@@ -458,7 +460,7 @@ static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line
__asm __volatile(
"lea (%3, %3), %%"REG_a" \n\t"
"movq (%1), %%mm0 \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq (%1, %%"REG_a"), %%mm2 \n\t"
@@ -509,7 +511,7 @@ static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int lin
"paddusw %%mm1, %%mm5 \n\t"
"xor %%"REG_a", %%"REG_a" \n\t"
"add %3, %1 \n\t"
- ".balign 8 \n\t"
+ ASMALIGN(3)
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq 1(%1, %%"REG_a"), %%mm2 \n\t"
diff --git a/src/libffmpeg/libavcodec/i386/fdct_mmx.c b/contrib/ffmpeg/libavcodec/i386/fdct_mmx.c
index f6150c83c..2ffbfecf6 100644
--- a/src/libffmpeg/libavcodec/i386/fdct_mmx.c
+++ b/contrib/ffmpeg/libavcodec/i386/fdct_mmx.c
@@ -12,6 +12,22 @@
* Also of inspiration:
* a page about fdct at http://www.geocities.com/ssavekar/dct.htm
* Skal's fdct at http://skal.planet-d.net/coding/dct.html
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "common.h"
#include "../dsputil.h"
@@ -51,7 +67,7 @@ static const int64_t fdct_one_corr ATTR_ALIGN(8) = 0x0001000100010001LL;
static const int32_t fdct_r_row[2] ATTR_ALIGN(8) = {RND_FRW_ROW, RND_FRW_ROW };
-struct
+static struct
{
const int32_t fdct_r_row_sse2[4] ATTR_ALIGN(16);
} fdct_r_row_sse2 ATTR_ALIGN(16)=
@@ -134,7 +150,7 @@ static const int16_t tab_frw_01234567[] ATTR_ALIGN(8) = { // forward_dct coeff
29692, -12299, 26722, -31521,
};
-struct
+static struct
{
const int16_t tab_frw_01234567_sse2[256] ATTR_ALIGN(16);
} tab_frw_01234567_sse2 ATTR_ALIGN(16) =
@@ -351,60 +367,60 @@ static always_inline void fdct_col(const int16_t *in, int16_t *out, int offset)
static always_inline void fdct_row_sse2(const int16_t *in, int16_t *out)
{
asm volatile(
- ".macro FDCT_ROW_SSE2_H1 i t \n\t"
- "movq \\i(%0), %%xmm2 \n\t"
- "movq \\i+8(%0), %%xmm0 \n\t"
- "movdqa \\t+32(%1), %%xmm3 \n\t"
- "movdqa \\t+48(%1), %%xmm7 \n\t"
- "movdqa \\t(%1), %%xmm4 \n\t"
- "movdqa \\t+16(%1), %%xmm5 \n\t"
- ".endm \n\t"
- ".macro FDCT_ROW_SSE2_H2 i t \n\t"
- "movq \\i(%0), %%xmm2 \n\t"
- "movq \\i+8(%0), %%xmm0 \n\t"
- "movdqa \\t+32(%1), %%xmm3 \n\t"
- "movdqa \\t+48(%1), %%xmm7 \n\t"
- ".endm \n\t"
- ".macro FDCT_ROW_SSE2 i \n\t"
- "movq %%xmm2, %%xmm1 \n\t"
- "pshuflw $27, %%xmm0, %%xmm0 \n\t"
- "paddsw %%xmm0, %%xmm1 \n\t"
- "psubsw %%xmm0, %%xmm2 \n\t"
- "punpckldq %%xmm2, %%xmm1 \n\t"
- "pshufd $78, %%xmm1, %%xmm2 \n\t"
- "pmaddwd %%xmm2, %%xmm3 \n\t"
- "pmaddwd %%xmm1, %%xmm7 \n\t"
- "pmaddwd %%xmm5, %%xmm2 \n\t"
- "pmaddwd %%xmm4, %%xmm1 \n\t"
- "paddd %%xmm7, %%xmm3 \n\t"
- "paddd %%xmm2, %%xmm1 \n\t"
- "paddd %%xmm6, %%xmm3 \n\t"
- "paddd %%xmm6, %%xmm1 \n\t"
- "psrad %3, %%xmm3 \n\t"
- "psrad %3, %%xmm1 \n\t"
- "packssdw %%xmm3, %%xmm1 \n\t"
- "movdqa %%xmm1, \\i(%4) \n\t"
- ".endm \n\t"
+#define FDCT_ROW_SSE2_H1(i,t) \
+ "movq " #i "(%0), %%xmm2 \n\t" \
+ "movq " #i "+8(%0), %%xmm0 \n\t" \
+ "movdqa " #t "+32(%1), %%xmm3 \n\t" \
+ "movdqa " #t "+48(%1), %%xmm7 \n\t" \
+ "movdqa " #t "(%1), %%xmm4 \n\t" \
+ "movdqa " #t "+16(%1), %%xmm5 \n\t"
+
+#define FDCT_ROW_SSE2_H2(i,t) \
+ "movq " #i "(%0), %%xmm2 \n\t" \
+ "movq " #i "+8(%0), %%xmm0 \n\t" \
+ "movdqa " #t "+32(%1), %%xmm3 \n\t" \
+ "movdqa " #t "+48(%1), %%xmm7 \n\t"
+
+#define FDCT_ROW_SSE2(i) \
+ "movq %%xmm2, %%xmm1 \n\t" \
+ "pshuflw $27, %%xmm0, %%xmm0 \n\t" \
+ "paddsw %%xmm0, %%xmm1 \n\t" \
+ "psubsw %%xmm0, %%xmm2 \n\t" \
+ "punpckldq %%xmm2, %%xmm1 \n\t" \
+ "pshufd $78, %%xmm1, %%xmm2 \n\t" \
+ "pmaddwd %%xmm2, %%xmm3 \n\t" \
+ "pmaddwd %%xmm1, %%xmm7 \n\t" \
+ "pmaddwd %%xmm5, %%xmm2 \n\t" \
+ "pmaddwd %%xmm4, %%xmm1 \n\t" \
+ "paddd %%xmm7, %%xmm3 \n\t" \
+ "paddd %%xmm2, %%xmm1 \n\t" \
+ "paddd %%xmm6, %%xmm3 \n\t" \
+ "paddd %%xmm6, %%xmm1 \n\t" \
+ "psrad %3, %%xmm3 \n\t" \
+ "psrad %3, %%xmm1 \n\t" \
+ "packssdw %%xmm3, %%xmm1 \n\t" \
+ "movdqa %%xmm1, " #i "(%4) \n\t"
+
"movdqa (%2), %%xmm6 \n\t"
- "FDCT_ROW_SSE2_H1 0 0 \n\t"
- "FDCT_ROW_SSE2 0 \n\t"
- "FDCT_ROW_SSE2_H2 64 0 \n\t"
- "FDCT_ROW_SSE2 64 \n\t"
-
- "FDCT_ROW_SSE2_H1 16 64 \n\t"
- "FDCT_ROW_SSE2 16 \n\t"
- "FDCT_ROW_SSE2_H2 112 64 \n\t"
- "FDCT_ROW_SSE2 112 \n\t"
-
- "FDCT_ROW_SSE2_H1 32 128 \n\t"
- "FDCT_ROW_SSE2 32 \n\t"
- "FDCT_ROW_SSE2_H2 96 128 \n\t"
- "FDCT_ROW_SSE2 96 \n\t"
-
- "FDCT_ROW_SSE2_H1 48 192 \n\t"
- "FDCT_ROW_SSE2 48 \n\t"
- "FDCT_ROW_SSE2_H2 80 192 \n\t"
- "FDCT_ROW_SSE2 80 \n\t"
+ FDCT_ROW_SSE2_H1(0,0)
+ FDCT_ROW_SSE2(0)
+ FDCT_ROW_SSE2_H2(64,0)
+ FDCT_ROW_SSE2(64)
+
+ FDCT_ROW_SSE2_H1(16,64)
+ FDCT_ROW_SSE2(16)
+ FDCT_ROW_SSE2_H2(112,64)
+ FDCT_ROW_SSE2(112)
+
+ FDCT_ROW_SSE2_H1(32,128)
+ FDCT_ROW_SSE2(32)
+ FDCT_ROW_SSE2_H2(96,128)
+ FDCT_ROW_SSE2(96)
+
+ FDCT_ROW_SSE2_H1(48,192)
+ FDCT_ROW_SSE2(48)
+ FDCT_ROW_SSE2_H2(80,192)
+ FDCT_ROW_SSE2(80)
:
: "r" (in), "r" (tab_frw_01234567_sse2.tab_frw_01234567_sse2), "r" (fdct_r_row_sse2.fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out)
);
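
[Editor's note] The rewrite replaces .macro/.endm sequences, which the assembler expanded from inside the asm() string, with ordinary C preprocessor macros that paste their numeric arguments via # stringification, so the assembler only ever sees plain instructions (presumably to support assemblers without .macro; the patch does not say). For example:

    /* FDCT_ROW_SSE2_H2(64,0) now expands, at preprocessing time, to the
     * string-literal concatenation
     *     "movq 64(%0), %%xmm2      \n\t"
     *     "movq 64+8(%0), %%xmm0    \n\t"
     *     "movdqa 0+32(%1), %%xmm3  \n\t"
     *     "movdqa 0+48(%1), %%xmm7  \n\t"
     * and the assembler folds 64+8 and 0+32 itself.                       */
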
@@ -504,56 +520,44 @@ static always_inline void fdct_row_mmx(const int16_t *in, int16_t *out, const in
void ff_fdct_mmx(int16_t *block)
{
int64_t align_tmp[16] ATTR_ALIGN(8);
- int16_t * const block_tmp= (int16_t*)align_tmp;
- int16_t *block1, *out;
- const int16_t *table;
+ int16_t * block1= (int16_t*)align_tmp;
+ const int16_t *table= tab_frw_01234567;
int i;
- block1 = block_tmp;
fdct_col(block, block1, 0);
fdct_col(block, block1, 4);
- block1 = block_tmp;
- table = tab_frw_01234567;
- out = block;
for(i=8;i>0;i--) {
- fdct_row_mmx(block1, out, table);
+ fdct_row_mmx(block1, block, table);
block1 += 8;
table += 32;
- out += 8;
+ block += 8;
}
}
void ff_fdct_mmx2(int16_t *block)
{
int64_t align_tmp[16] ATTR_ALIGN(8);
- int16_t * const block_tmp= (int16_t*)align_tmp;
- int16_t *block1, *out;
- const int16_t *table;
+ int16_t *block1= (int16_t*)align_tmp;
+ const int16_t *table= tab_frw_01234567;
int i;
- block1 = block_tmp;
fdct_col(block, block1, 0);
fdct_col(block, block1, 4);
- block1 = block_tmp;
- table = tab_frw_01234567;
- out = block;
for(i=8;i>0;i--) {
- fdct_row_mmx2(block1, out, table);
+ fdct_row_mmx2(block1, block, table);
block1 += 8;
table += 32;
- out += 8;
+ block += 8;
}
}
void ff_fdct_sse2(int16_t *block)
{
- int64_t align_tmp[16] ATTR_ALIGN(8);
- int16_t * const block_tmp= (int16_t*)align_tmp;
- int16_t *block1;
+ int64_t align_tmp[16] ATTR_ALIGN(16);
+ int16_t * const block1= (int16_t*)align_tmp;
- block1 = block_tmp;
fdct_col(block, block1, 0);
fdct_col(block, block1, 4);
diff --git a/contrib/ffmpeg/libavcodec/i386/fft_3dn.c b/contrib/ffmpeg/libavcodec/i386/fft_3dn.c
new file mode 100644
index 000000000..8087f1932
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/i386/fft_3dn.c
@@ -0,0 +1,125 @@
+/*
+ * FFT/MDCT transform with 3DNow! optimizations
+ * Copyright (c) 2006 Zuxy MENG Jie, Loren Merritt
+ * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "../dsputil.h"
+
+static const int p1m1[2] __attribute__((aligned(8))) =
+ { 0, 1 << 31 };
+
+static const int m1p1[2] __attribute__((aligned(8))) =
+ { 1 << 31, 0 };
+
+void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z)
+{
+ int ln = s->nbits;
+ long i, j;
+ long nblocks, nloops;
+ FFTComplex *p, *cptr;
+
+ asm volatile(
+        /* FEMMS is not strictly required here, but AMD recommends it */
+ "femms \n\t"
+ "movq %0, %%mm7 \n\t"
+ ::"m"(*(s->inverse ? m1p1 : p1m1))
+ );
+
+ i = 8 << ln;
+ asm volatile(
+ "1: \n\t"
+ "sub $32, %0 \n\t"
+ "movq (%0,%1), %%mm0 \n\t"
+ "movq 16(%0,%1), %%mm1 \n\t"
+ "movq 8(%0,%1), %%mm2 \n\t"
+ "movq 24(%0,%1), %%mm3 \n\t"
+ "movq %%mm0, %%mm4 \n\t"
+ "movq %%mm1, %%mm5 \n\t"
+ "pfadd %%mm2, %%mm0 \n\t"
+ "pfadd %%mm3, %%mm1 \n\t"
+ "pfsub %%mm2, %%mm4 \n\t"
+ "pfsub %%mm3, %%mm5 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "punpckldq %%mm5, %%mm6 \n\t"
+ "punpckhdq %%mm6, %%mm5 \n\t"
+ "movq %%mm4, %%mm3 \n\t"
+ "pxor %%mm7, %%mm5 \n\t"
+ "pfadd %%mm1, %%mm0 \n\t"
+ "pfadd %%mm5, %%mm4 \n\t"
+ "pfsub %%mm1, %%mm2 \n\t"
+ "pfsub %%mm5, %%mm3 \n\t"
+ "movq %%mm0, (%0,%1) \n\t"
+ "movq %%mm4, 8(%0,%1) \n\t"
+ "movq %%mm2, 16(%0,%1) \n\t"
+ "movq %%mm3, 24(%0,%1) \n\t"
+ "jg 1b \n\t"
+ :"+r"(i)
+ :"r"(z)
+ );
+ /* pass 2 .. ln-1 */
+
+ nblocks = 1 << (ln-3);
+ nloops = 1 << 2;
+ cptr = s->exptab1;
+ do {
+ p = z;
+ j = nblocks;
+ do {
+ i = nloops*8;
+ asm volatile(
+ "1: \n\t"
+ "sub $16, %0 \n\t"
+ "movq (%1,%0), %%mm0 \n\t"
+ "movq 8(%1,%0), %%mm1 \n\t"
+ "movq (%2,%0), %%mm2 \n\t"
+ "movq 8(%2,%0), %%mm3 \n\t"
+ "movq %%mm2, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "punpckldq %%mm2, %%mm2 \n\t"
+ "punpckldq %%mm3, %%mm3 \n\t"
+ "punpckhdq %%mm4, %%mm4 \n\t"
+ "punpckhdq %%mm5, %%mm5 \n\t"
+ "pfmul (%3,%0,2), %%mm2 \n\t" // cre*re cim*re
+ "pfmul 8(%3,%0,2), %%mm3 \n\t"
+ "pfmul 16(%3,%0,2), %%mm4 \n\t" // -cim*im cre*im
+ "pfmul 24(%3,%0,2), %%mm5 \n\t"
+ "pfadd %%mm2, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im
+ "pfadd %%mm3, %%mm5 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "pfadd %%mm4, %%mm0 \n\t"
+ "pfadd %%mm5, %%mm1 \n\t"
+ "pfsub %%mm4, %%mm2 \n\t"
+ "pfsub %%mm5, %%mm3 \n\t"
+ "movq %%mm0, (%1,%0) \n\t"
+ "movq %%mm1, 8(%1,%0) \n\t"
+ "movq %%mm2, (%2,%0) \n\t"
+ "movq %%mm3, 8(%2,%0) \n\t"
+ "jg 1b \n\t"
+ :"+r"(i)
+ :"r"(p), "r"(p + nloops), "r"(cptr)
+ );
+ p += nloops*2;
+ } while (--j);
+ cptr += nloops*2;
+ nblocks >>= 1;
+ nloops <<= 1;
+ } while (nblocks != 0);
+ asm volatile("femms");
+}
diff --git a/contrib/ffmpeg/libavcodec/i386/fft_3dn2.c b/contrib/ffmpeg/libavcodec/i386/fft_3dn2.c
new file mode 100644
index 000000000..a4fe5f0b6
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/i386/fft_3dn2.c
@@ -0,0 +1,210 @@
+/*
+ * FFT/MDCT transform with Extended 3DNow! optimizations
+ * Copyright (c) 2006 Zuxy MENG Jie, Loren Merritt
+ * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "../dsputil.h"
+
+static const int p1m1[2] __attribute__((aligned(8))) =
+ { 0, 1 << 31 };
+
+static const int m1p1[2] __attribute__((aligned(8))) =
+ { 1 << 31, 0 };
+
+void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z)
+{
+ int ln = s->nbits;
+ long i, j;
+ long nblocks, nloops;
+ FFTComplex *p, *cptr;
+
+ asm volatile(
+        /* FEMMS is not strictly required here, but AMD recommends it */
+ "femms \n\t"
+ "movq %0, %%mm7 \n\t"
+ ::"m"(*(s->inverse ? m1p1 : p1m1))
+ );
+
+ i = 8 << ln;
+ asm volatile(
+ "1: \n\t"
+ "sub $32, %0 \n\t"
+ "movq (%0,%1), %%mm0 \n\t"
+ "movq 16(%0,%1), %%mm1 \n\t"
+ "movq 8(%0,%1), %%mm2 \n\t"
+ "movq 24(%0,%1), %%mm3 \n\t"
+ "movq %%mm0, %%mm4 \n\t"
+ "movq %%mm1, %%mm5 \n\t"
+ "pfadd %%mm2, %%mm0 \n\t"
+ "pfadd %%mm3, %%mm1 \n\t"
+ "pfsub %%mm2, %%mm4 \n\t"
+ "pfsub %%mm3, %%mm5 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "pswapd %%mm5, %%mm5 \n\t"
+ "movq %%mm4, %%mm3 \n\t"
+ "pxor %%mm7, %%mm5 \n\t"
+ "pfadd %%mm1, %%mm0 \n\t"
+ "pfadd %%mm5, %%mm4 \n\t"
+ "pfsub %%mm1, %%mm2 \n\t"
+ "pfsub %%mm5, %%mm3 \n\t"
+ "movq %%mm0, (%0,%1) \n\t"
+ "movq %%mm4, 8(%0,%1) \n\t"
+ "movq %%mm2, 16(%0,%1) \n\t"
+ "movq %%mm3, 24(%0,%1) \n\t"
+ "jg 1b \n\t"
+ :"+r"(i)
+ :"r"(z)
+ );
+ /* pass 2 .. ln-1 */
+
+ nblocks = 1 << (ln-3);
+ nloops = 1 << 2;
+ cptr = s->exptab1;
+ do {
+ p = z;
+ j = nblocks;
+ do {
+ i = nloops*8;
+ asm volatile(
+ "1: \n\t"
+ "sub $16, %0 \n\t"
+ "movq (%1,%0), %%mm0 \n\t"
+ "movq 8(%1,%0), %%mm1 \n\t"
+ "movq (%2,%0), %%mm2 \n\t"
+ "movq 8(%2,%0), %%mm3 \n\t"
+ "movq (%3,%0,2), %%mm4 \n\t"
+ "movq 8(%3,%0,2), %%mm5 \n\t"
+ "pswapd %%mm4, %%mm6 \n\t" // no need for cptr[2] & cptr[3]
+ "pswapd %%mm5, %%mm7 \n\t"
+ "pfmul %%mm2, %%mm4 \n\t" // cre*re cim*im
+ "pfmul %%mm3, %%mm5 \n\t"
+ "pfmul %%mm2, %%mm6 \n\t" // cim*re cre*im
+ "pfmul %%mm3, %%mm7 \n\t"
+ "pfpnacc %%mm6, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im
+ "pfpnacc %%mm7, %%mm5 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "pfadd %%mm4, %%mm0 \n\t"
+ "pfadd %%mm5, %%mm1 \n\t"
+ "pfsub %%mm4, %%mm2 \n\t"
+ "pfsub %%mm5, %%mm3 \n\t"
+ "movq %%mm0, (%1,%0) \n\t"
+ "movq %%mm1, 8(%1,%0) \n\t"
+ "movq %%mm2, (%2,%0) \n\t"
+ "movq %%mm3, 8(%2,%0) \n\t"
+ "jg 1b \n\t"
+ :"+r"(i)
+ :"r"(p), "r"(p + nloops), "r"(cptr)
+ );
+ p += nloops*2;
+ } while (--j);
+ cptr += nloops*2;
+ nblocks >>= 1;
+ nloops <<= 1;
+ } while (nblocks != 0);
+ asm volatile("femms");
+}
+
+void ff_imdct_calc_3dn2(MDCTContext *s, FFTSample *output,
+ const FFTSample *input, FFTSample *tmp)
+{
+ long k, n8, n4, n2, n;
+ const uint16_t *revtab = s->fft.revtab;
+ const FFTSample *tcos = s->tcos;
+ const FFTSample *tsin = s->tsin;
+ const FFTSample *in1, *in2;
+ FFTComplex *z = (FFTComplex *)tmp;
+
+ n = 1 << s->nbits;
+ n2 = n >> 1;
+ n4 = n >> 2;
+ n8 = n >> 3;
+
+ /* pre rotation */
+ in1 = input;
+ in2 = input + n2 - 1;
+ for(k = 0; k < n4; k++) {
+ // FIXME a single block is faster, but gcc 2.95 and 3.4.x on 32bit can't compile it
+ asm volatile(
+ "movd %0, %%mm0 \n\t"
+ "movd %2, %%mm1 \n\t"
+ "punpckldq %1, %%mm0 \n\t"
+ "punpckldq %3, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "pfmul %%mm1, %%mm0 \n\t"
+ "pswapd %%mm1, %%mm1 \n\t"
+ "pfmul %%mm1, %%mm2 \n\t"
+ "pfpnacc %%mm2, %%mm0 \n\t"
+ ::"m"(in2[-2*k]), "m"(in1[2*k]),
+ "m"(tcos[k]), "m"(tsin[k])
+ );
+ asm volatile(
+ "movq %%mm0, %0 \n\t"
+ :"=m"(z[revtab[k]])
+ );
+ }
+
+ ff_fft_calc(&s->fft, z);
+
+ /* post rotation + reordering */
+ for(k = 0; k < n4; k++) {
+ asm volatile(
+ "movq %0, %%mm0 \n\t"
+ "movd %1, %%mm1 \n\t"
+ "punpckldq %2, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "pfmul %%mm1, %%mm0 \n\t"
+ "pswapd %%mm1, %%mm1 \n\t"
+ "pfmul %%mm1, %%mm2 \n\t"
+ "pfpnacc %%mm2, %%mm0 \n\t"
+ "movq %%mm0, %0 \n\t"
+ :"+m"(z[k])
+ :"m"(tcos[k]), "m"(tsin[k])
+ );
+ }
+
+ k = n-8;
+ asm volatile("movd %0, %%mm7" ::"r"(1<<31));
+ asm volatile(
+ "1: \n\t"
+ "movq (%4,%0), %%mm0 \n\t" // z[n8+k]
+ "neg %0 \n\t"
+ "pswapd -8(%4,%0), %%mm1 \n\t" // z[n8-1-k]
+ "movq %%mm0, %%mm2 \n\t"
+ "pxor %%mm7, %%mm2 \n\t"
+ "punpckldq %%mm1, %%mm2 \n\t"
+ "pswapd %%mm2, %%mm3 \n\t"
+ "punpckhdq %%mm1, %%mm0 \n\t"
+ "pswapd %%mm0, %%mm4 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "pxor %%mm7, %%mm4 \n\t"
+ "movq %%mm3, -8(%3,%0) \n\t" // output[n-2-2*k] = { z[n8-1-k].im, -z[n8+k].re }
+ "movq %%mm4, -8(%2,%0) \n\t" // output[n2-2-2*k]= { -z[n8-1-k].re, z[n8+k].im }
+ "neg %0 \n\t"
+ "movq %%mm0, (%1,%0) \n\t" // output[2*k] = { -z[n8+k].im, z[n8-1-k].re }
+ "movq %%mm2, (%2,%0) \n\t" // output[n2+2*k] = { -z[n8+k].re, z[n8-1-k].im }
+ "sub $8, %0 \n\t"
+ "jge 1b \n\t"
+ :"+r"(k)
+ :"r"(output), "r"(output+n2), "r"(output+n), "r"(z+n8)
+ :"memory"
+ );
+ asm volatile("femms");
+}
+
diff --git a/contrib/ffmpeg/libavcodec/i386/fft_sse.c b/contrib/ffmpeg/libavcodec/i386/fft_sse.c
new file mode 100644
index 000000000..0dc0c61c1
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/i386/fft_sse.c
@@ -0,0 +1,247 @@
+/*
+ * FFT/MDCT transform with SSE optimizations
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "../dsputil.h"
+
+static const int p1p1p1m1[4] __attribute__((aligned(16))) =
+ { 0, 0, 0, 1 << 31 };
+
+static const int p1p1m1p1[4] __attribute__((aligned(16))) =
+ { 0, 0, 1 << 31, 0 };
+
+static const int p1p1m1m1[4] __attribute__((aligned(16))) =
+ { 0, 0, 1 << 31, 1 << 31 };
+
+static const int p1m1p1m1[4] __attribute__((aligned(16))) =
+ { 0, 1 << 31, 0, 1 << 31 };
+
+static const int m1m1m1m1[4] __attribute__((aligned(16))) =
+ { 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
+
+#if 0
+static void print_v4sf(const char *str, __m128 a)
+{
+ float *p = (float *)&a;
+ printf("%s: %f %f %f %f\n",
+ str, p[0], p[1], p[2], p[3]);
+}
+#endif
+
+/* XXX: handle reverse case */
+void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
+{
+ int ln = s->nbits;
+ long i, j;
+ long nblocks, nloops;
+ FFTComplex *p, *cptr;
+
+ asm volatile(
+ "movaps %0, %%xmm4 \n\t"
+ "movaps %1, %%xmm5 \n\t"
+ ::"m"(*p1p1m1m1),
+ "m"(*(s->inverse ? p1p1m1p1 : p1p1p1m1))
+ );
+
+ i = 8 << ln;
+ asm volatile(
+ "1: \n\t"
+ "sub $32, %0 \n\t"
+ /* do the pass 0 butterfly */
+ "movaps (%0,%1), %%xmm0 \n\t"
+ "movaps %%xmm0, %%xmm1 \n\t"
+ "shufps $0x4E, %%xmm0, %%xmm0 \n\t"
+ "xorps %%xmm4, %%xmm1 \n\t"
+ "addps %%xmm1, %%xmm0 \n\t"
+ "movaps 16(%0,%1), %%xmm2 \n\t"
+ "movaps %%xmm2, %%xmm3 \n\t"
+ "shufps $0x4E, %%xmm2, %%xmm2 \n\t"
+ "xorps %%xmm4, %%xmm3 \n\t"
+ "addps %%xmm3, %%xmm2 \n\t"
+ /* multiply third by -i */
+ /* by toggling the sign bit */
+ "shufps $0xB4, %%xmm2, %%xmm2 \n\t"
+ "xorps %%xmm5, %%xmm2 \n\t"
+ /* do the pass 1 butterfly */
+ "movaps %%xmm0, %%xmm1 \n\t"
+ "addps %%xmm2, %%xmm0 \n\t"
+ "subps %%xmm2, %%xmm1 \n\t"
+ "movaps %%xmm0, (%0,%1) \n\t"
+ "movaps %%xmm1, 16(%0,%1) \n\t"
+ "jg 1b \n\t"
+ :"+r"(i)
+ :"r"(z)
+ );
+ /* pass 2 .. ln-1 */
+
+ nblocks = 1 << (ln-3);
+ nloops = 1 << 2;
+ cptr = s->exptab1;
+ do {
+ p = z;
+ j = nblocks;
+ do {
+ i = nloops*8;
+ asm volatile(
+ "1: \n\t"
+ "sub $16, %0 \n\t"
+ "movaps (%2,%0), %%xmm1 \n\t"
+ "movaps (%1,%0), %%xmm0 \n\t"
+ "movaps %%xmm1, %%xmm2 \n\t"
+ "shufps $0xA0, %%xmm1, %%xmm1 \n\t"
+ "shufps $0xF5, %%xmm2, %%xmm2 \n\t"
+ "mulps (%3,%0,2), %%xmm1 \n\t" // cre*re cim*re
+ "mulps 16(%3,%0,2), %%xmm2 \n\t" // -cim*im cre*im
+ "addps %%xmm2, %%xmm1 \n\t"
+ "movaps %%xmm0, %%xmm3 \n\t"
+ "addps %%xmm1, %%xmm0 \n\t"
+ "subps %%xmm1, %%xmm3 \n\t"
+ "movaps %%xmm0, (%1,%0) \n\t"
+ "movaps %%xmm3, (%2,%0) \n\t"
+ "jg 1b \n\t"
+ :"+r"(i)
+ :"r"(p), "r"(p + nloops), "r"(cptr)
+ );
+ p += nloops*2;
+ } while (--j);
+ cptr += nloops*2;
+ nblocks >>= 1;
+ nloops <<= 1;
+ } while (nblocks != 0);
+}
+
+void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output,
+ const FFTSample *input, FFTSample *tmp)
+{
+ long k, n8, n4, n2, n;
+ const uint16_t *revtab = s->fft.revtab;
+ const FFTSample *tcos = s->tcos;
+ const FFTSample *tsin = s->tsin;
+ const FFTSample *in1, *in2;
+ FFTComplex *z = (FFTComplex *)tmp;
+
+ n = 1 << s->nbits;
+ n2 = n >> 1;
+ n4 = n >> 2;
+ n8 = n >> 3;
+
+ asm volatile ("movaps %0, %%xmm7\n\t"::"m"(*p1m1p1m1));
+
+ /* pre rotation */
+ in1 = input;
+ in2 = input + n2 - 4;
+
+    /* Complex multiplication:
+       two complex products per iteration; we could do 4 with 8 xmm
+       registers, or 8 with 16 xmm registers.
+       Maybe we should unroll more.
+    */
+ for (k = 0; k < n4; k += 2) {
+ asm volatile (
+ "movaps %0, %%xmm0 \n\t" // xmm0 = r0 X r1 X : in2
+ "movaps %1, %%xmm3 \n\t" // xmm3 = X i1 X i0: in1
+ "movlps %2, %%xmm1 \n\t" // xmm1 = X X R1 R0: tcos
+ "movlps %3, %%xmm2 \n\t" // xmm2 = X X I1 I0: tsin
+ "shufps $95, %%xmm0, %%xmm0 \n\t" // xmm0 = r1 r1 r0 r0
+ "shufps $160,%%xmm3, %%xmm3 \n\t" // xmm3 = i1 i1 i0 i0
+ "unpcklps %%xmm2, %%xmm1 \n\t" // xmm1 = I1 R1 I0 R0
+ "movaps %%xmm1, %%xmm2 \n\t" // xmm2 = I1 R1 I0 R0
+ "xorps %%xmm7, %%xmm2 \n\t" // xmm2 = -I1 R1 -I0 R0
+ "mulps %%xmm1, %%xmm0 \n\t" // xmm0 = rI rR rI rR
+ "shufps $177,%%xmm2, %%xmm2 \n\t" // xmm2 = R1 -I1 R0 -I0
+ "mulps %%xmm2, %%xmm3 \n\t" // xmm3 = Ri -Ii Ri -Ii
+ "addps %%xmm3, %%xmm0 \n\t" // xmm0 = result
+ ::"m"(in2[-2*k]), "m"(in1[2*k]),
+ "m"(tcos[k]), "m"(tsin[k])
+ );
+        /* This should be in the same asm block; kept separate as a workaround for gcc 2.95 & gcc 3 */
+ asm (
+ "movlps %%xmm0, %0 \n\t"
+ "movhps %%xmm0, %1 \n\t"
+ :"=m"(z[revtab[k]]), "=m"(z[revtab[k + 1]])
+ );
+ }
+
+ ff_fft_calc_sse(&s->fft, z);
+
+ /* Not currently needed, added for safety */
+ asm volatile ("movaps %0, %%xmm7\n\t"::"m"(*p1m1p1m1));
+
+ /* post rotation + reordering */
+ for (k = 0; k < n4; k += 2) {
+ asm (
+ "movaps %0, %%xmm0 \n\t" // xmm0 = i1 r1 i0 r0: z
+ "movlps %1, %%xmm1 \n\t" // xmm1 = X X R1 R0: tcos
+ "movaps %%xmm0, %%xmm3 \n\t" // xmm3 = i1 r1 i0 r0
+ "movlps %2, %%xmm2 \n\t" // xmm2 = X X I1 I0: tsin
+ "shufps $160,%%xmm0, %%xmm0 \n\t" // xmm0 = r1 r1 r0 r0
+ "shufps $245,%%xmm3, %%xmm3 \n\t" // xmm3 = i1 i1 i0 i0
+ "unpcklps %%xmm2, %%xmm1 \n\t" // xmm1 = I1 R1 I0 R0
+ "movaps %%xmm1, %%xmm2 \n\t" // xmm2 = I1 R1 I0 R0
+ "xorps %%xmm7, %%xmm2 \n\t" // xmm2 = -I1 R1 -I0 R0
+ "mulps %%xmm1, %%xmm0 \n\t" // xmm0 = rI rR rI rR
+ "shufps $177,%%xmm2, %%xmm2 \n\t" // xmm2 = R1 -I1 R0 -I0
+ "mulps %%xmm2, %%xmm3 \n\t" // xmm3 = Ri -Ii Ri -Ii
+ "addps %%xmm3, %%xmm0 \n\t" // xmm0 = result
+ "movaps %%xmm0, %0 \n\t"
+ :"+m"(z[k])
+ :"m"(tcos[k]), "m"(tsin[k])
+ );
+ }
+
+ /*
+ Mnemonics:
+ 0 = z[k].re
+ 1 = z[k].im
+ 2 = z[k + 1].re
+ 3 = z[k + 1].im
+ 4 = z[-k - 2].re
+ 5 = z[-k - 2].im
+ 6 = z[-k - 1].re
+ 7 = z[-k - 1].im
+ */
+ k = 16-n;
+ asm volatile("movaps %0, %%xmm7 \n\t"::"m"(*m1m1m1m1));
+ asm volatile(
+ "1: \n\t"
+ "movaps -16(%4,%0), %%xmm1 \n\t" // xmm1 = 4 5 6 7 = z[-2-k]
+ "neg %0 \n\t"
+ "movaps (%4,%0), %%xmm0 \n\t" // xmm0 = 0 1 2 3 = z[k]
+ "xorps %%xmm7, %%xmm0 \n\t" // xmm0 = -0 -1 -2 -3
+ "movaps %%xmm0, %%xmm2 \n\t" // xmm2 = -0 -1 -2 -3
+ "shufps $141,%%xmm1, %%xmm0 \n\t" // xmm0 = -1 -3 4 6
+ "shufps $216,%%xmm1, %%xmm2 \n\t" // xmm2 = -0 -2 5 7
+ "shufps $156,%%xmm0, %%xmm0 \n\t" // xmm0 = -1 6 -3 4 !
+ "shufps $156,%%xmm2, %%xmm2 \n\t" // xmm2 = -0 7 -2 5 !
+ "movaps %%xmm0, (%1,%0) \n\t" // output[2*k]
+ "movaps %%xmm2, (%2,%0) \n\t" // output[n2+2*k]
+ "neg %0 \n\t"
+ "shufps $27, %%xmm0, %%xmm0 \n\t" // xmm0 = 4 -3 6 -1
+ "xorps %%xmm7, %%xmm0 \n\t" // xmm0 = -4 3 -6 1 !
+ "shufps $27, %%xmm2, %%xmm2 \n\t" // xmm2 = 5 -2 7 -0 !
+ "movaps %%xmm0, -16(%2,%0) \n\t" // output[n2-4-2*k]
+ "movaps %%xmm2, -16(%3,%0) \n\t" // output[n-4-2*k]
+ "add $16, %0 \n\t"
+ "jle 1b \n\t"
+ :"+r"(k)
+ :"r"(output), "r"(output+n2), "r"(output+n), "r"(z+n8)
+ :"memory"
+ );
+}
+
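
[Editor's note] The pre-rotation loop above is a packed complex multiply: following the lane comments, each pair (value from in2, value from in1) is rotated by (tcos[k], tsin[k]) and written to the bit-reversed slot. A hedged scalar model, mirroring the generic ff_imdct_calc loop (the helper name is illustrative):

    static void imdct_prerotate_c(FFTComplex *z, const uint16_t *revtab,
                                  const FFTSample *input, const FFTSample *tcos,
                                  const FFTSample *tsin, int n2, int n4)
    {
        const FFTSample *in1 = input, *in2 = input + n2 - 1;
        int k;
        for (k = 0; k < n4; k++) {
            FFTSample re = *in2 * tcos[k] - *in1 * tsin[k];
            FFTSample im = *in2 * tsin[k] + *in1 * tcos[k];
            z[revtab[k]].re = re;
            z[revtab[k]].im = im;
            in1 += 2;
            in2 -= 2;
        }
    }
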
diff --git a/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c b/contrib/ffmpeg/libavcodec/i386/h264dsp_mmx.c
index ac4ad6401..40baf199b 100644
--- a/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c
+++ b/contrib/ffmpeg/libavcodec/i386/h264dsp_mmx.c
@@ -1,18 +1,20 @@
/*
* Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -174,7 +176,7 @@ static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
block[0] += 32;
for(i=0; i<2; i++){
- uint64_t tmp;
+ DECLARE_ALIGNED_8(uint64_t, tmp);
h264_idct8_1d(block+4*i);
@@ -315,6 +317,17 @@ static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
"por "#t", "#o" \n\t"\
"psubusb "#a", "#o" \n\t"
+// out: o = |x-y|>a
+// clobbers: t
+#define DIFF_GT2_MMX(x,y,a,o,t)\
+ "movq "#y", "#t" \n\t"\
+ "movq "#x", "#o" \n\t"\
+ "psubusb "#x", "#t" \n\t"\
+ "psubusb "#y", "#o" \n\t"\
+ "psubusb "#a", "#t" \n\t"\
+ "psubusb "#a", "#o" \n\t"\
+ "pcmpeqb "#t", "#o" \n\t"\
+
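// DIFF_GT2_MMX, read per byte (a sketch; at its call sites "a" holds beta-1):
//   t = sat(sat(y-x) - a) and o = sat(sat(x-y) - a) are both zero exactly when
//   |x-y| <= a, so the final pcmpeqb leaves o = 0xFF where |x-y| <= a, which is
//   the "|p2-p0| < beta" / "|q2-q0| < beta" mask consumed below.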
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
@@ -335,46 +348,26 @@ static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
- /* a = q0^p0^((p1-q1)>>2) */\
- "movq %%mm0, %%mm4 \n\t"\
- "psubb %%mm3, %%mm4 \n\t"\
- "psrlw $2, %%mm4 \n\t"\
- "pxor %%mm1, %%mm4 \n\t"\
- "pxor %%mm2, %%mm4 \n\t"\
- /* b = p0^(q1>>2) */\
- "psrlw $2, %%mm3 \n\t"\
- "pand "#pb_3f", %%mm3 \n\t"\
- "movq %%mm1, %%mm5 \n\t"\
- "pxor %%mm3, %%mm5 \n\t"\
- /* c = q0^(p1>>2) */\
- "psrlw $2, %%mm0 \n\t"\
- "pand "#pb_3f", %%mm0 \n\t"\
- "movq %%mm2, %%mm6 \n\t"\
- "pxor %%mm0, %%mm6 \n\t"\
- /* d = (c^b) & ~(b^a) & 1 */\
- "pxor %%mm5, %%mm6 \n\t"\
- "pxor %%mm4, %%mm5 \n\t"\
- "pandn %%mm6, %%mm5 \n\t"\
- "pand "#pb_01", %%mm5 \n\t"\
- /* delta = (avg(q0, p1>>2) + (d&a))
- * - (avg(p0, q1>>2) + (d&~a)) */\
- "pavgb %%mm2, %%mm0 \n\t"\
- "pand %%mm5, %%mm4 \n\t"\
- "paddusb %%mm4, %%mm0 \n\t"\
- "pavgb %%mm1, %%mm3 \n\t"\
- "pxor %%mm5, %%mm4 \n\t"\
- "paddusb %%mm4, %%mm3 \n\t"\
- /* p0 += clip(delta, -tc0, tc0)
- * q0 -= clip(delta, -tc0, tc0) */\
- "movq %%mm0, %%mm4 \n\t"\
- "psubusb %%mm3, %%mm0 \n\t"\
- "psubusb %%mm4, %%mm3 \n\t"\
- "pminub %%mm7, %%mm0 \n\t"\
- "pminub %%mm7, %%mm3 \n\t"\
- "paddusb %%mm0, %%mm1 \n\t"\
- "paddusb %%mm3, %%mm2 \n\t"\
- "psubusb %%mm3, %%mm1 \n\t"\
- "psubusb %%mm0, %%mm2 \n\t"
+ "movq %%mm1 , %%mm5 \n\t"\
+ "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\
+ "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\
+ "pcmpeqb %%mm4 , %%mm4 \n\t"\
+ "pxor %%mm4 , %%mm3 \n\t"\
+ "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
+ "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
+ "pxor %%mm1 , %%mm4 \n\t"\
+ "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
+ "pavgb %%mm5 , %%mm3 \n\t"\
+ "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\
+ "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
+ "psubusb %%mm3 , %%mm6 \n\t"\
+ "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
+ "pminub %%mm7 , %%mm6 \n\t"\
+ "pminub %%mm7 , %%mm3 \n\t"\
+ "psubusb %%mm6 , %%mm1 \n\t"\
+ "psubusb %%mm3 , %%mm2 \n\t"\
+ "paddusb %%mm3 , %%mm1 \n\t"\
+ "paddusb %%mm6 , %%mm2 \n\t"
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
// out: (q1addr) = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
@@ -395,10 +388,7 @@ static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
- uint64_t tmp0;
- uint64_t tc = (uint8_t)tc0[1]*0x01010000 | (uint8_t)tc0[0]*0x0101;
- // with luma, tc0=0 doesn't mean no filtering, so we need a separate input mask
- uint32_t mask[2] = { (tc0[0]>=0)*0xffffffff, (tc0[1]>=0)*0xffffffff };
+ DECLARE_ALIGNED_8(uint64_t, tmp0[2]);
asm volatile(
"movq (%1,%3), %%mm0 \n\t" //p1
@@ -406,45 +396,46 @@ static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alph
"movq (%2), %%mm2 \n\t" //q0
"movq (%2,%3), %%mm3 \n\t" //q1
H264_DEBLOCK_MASK(%6, %7)
- "pand %5, %%mm7 \n\t"
- "movq %%mm7, %0 \n\t"
+
+ "movd %5, %%mm4 \n\t"
+ "punpcklbw %%mm4, %%mm4 \n\t"
+ "punpcklwd %%mm4, %%mm4 \n\t"
+ "pcmpeqb %%mm3, %%mm3 \n\t"
+ "movq %%mm4, %%mm6 \n\t"
+ "pcmpgtb %%mm3, %%mm4 \n\t"
+ "movq %%mm6, 8+%0 \n\t"
+ "pand %%mm4, %%mm7 \n\t"
+ "movq %%mm7, %0 \n\t"
/* filter p1 */
"movq (%1), %%mm3 \n\t" //p2
- DIFF_GT_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
- "pandn %%mm7, %%mm6 \n\t"
- "pcmpeqb %%mm7, %%mm6 \n\t"
+ DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
"pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
- "pshufw $80, %4, %%mm4 \n\t"
- "pand %%mm7, %%mm4 \n\t" // mask & tc0
- "movq %8, %%mm7 \n\t"
- "pand %%mm6, %%mm7 \n\t" // mask & |p2-p0|<beta & 1
+ "pand 8+%0, %%mm7 \n\t" // mask & tc0
+ "movq %%mm7, %%mm4 \n\t"
+ "psubb %%mm6, %%mm7 \n\t"
"pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
- "paddb %%mm4, %%mm7 \n\t" // tc++
H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)
/* filter q1 */
"movq (%2,%3,2), %%mm4 \n\t" //q2
- DIFF_GT_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
- "pandn %0, %%mm6 \n\t"
- "pcmpeqb %0, %%mm6 \n\t"
+ DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
"pand %0, %%mm6 \n\t"
- "pshufw $80, %4, %%mm5 \n\t"
+ "movq 8+%0, %%mm5 \n\t" // can be merged with the and below but is slower then
"pand %%mm6, %%mm5 \n\t"
- "pand %8, %%mm6 \n\t"
- "paddb %%mm6, %%mm7 \n\t"
+ "psubb %%mm6, %%mm7 \n\t"
"movq (%2,%3), %%mm3 \n\t"
H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)
/* filter p0, q0 */
- H264_DEBLOCK_P0_Q0(%8, %9)
+ H264_DEBLOCK_P0_Q0(%8, unused)
"movq %%mm1, (%1,%3,2) \n\t"
"movq %%mm2, (%2) \n\t"
- : "=m"(tmp0)
+ : "=m"(*tmp0)
: "r"(pix-3*stride), "r"(pix), "r"((long)stride),
- "m"(tc), "m"(*(uint64_t*)mask), "m"(alpha1), "m"(beta1),
- "m"(mm_bone), "m"(ff_pb_3F)
+ "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
+ "m"(mm_bone)
);
}
@@ -459,7 +450,7 @@ static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, in
{
//FIXME: could cut some load/stores by merging transpose with filter
// also, it only needs to transpose 6x8
- uint8_t trans[8*8];
+ DECLARE_ALIGNED_8(uint8_t, trans[8*8]);
int i;
for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
if((tc0[0] & tc0[1]) < 0)
@@ -503,7 +494,7 @@ static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha,
static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
//FIXME: could cut some load/stores by merging transpose with filter
- uint8_t trans[8*4];
+ DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
transpose4x4(trans, pix-2, 8, stride);
transpose4x4(trans+4, pix-2+4*stride, 8, stride);
h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
@@ -553,7 +544,7 @@ static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int a
static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
//FIXME: could cut some load/stores by merging transpose with filter
- uint8_t trans[8*4];
+ DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
transpose4x4(trans, pix-2, 8, stride);
transpose4x4(trans+4, pix-2+4*stride, 8, stride);
h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
@@ -561,6 +552,101 @@ static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int a
transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}
+static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
+ int bidir, int edges, int step, int mask_mv0, int mask_mv1 ) {
+ int dir;
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq %0, %%mm6 \n\t"
+ "movq %1, %%mm5 \n\t"
+ "movq %2, %%mm4 \n\t"
+ ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7)
+ );
+ // could do a special case for dir==0 && edges==1, but it only reduces the
+ // average filter time by 1.2%
+ for( dir=1; dir>=0; dir-- ) {
+ const int d_idx = dir ? -8 : -1;
+ const int mask_mv = dir ? mask_mv1 : mask_mv0;
+ DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
+ int b_idx, edge, l;
+ for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
+ asm volatile(
+ "pand %0, %%mm0 \n\t"
+ ::"m"(mask_dir)
+ );
+ if(!(mask_mv & edge)) {
+ asm volatile("pxor %%mm0, %%mm0 \n\t":);
+ for( l = bidir; l >= 0; l-- ) {
+ asm volatile(
+ "movd %0, %%mm1 \n\t"
+ "punpckldq %1, %%mm1 \n\t"
+ "movq %%mm1, %%mm2 \n\t"
+ "psrlw $7, %%mm2 \n\t"
+ "pand %%mm6, %%mm2 \n\t"
+ "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1
+ "punpckldq %%mm1, %%mm2 \n\t"
+ "pcmpeqb %%mm2, %%mm1 \n\t"
+ "paddb %%mm6, %%mm1 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn]
+ "por %%mm1, %%mm0 \n\t"
+
+ "movq %2, %%mm1 \n\t"
+ "movq %3, %%mm2 \n\t"
+ "psubw %4, %%mm1 \n\t"
+ "psubw %5, %%mm2 \n\t"
+ "packsswb %%mm2, %%mm1 \n\t"
+ "paddb %%mm5, %%mm1 \n\t"
+ "pminub %%mm4, %%mm1 \n\t"
+ "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit
+ "por %%mm1, %%mm0 \n\t"
+ ::"m"(ref[l][b_idx]),
+ "m"(ref[l][b_idx+d_idx]),
+ "m"(mv[l][b_idx][0]),
+ "m"(mv[l][b_idx+2][0]),
+ "m"(mv[l][b_idx+d_idx][0]),
+ "m"(mv[l][b_idx+d_idx+2][0])
+ );
+ }
+ }
+ asm volatile(
+ "movd %0, %%mm1 \n\t"
+ "por %1, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn]
+ ::"m"(nnz[b_idx]),
+ "m"(nnz[b_idx+d_idx])
+ );
+ asm volatile(
+ "pcmpeqw %%mm7, %%mm0 \n\t"
+ "pcmpeqw %%mm7, %%mm0 \n\t"
+ "psrlw $15, %%mm0 \n\t" // nonzero -> 1
+ "psrlw $14, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "por %%mm1, %%mm2 \n\t"
+ "psrlw $1, %%mm1 \n\t"
+ "pandn %%mm2, %%mm1 \n\t"
+ "movq %%mm1, %0 \n\t"
+ :"=m"(*bS[dir][edge])
+ ::"memory"
+ );
+ }
+ edges = 4;
+ step = 1;
+ }
+ asm volatile(
+ "movq (%0), %%mm0 \n\t"
+ "movq 8(%0), %%mm1 \n\t"
+ "movq 16(%0), %%mm2 \n\t"
+ "movq 24(%0), %%mm3 \n\t"
+ TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm3, 8(%0) \n\t"
+ "movq %%mm4, 16(%0) \n\t"
+ "movq %%mm2, 24(%0) \n\t"
+ ::"r"(bS[0])
+ :"memory"
+ );
+}
/***********************************/
/* motion compensation */
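Earlier in this file the H264_DEBLOCK_P0_Q0 macro is rewritten in terms of pavgb; the quantity it builds corresponds to the standard H.264 luma edge update, sketched here in plain C (clip3 and clip_uint8 are assumed helpers, tc is the per-edge threshold derived from tc0):

    /* scalar sketch of the p0/q0 update approximated by the pavgb sequence */
    int delta = clip3(-tc, tc, (((q0 - p0) << 2) + (p1 - q1) + 4) >> 3);
    p0 = clip_uint8(p0 + delta);
    q0 = clip_uint8(q0 - delta);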
diff --git a/src/libffmpeg/libavcodec/i386/idct_mmx.c b/contrib/ffmpeg/libavcodec/i386/idct_mmx.c
index 1c8632fb7..ba595845a 100644
--- a/src/libffmpeg/libavcodec/i386/idct_mmx.c
+++ b/contrib/ffmpeg/libavcodec/i386/idct_mmx.c
@@ -1,7 +1,4 @@
/*
- * Note: For libavcodec, this code can also be used under the LGPL license
- */
-/*
* idct_mmx.c
* Copyright (C) 1999-2001 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
*
diff --git a/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c b/contrib/ffmpeg/libavcodec/i386/idct_mmx_xvid.c
index a55d4ea07..43eb329cc 100644
--- a/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c
+++ b/contrib/ffmpeg/libavcodec/i386/idct_mmx_xvid.c
@@ -5,22 +5,23 @@
// *
// * Copyright(C) 2001 Peter Ross <pross@xvid.org>
// *
-// * This program is free software; you can redistribute it and/or modify it
-// * under the terms of the GNU General Public License as published by
-// * the Free Software Foundation; either version 2 of the License, or
-// * (at your option) any later version.
+// * This file is part of FFmpeg.
// *
-// * This program is distributed in the hope that it will be useful,
-// * but WITHOUT ANY WARRANTY; without even the implied warranty of
-// * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// * GNU General Public License for more details.
+// * FFmpeg is free software; you can redistribute it and/or
+// * modify it under the terms of the GNU Lesser General Public
+// * License as published by the Free Software Foundation; either
+// * version 2.1 of the License, or (at your option) any later version.
// *
-// * You should have received a copy of the GNU General Public License
-// * along with this program; if not, write to the Free Software Foundation,
-// * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-//
+// * FFmpeg is distributed in the hope that it will be useful,
+// * but WITHOUT ANY WARRANTY; without even the implied warranty of
+// * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// * Lesser General Public License for more details.
+// *
+// * You should have received a copy of the GNU Lesser General Public License
+// * along with FFmpeg; if not, write to the Free Software Foundation,
+// * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
// *
-// * $Id: idct_mmx_xvid.c,v 1.3 2006/08/02 07:02:41 tmmm Exp $
+// * $Id: idct_mmx_xvid.c,v 1.1.2.1 2006/12/02 01:19:55 dgp85 Exp $
// *
// ***************************************************************************/
@@ -295,17 +296,17 @@ static const int16_t tab_i_04_xmm[32*4] attribute_used __attribute__ ((aligned(8
"movq 8+" #A1 ",%%mm1 \n\t"/* 1 ; x7 x6 x5 x4*/\
"movq %%mm0,%%mm2 \n\t"/* 2 ; x3 x2 x1 x0*/\
"movq " #A3 ",%%mm3 \n\t"/* 3 ; w05 w04 w01 w00*/\
- "pshufw $0b10001000,%%mm0,%%mm0 \n\t"/* x2 x0 x2 x0*/\
+ "pshufw $0x88,%%mm0,%%mm0 \n\t"/* x2 x0 x2 x0*/\
"movq 8+" #A3 ",%%mm4 \n\t"/* 4 ; w07 w06 w03 w02*/\
"movq %%mm1,%%mm5 \n\t"/* 5 ; x7 x6 x5 x4*/\
"pmaddwd %%mm0,%%mm3 \n\t"/* x2*w05+x0*w04 x2*w01+x0*w00*/\
"movq 32+" #A3 ",%%mm6 \n\t"/* 6 ; w21 w20 w17 w16*/\
- "pshufw $0b10001000,%%mm1,%%mm1 \n\t"/* x6 x4 x6 x4*/\
+ "pshufw $0x88,%%mm1,%%mm1 \n\t"/* x6 x4 x6 x4*/\
"pmaddwd %%mm1,%%mm4 \n\t"/* x6*w07+x4*w06 x6*w03+x4*w02*/\
"movq 40+" #A3 ",%%mm7 \n\t"/* 7 ; w23 w22 w19 w18*/\
- "pshufw $0b11011101,%%mm2,%%mm2 \n\t"/* x3 x1 x3 x1*/\
+ "pshufw $0xdd,%%mm2,%%mm2 \n\t"/* x3 x1 x3 x1*/\
"pmaddwd %%mm2,%%mm6 \n\t"/* x3*w21+x1*w20 x3*w17+x1*w16*/\
- "pshufw $0b11011101,%%mm5,%%mm5 \n\t"/* x7 x5 x7 x5*/\
+ "pshufw $0xdd,%%mm5,%%mm5 \n\t"/* x7 x5 x7 x5*/\
"pmaddwd %%mm5,%%mm7 \n\t"/* x7*w23+x5*w22 x7*w19+x5*w18*/\
"paddd " #A4 ",%%mm3 \n\t"/* +%4*/\
"pmaddwd 16+" #A3 ",%%mm0 \n\t"/* x2*w13+x0*w12 x2*w09+x0*w08*/\
@@ -330,7 +331,7 @@ static const int16_t tab_i_04_xmm[32*4] attribute_used __attribute__ ((aligned(8
"packssdw %%mm0,%%mm3 \n\t"/* 0 ; y3 y2 y1 y0*/\
"packssdw %%mm4,%%mm7 \n\t"/* 4 ; y6 y7 y4 y5*/\
"movq %%mm3, " #A2 " \n\t"/* 3 ; save y3 y2 y1 y0*/\
- "pshufw $0b10110001,%%mm7,%%mm7 \n\t"/* y7 y6 y5 y4*/\
+ "pshufw $0xb1,%%mm7,%%mm7 \n\t"/* y7 y6 y5 y4*/\
"movq %%mm7,8 +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\
diff --git a/contrib/ffmpeg/libavcodec/i386/mathops.h b/contrib/ffmpeg/libavcodec/i386/mathops.h
new file mode 100644
index 000000000..3553a4025
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/i386/mathops.h
@@ -0,0 +1,41 @@
+/*
+ * simple math operations
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifdef FRAC_BITS
+# define MULL(ra, rb) \
+ ({ int rt, dummy; asm (\
+ "imull %3 \n\t"\
+ "shrdl %4, %%edx, %%eax \n\t"\
+ : "=a"(rt), "=d"(dummy)\
+ : "a" (ra), "rm" (rb), "i"(FRAC_BITS));\
+ rt; })
+#endif
+
+#define MULH(ra, rb) \
+ ({ int rt, dummy;\
+ asm ("imull %3\n\t" : "=d"(rt), "=a"(dummy): "a" (ra), "rm" (rb));\
+ rt; })
+
+#define MUL64(ra, rb) \
+ ({ int64_t rt;\
+ asm ("imull %2\n\t" : "=A"(rt) : "a" (ra), "g" (rb));\
+ rt; })
+
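For reference, a portable-C sketch of what the three inline-asm macros in this new header compute (the asm variants use a single imull and keep the 64-bit product in edx:eax instead of relying on the compiler's 64-bit arithmetic):

    /* sketch of the macro semantics */
    #define MULL_C(ra, rb)  ((int)(((int64_t)(ra) * (rb)) >> FRAC_BITS)) /* only when FRAC_BITS is defined */
    #define MULH_C(ra, rb)  ((int)(((int64_t)(ra) * (rb)) >> 32))        /* high half of the signed product */
    #define MUL64_C(ra, rb) ((int64_t)(ra) * (rb))                       /* full 64-bit product */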
diff --git a/src/libffmpeg/libavcodec/i386/mmx.h b/contrib/ffmpeg/libavcodec/i386/mmx.h
index eab051341..41aae6c21 100644
--- a/src/libffmpeg/libavcodec/i386/mmx.h
+++ b/contrib/ffmpeg/libavcodec/i386/mmx.h
@@ -1,6 +1,22 @@
/*
* mmx.h
* Copyright (C) 1997-2001 H. Dietz and R. Fisher
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_I386MMX_H
#define AVCODEC_I386MMX_H
@@ -184,16 +200,16 @@ typedef union {
#define mmx_m2ri(op,mem,reg,imm) \
__asm__ __volatile__ (#op " %1, %0, %%" #reg \
: /* nothing */ \
- : "X" (mem), "X" (imm))
+ : "m" (mem), "i" (imm))
#define mmx_r2ri(op,regs,regd,imm) \
__asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \
: /* nothing */ \
- : "X" (imm) )
+ : "i" (imm) )
#define mmx_fetch(mem,hint) \
__asm__ __volatile__ ("prefetch" #hint " %0" \
: /* nothing */ \
- : "X" (mem))
+ : "m" (mem))
#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg)
diff --git a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c b/contrib/ffmpeg/libavcodec/i386/motion_est_mmx.c
index edcabcf38..e33870e0f 100644
--- a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c
+++ b/contrib/ffmpeg/libavcodec/i386/motion_est_mmx.c
@@ -3,18 +3,20 @@
* Copyright (c) 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* mostly by Michael Niedermayer <michaelni@gmx.at>
@@ -34,7 +36,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
long len= -(stride*h);
asm volatile(
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq (%2, %%"REG_a"), %%mm2 \n\t"
@@ -70,7 +72,7 @@ static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
long len= -(stride*h);
asm volatile(
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq (%2, %%"REG_a"), %%mm2 \n\t"
@@ -92,7 +94,7 @@ static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, in
{
long len= -(stride*h);
asm volatile(
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq (%2, %%"REG_a"), %%mm2 \n\t"
@@ -118,7 +120,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{ //FIXME reuse src
long len= -(stride*h);
asm volatile(
- ".balign 16 \n\t"
+ ASMALIGN(4)
"movq "MANGLE(bone)", %%mm5 \n\t"
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
@@ -155,7 +157,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
{
long len= -(stride*h);
asm volatile(
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq (%2, %%"REG_a"), %%mm1 \n\t"
@@ -193,7 +195,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
long len= -(stride*h);
asm volatile(
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq (%2, %%"REG_a"), %%mm1 \n\t"
diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c b/contrib/ffmpeg/libavcodec/i386/mpegvideo_mmx.c
index c00a602bd..1b7b1c19f 100644
--- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c
+++ b/contrib/ffmpeg/libavcodec/i386/mpegvideo_mmx.c
@@ -2,18 +2,20 @@
* The simplest mpeg encoder (well, it was the simplest!)
* Copyright (c) 2000,2001 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Optimized for ia32 cpus by Nick Kurshev <nickols_k@mail.ru>
@@ -25,7 +27,6 @@
#include "../avcodec.h"
#include "x86_cpu.h"
-extern uint8_t zigzag_direct_noperm[64];
extern uint16_t inv_zigzag_direct16[64];
static const unsigned long long int mm_wabs __attribute__ ((aligned(8))) = 0xffffffffffffffffULL;
@@ -66,7 +67,7 @@ asm volatile(
"packssdw %%mm5, %%mm5 \n\t"
"psubw %%mm5, %%mm7 \n\t"
"pxor %%mm4, %%mm4 \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%0, %3), %%mm0 \n\t"
"movq 8(%0, %3), %%mm1 \n\t"
@@ -129,7 +130,7 @@ asm volatile(
"packssdw %%mm5, %%mm5 \n\t"
"psubw %%mm5, %%mm7 \n\t"
"pxor %%mm4, %%mm4 \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%0, %3), %%mm0 \n\t"
"movq 8(%0, %3), %%mm1 \n\t"
@@ -222,7 +223,7 @@ asm volatile(
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"mov %3, %%"REG_a" \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq 8(%0, %%"REG_a"), %%mm1 \n\t"
@@ -285,7 +286,7 @@ asm volatile(
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"mov %3, %%"REG_a" \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq 8(%0, %%"REG_a"), %%mm1 \n\t"
@@ -357,7 +358,7 @@ asm volatile(
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"mov %3, %%"REG_a" \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq 8(%0, %%"REG_a"), %%mm1 \n\t"
@@ -418,7 +419,7 @@ asm volatile(
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"mov %3, %%"REG_a" \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq 8(%0, %%"REG_a"), %%mm1 \n\t"
diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c b/contrib/ffmpeg/libavcodec/i386/mpegvideo_mmx_template.c
index de2ef08e5..d59b6efd9 100644
--- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c
+++ b/contrib/ffmpeg/libavcodec/i386/mpegvideo_mmx_template.c
@@ -3,18 +3,20 @@
*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#undef SPREADW
@@ -74,7 +76,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
asm volatile (
"mul %%ecx \n\t"
: "=d" (level), "=a"(dummy)
- : "a" ((block[0]>>2) + q), "c" (inverse[q<<1])
+ : "a" ((block[0]>>2) + q), "c" (ff_inverse[q<<1])
);
#else
asm volatile (
@@ -112,7 +114,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
"pxor %%mm6, %%mm6 \n\t"
"psubw (%3), %%mm6 \n\t" // -bias[0]
"mov $-128, %%"REG_a" \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"pxor %%mm1, %%mm1 \n\t" // 0
"movq (%1, %%"REG_a"), %%mm0 \n\t" // block[i]
@@ -156,7 +158,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
"pxor %%mm7, %%mm7 \n\t" // 0
"pxor %%mm4, %%mm4 \n\t" // 0
"mov $-128, %%"REG_a" \n\t"
- ".balign 16 \n\t"
+ ASMALIGN(4)
"1: \n\t"
"pxor %%mm1, %%mm1 \n\t" // 0
"movq (%1, %%"REG_a"), %%mm0 \n\t" // block[i]
diff --git a/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c b/contrib/ffmpeg/libavcodec/i386/simple_idct_mmx.c
index b033a12b8..525ef34f7 100644
--- a/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c
+++ b/contrib/ffmpeg/libavcodec/i386/simple_idct_mmx.c
@@ -3,18 +3,20 @@
*
* Copyright (c) 2001, 2002 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "../dsputil.h"
@@ -281,7 +283,7 @@ static inline void idct(int16_t *block)
"packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
"movq %%mm4, 16+" #dst " \n\t"\
-#define COL_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+#define COL_IDCT(src0, src4, src1, src5, dst, shift) \
"movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
@@ -294,10 +296,8 @@ static inline void idct(int16_t *block)
"pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
"movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
"pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
- #rounder ", %%mm4 \n\t"\
"movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
- #rounder ", %%mm0 \n\t"\
"pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
"paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
"psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
@@ -458,11 +458,11 @@ DC_COND_ROW_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11)
DC_COND_ROW_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11)
-//IDCT( src0, src4, src1, src5, dst, rounder, shift)
-COL_IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-COL_IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-COL_IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-COL_IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, shift)
+COL_IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+COL_IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+COL_IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+COL_IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
#else
@@ -705,7 +705,7 @@ Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 2f)
Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 1f)
#undef IDCT
-#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+#define IDCT(src0, src4, src1, src5, dst, shift) \
"movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
@@ -718,10 +718,8 @@ Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 1f)
"pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
"movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
"pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
- #rounder ", %%mm4 \n\t"\
"movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
- #rounder ", %%mm0 \n\t"\
"pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
"paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
"psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
@@ -782,20 +780,20 @@ Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 1f)
"movd %%mm5, 80+" #dst " \n\t"
-//IDCT( src0, src4, src1, src5, dst, rounder, shift)
-IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
"jmp 9f \n\t"
- "#.balign 16 \n\t"\
+ "#" ASMALIGN(4) \
"4: \n\t"
Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 6f)
Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f)
#undef IDCT
-#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+#define IDCT(src0, src4, src1, src5, dst, shift) \
"movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
@@ -807,9 +805,7 @@ Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f)
"pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
"movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
"pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
- #rounder ", %%mm4 \n\t"\
"movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
- #rounder ", %%mm0 \n\t"\
"paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
"psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
"movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
@@ -859,28 +855,26 @@ Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f)
"movd %%mm1, 64+" #dst " \n\t"\
"movd %%mm5, 80+" #dst " \n\t"
-//IDCT( src0, src4, src1, src5, dst, rounder, shift)
-IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
"jmp 9f \n\t"
- "#.balign 16 \n\t"\
+ "#" ASMALIGN(4) \
"6: \n\t"
Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 7f)
#undef IDCT
-#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+#define IDCT(src0, src4, src1, src5, dst, shift) \
"movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
"movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
"pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
"pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
- #rounder ", %%mm4 \n\t"\
"movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
- #rounder ", %%mm0 \n\t"\
"movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
"movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
"pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
@@ -927,19 +921,19 @@ Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 7f)
"movd %%mm5, 80+" #dst " \n\t"
-//IDCT( src0, src4, src1, src5, dst, rounder, shift)
-IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
"jmp 9f \n\t"
- "#.balign 16 \n\t"\
+ "#" ASMALIGN(4) \
"2: \n\t"
Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f)
#undef IDCT
-#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+#define IDCT(src0, src4, src1, src5, dst, shift) \
"movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
@@ -947,10 +941,8 @@ Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f)
"pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
"pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
- #rounder ", %%mm4 \n\t"\
"movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
- #rounder ", %%mm0 \n\t"\
"pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
"movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
"movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
@@ -1006,27 +998,25 @@ Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f)
"movd %%mm4, 64+" #dst " \n\t"\
"movd %%mm5, 80+" #dst " \n\t"
-//IDCT( src0, src4, src1, src5, dst, rounder, shift)
-IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
"jmp 9f \n\t"
- "#.balign 16 \n\t"\
+ "#" ASMALIGN(4) \
"3: \n\t"
#undef IDCT
-#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+#define IDCT(src0, src4, src1, src5, dst, shift) \
"movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
"movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
"pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
"pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
- #rounder ", %%mm4 \n\t"\
"movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
- #rounder ", %%mm0 \n\t"\
"pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
"movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
"movq 64(%2), %%mm3 \n\t"\
@@ -1072,17 +1062,17 @@ IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"movd %%mm5, 80+" #dst " \n\t"
-//IDCT( src0, src4, src1, src5, dst, rounder, shift)
-IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
"jmp 9f \n\t"
- "#.balign 16 \n\t"\
+ "#" ASMALIGN(4) \
"5: \n\t"
#undef IDCT
-#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+#define IDCT(src0, src4, src1, src5, dst, shift) \
"movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
@@ -1093,10 +1083,8 @@ IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
"movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
"pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
- #rounder ", %%mm4 \n\t"\
"movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
- #rounder ", %%mm0 \n\t"\
"psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
"movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
"paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
@@ -1110,10 +1098,8 @@ IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\
"pmaddwd %%mm3, %%mm7 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
"pmaddwd 40(%2), %%mm3 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
- #rounder ", %%mm1 \n\t"\
"paddd %%mm1, %%mm7 \n\t" /* A0 a0 */\
"paddd %%mm1, %%mm1 \n\t" /* 2C0 2c0 */\
- #rounder ", %%mm2 \n\t"\
"psubd %%mm7, %%mm1 \n\t" /* A3 a3 */\
"paddd %%mm2, %%mm3 \n\t" /* A1 a1 */\
"paddd %%mm2, %%mm2 \n\t" /* 2C1 2c1 */\
@@ -1140,18 +1126,18 @@ IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"movq %%mm5, 80+" #dst " \n\t"
-//IDCT( src0, src4, src1, src5, dst, rounder, shift)
-IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
"jmp 9f \n\t"
- "#.balign 16 \n\t"\
+ "#" ASMALIGN(4) \
"1: \n\t"
#undef IDCT
-#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+#define IDCT(src0, src4, src1, src5, dst, shift) \
"movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
@@ -1163,10 +1149,8 @@ IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
"pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
"movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
"pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
- #rounder ", %%mm4 \n\t"\
"movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
- #rounder ", %%mm0 \n\t"\
"pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
"paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
"psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
@@ -1216,25 +1200,23 @@ IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
"movd %%mm5, 80+" #dst " \n\t"
-//IDCT( src0, src4, src1, src5, dst, rounder, shift)
-IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
"jmp 9f \n\t"
- "#.balign 16 \n\t"
+ "#" ASMALIGN(4)
"7: \n\t"
#undef IDCT
-#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+#define IDCT(src0, src4, src1, src5, dst, shift) \
"movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
"pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
"pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
- #rounder ", %%mm4 \n\t"\
- #rounder ", %%mm0 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
"psrad $" #shift ", %%mm0 \n\t"\
"movq 8+" #src0 ", %%mm2 \n\t" /* R4 R0 r4 r0 */\
@@ -1243,8 +1225,6 @@ IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"movq 24(%2), %%mm7 \n\t" /* -C4 C4 -C4 C4 */\
"pmaddwd %%mm7, %%mm2 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
"movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\
- #rounder ", %%mm1 \n\t"\
- #rounder ", %%mm2 \n\t"\
"psrad $" #shift ", %%mm1 \n\t"\
"packssdw %%mm1, %%mm4 \n\t" /* A0 a0 */\
"movq %%mm4, " #dst " \n\t"\
@@ -1258,11 +1238,11 @@ IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"movq %%mm4, 64+" #dst " \n\t"\
"movq %%mm0, 80+" #dst " \n\t"
-//IDCT( src0, src4, src1, src5, dst, rounder, shift)
-IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, shift)
+IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0), 20)
+//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20)
+//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
#endif
diff --git a/contrib/ffmpeg/libavcodec/i386/snowdsp_mmx.c b/contrib/ffmpeg/libavcodec/i386/snowdsp_mmx.c
new file mode 100644
index 000000000..718202632
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/i386/snowdsp_mmx.c
@@ -0,0 +1,921 @@
+/*
+ * MMX and SSE2 optimized snow DSP utils
+ * Copyright (c) 2005-2006 Robert Edele <yartrebo@earthlink.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../avcodec.h"
+#include "../snow.h"
+#include "x86_cpu.h"
+
+void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width){
+ const int w2= (width+1)>>1;
+ // SSE2 code runs faster with pointers aligned on a 32-byte boundary.
+ DWTELEM temp_buf[(width>>1) + 4];
+ DWTELEM * const temp = temp_buf + 4 - (((int)temp_buf & 0xF) >> 2);
+ const int w_l= (width>>1);
+ const int w_r= w2 - 1;
+ int i;
+
+ { // Lift 0
+ DWTELEM * const ref = b + w2 - 1;
+ DWTELEM b_0 = b[0]; //By allowing the first entry in b[0] to be calculated twice
+ // (the first time erroneously), we allow the SSE2 code to run an extra pass.
+ // The savings in code and time are well worth having to store this value and
+ // calculate b[0] correctly afterwards.
+
+ i = 0;
+ asm volatile(
+ "pcmpeqd %%xmm7, %%xmm7 \n\t"
+ "pslld $31, %%xmm7 \n\t"
+ "psrld $29, %%xmm7 \n\t"
+ ::);
+ for(; i<w_l-7; i+=8){
+ asm volatile(
+ "movdqu (%1), %%xmm1 \n\t"
+ "movdqu 16(%1), %%xmm5 \n\t"
+ "movdqu 4(%1), %%xmm2 \n\t"
+ "movdqu 20(%1), %%xmm6 \n\t"
+ "paddd %%xmm1, %%xmm2 \n\t"
+ "paddd %%xmm5, %%xmm6 \n\t"
+ "movdqa %%xmm2, %%xmm0 \n\t"
+ "movdqa %%xmm6, %%xmm4 \n\t"
+ "paddd %%xmm2, %%xmm2 \n\t"
+ "paddd %%xmm6, %%xmm6 \n\t"
+ "paddd %%xmm0, %%xmm2 \n\t"
+ "paddd %%xmm4, %%xmm6 \n\t"
+ "paddd %%xmm7, %%xmm2 \n\t"
+ "paddd %%xmm7, %%xmm6 \n\t"
+ "psrad $3, %%xmm2 \n\t"
+ "psrad $3, %%xmm6 \n\t"
+ "movdqa (%0), %%xmm0 \n\t"
+ "movdqa 16(%0), %%xmm4 \n\t"
+ "psubd %%xmm2, %%xmm0 \n\t"
+ "psubd %%xmm6, %%xmm4 \n\t"
+ "movdqa %%xmm0, (%0) \n\t"
+ "movdqa %%xmm4, 16(%0) \n\t"
+ :: "r"(&b[i]), "r"(&ref[i])
+ : "memory"
+ );
+ }
+ snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
+ b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
+ }
+
+ { // Lift 1
+ DWTELEM * const dst = b+w2;
+
+ i = 0;
+ for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
+ dst[i] = dst[i] - (b[i] + b[i + 1]);
+ }
+ for(; i<w_r-7; i+=8){
+ asm volatile(
+ "movdqu (%1), %%xmm1 \n\t"
+ "movdqu 16(%1), %%xmm5 \n\t"
+ "movdqu 4(%1), %%xmm2 \n\t"
+ "movdqu 20(%1), %%xmm6 \n\t"
+ "paddd %%xmm1, %%xmm2 \n\t"
+ "paddd %%xmm5, %%xmm6 \n\t"
+ "movdqa (%0), %%xmm0 \n\t"
+ "movdqa 16(%0), %%xmm4 \n\t"
+ "psubd %%xmm2, %%xmm0 \n\t"
+ "psubd %%xmm6, %%xmm4 \n\t"
+ "movdqa %%xmm0, (%0) \n\t"
+ "movdqa %%xmm4, 16(%0) \n\t"
+ :: "r"(&dst[i]), "r"(&b[i])
+ : "memory"
+ );
+ }
+ snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
+ }
+
+ { // Lift 2
+ DWTELEM * const ref = b+w2 - 1;
+ DWTELEM b_0 = b[0];
+
+ i = 0;
+ asm volatile(
+ "pslld $1, %%xmm7 \n\t" /* xmm7 already holds a '4' from 2 lifts ago. */
+ ::);
+ for(; i<w_l-7; i+=8){
+ asm volatile(
+ "movdqu (%1), %%xmm1 \n\t"
+ "movdqu 16(%1), %%xmm5 \n\t"
+ "movdqu 4(%1), %%xmm0 \n\t"
+ "movdqu 20(%1), %%xmm4 \n\t"
+ "paddd %%xmm1, %%xmm0 \n\t"
+ "paddd %%xmm5, %%xmm4 \n\t"
+ "movdqa %%xmm7, %%xmm1 \n\t"
+ "movdqa %%xmm7, %%xmm5 \n\t"
+ "psubd %%xmm0, %%xmm1 \n\t"
+ "psubd %%xmm4, %%xmm5 \n\t"
+ "movdqa (%0), %%xmm0 \n\t"
+ "movdqa 16(%0), %%xmm4 \n\t"
+ "pslld $2, %%xmm0 \n\t"
+ "pslld $2, %%xmm4 \n\t"
+ "psubd %%xmm0, %%xmm1 \n\t"
+ "psubd %%xmm4, %%xmm5 \n\t"
+ "psrad $4, %%xmm1 \n\t"
+ "psrad $4, %%xmm5 \n\t"
+ "movdqa (%0), %%xmm0 \n\t"
+ "movdqa 16(%0), %%xmm4 \n\t"
+ "psubd %%xmm1, %%xmm0 \n\t"
+ "psubd %%xmm5, %%xmm4 \n\t"
+ "movdqa %%xmm0, (%0) \n\t"
+ "movdqa %%xmm4, 16(%0) \n\t"
+ :: "r"(&b[i]), "r"(&ref[i])
+ : "memory"
+ );
+ }
+ snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
+ b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
+ }
+
+ { // Lift 3
+ DWTELEM * const src = b+w2;
+
+ i = 0;
+ for(; (((long)&temp[i]) & 0xF) && i<w_r; i++){
+ temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS);
+ }
+ for(; i<w_r-7; i+=8){
+ asm volatile(
+ "movdqu 4(%1), %%xmm2 \n\t"
+ "movdqu 20(%1), %%xmm6 \n\t"
+ "paddd (%1), %%xmm2 \n\t"
+ "paddd 16(%1), %%xmm6 \n\t"
+ "movdqa %%xmm2, %%xmm0 \n\t"
+ "movdqa %%xmm6, %%xmm4 \n\t"
+ "pslld $2, %%xmm2 \n\t"
+ "pslld $2, %%xmm6 \n\t"
+ "psubd %%xmm2, %%xmm0 \n\t"
+ "psubd %%xmm6, %%xmm4 \n\t"
+ "psrad $1, %%xmm0 \n\t"
+ "psrad $1, %%xmm4 \n\t"
+ "movdqu (%0), %%xmm2 \n\t"
+ "movdqu 16(%0), %%xmm6 \n\t"
+ "psubd %%xmm0, %%xmm2 \n\t"
+ "psubd %%xmm4, %%xmm6 \n\t"
+ "movdqa %%xmm2, (%2) \n\t"
+ "movdqa %%xmm6, 16(%2) \n\t"
+ :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
+ : "memory"
+ );
+ }
+ snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO, W_AS);
+ }
+
+ {
+ snow_interleave_line_header(&i, width, b, temp);
+
+ for (; (i & 0x1E) != 0x1E; i-=2){
+ b[i+1] = temp[i>>1];
+ b[i] = b[i>>1];
+ }
+ for (i-=30; i>=0; i-=32){
+ asm volatile(
+ "movdqa (%1), %%xmm0 \n\t"
+ "movdqa 16(%1), %%xmm2 \n\t"
+ "movdqa 32(%1), %%xmm4 \n\t"
+ "movdqa 48(%1), %%xmm6 \n\t"
+ "movdqa (%1), %%xmm1 \n\t"
+ "movdqa 16(%1), %%xmm3 \n\t"
+ "movdqa 32(%1), %%xmm5 \n\t"
+ "movdqa 48(%1), %%xmm7 \n\t"
+ "punpckldq (%2), %%xmm0 \n\t"
+ "punpckldq 16(%2), %%xmm2 \n\t"
+ "punpckldq 32(%2), %%xmm4 \n\t"
+ "punpckldq 48(%2), %%xmm6 \n\t"
+ "movdqa %%xmm0, (%0) \n\t"
+ "movdqa %%xmm2, 32(%0) \n\t"
+ "movdqa %%xmm4, 64(%0) \n\t"
+ "movdqa %%xmm6, 96(%0) \n\t"
+ "punpckhdq (%2), %%xmm1 \n\t"
+ "punpckhdq 16(%2), %%xmm3 \n\t"
+ "punpckhdq 32(%2), %%xmm5 \n\t"
+ "punpckhdq 48(%2), %%xmm7 \n\t"
+ "movdqa %%xmm1, 16(%0) \n\t"
+ "movdqa %%xmm3, 48(%0) \n\t"
+ "movdqa %%xmm5, 80(%0) \n\t"
+ "movdqa %%xmm7, 112(%0) \n\t"
+ :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1])
+ : "memory"
+ );
+ }
+ }
+}
+
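Both the SSE2 routine above and the MMX variant below finish with the same interleave: the low-pass half left in b[0..w2) and the high-pass half accumulated in temp[] are merged back into b[]. A scalar sketch, assuming an even width (the snow_interleave_line_header helper deals with the odd tail):

    /* scalar sketch of the final punpckldq/punpckhdq interleave */
    for (k = w2 - 1; k >= 0; k--) {
        b[2*k + 1] = temp[k];   /* high-pass coefficient */
        b[2*k]     = b[k];      /* low-pass coefficient  */
    }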
+void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width){
+ const int w2= (width+1)>>1;
+ DWTELEM temp[width >> 1];
+ const int w_l= (width>>1);
+ const int w_r= w2 - 1;
+ int i;
+
+ { // Lift 0
+ DWTELEM * const ref = b + w2 - 1;
+
+ i = 1;
+ b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
+ asm volatile(
+ "pcmpeqd %%mm7, %%mm7 \n\t"
+ "pslld $31, %%mm7 \n\t"
+ "psrld $29, %%mm7 \n\t"
+ ::);
+ for(; i<w_l-3; i+=4){
+ asm volatile(
+ "movq (%1), %%mm2 \n\t"
+ "movq 8(%1), %%mm6 \n\t"
+ "paddd 4(%1), %%mm2 \n\t"
+ "paddd 12(%1), %%mm6 \n\t"
+ "movq %%mm2, %%mm0 \n\t"
+ "movq %%mm6, %%mm4 \n\t"
+ "paddd %%mm2, %%mm2 \n\t"
+ "paddd %%mm6, %%mm6 \n\t"
+ "paddd %%mm0, %%mm2 \n\t"
+ "paddd %%mm4, %%mm6 \n\t"
+ "paddd %%mm7, %%mm2 \n\t"
+ "paddd %%mm7, %%mm6 \n\t"
+ "psrad $3, %%mm2 \n\t"
+ "psrad $3, %%mm6 \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "movq 8(%0), %%mm4 \n\t"
+ "psubd %%mm2, %%mm0 \n\t"
+ "psubd %%mm6, %%mm4 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm4, 8(%0) \n\t"
+ :: "r"(&b[i]), "r"(&ref[i])
+ : "memory"
+ );
+ }
+ snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
+ }
+
+ { // Lift 1
+ DWTELEM * const dst = b+w2;
+
+ i = 0;
+ for(; i<w_r-3; i+=4){
+ asm volatile(
+ "movq (%1), %%mm2 \n\t"
+ "movq 8(%1), %%mm6 \n\t"
+ "paddd 4(%1), %%mm2 \n\t"
+ "paddd 12(%1), %%mm6 \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "movq 8(%0), %%mm4 \n\t"
+ "psubd %%mm2, %%mm0 \n\t"
+ "psubd %%mm6, %%mm4 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm4, 8(%0) \n\t"
+ :: "r"(&dst[i]), "r"(&b[i])
+ : "memory"
+ );
+ }
+ snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
+ }
+
+ { // Lift 2
+ DWTELEM * const ref = b+w2 - 1;
+
+ i = 1;
+ b[0] = b[0] - (((-2 * ref[1] + W_BO) - 4 * b[0]) >> W_BS);
+ asm volatile(
+ "pslld $1, %%mm7 \n\t" /* xmm7 already holds a '4' from 2 lifts ago. */
+ ::);
+ for(; i<w_l-3; i+=4){
+ asm volatile(
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm4 \n\t"
+ "paddd 4(%1), %%mm0 \n\t"
+ "paddd 12(%1), %%mm4 \n\t"
+ "movq %%mm7, %%mm1 \n\t"
+ "movq %%mm7, %%mm5 \n\t"
+ "psubd %%mm0, %%mm1 \n\t"
+ "psubd %%mm4, %%mm5 \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "movq 8(%0), %%mm4 \n\t"
+ "pslld $2, %%mm0 \n\t"
+ "pslld $2, %%mm4 \n\t"
+ "psubd %%mm0, %%mm1 \n\t"
+ "psubd %%mm4, %%mm5 \n\t"
+ "psrad $4, %%mm1 \n\t"
+ "psrad $4, %%mm5 \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "movq 8(%0), %%mm4 \n\t"
+ "psubd %%mm1, %%mm0 \n\t"
+ "psubd %%mm5, %%mm4 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm4, 8(%0) \n\t"
+ :: "r"(&b[i]), "r"(&ref[i])
+ : "memory"
+ );
+ }
+ snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
+ }
+
+ { // Lift 3
+ DWTELEM * const src = b+w2;
+ i = 0;
+
+ for(; i<w_r-3; i+=4){
+ asm volatile(
+ "movq 4(%1), %%mm2 \n\t"
+ "movq 12(%1), %%mm6 \n\t"
+ "paddd (%1), %%mm2 \n\t"
+ "paddd 8(%1), %%mm6 \n\t"
+ "movq %%mm2, %%mm0 \n\t"
+ "movq %%mm6, %%mm4 \n\t"
+ "pslld $2, %%mm2 \n\t"
+ "pslld $2, %%mm6 \n\t"
+ "psubd %%mm2, %%mm0 \n\t"
+ "psubd %%mm6, %%mm4 \n\t"
+ "psrad $1, %%mm0 \n\t"
+ "psrad $1, %%mm4 \n\t"
+ "movq (%0), %%mm2 \n\t"
+ "movq 8(%0), %%mm6 \n\t"
+ "psubd %%mm0, %%mm2 \n\t"
+ "psubd %%mm4, %%mm6 \n\t"
+ "movq %%mm2, (%2) \n\t"
+ "movq %%mm6, 8(%2) \n\t"
+ :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
+ : "memory"
+ );
+ }
+ snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO, W_AS);
+ }
+
+ {
+ snow_interleave_line_header(&i, width, b, temp);
+
+ for (; (i & 0xE) != 0xE; i-=2){
+ b[i+1] = temp[i>>1];
+ b[i] = b[i>>1];
+ }
+ for (i-=14; i>=0; i-=16){
+ asm volatile(
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm2 \n\t"
+ "movq 16(%1), %%mm4 \n\t"
+ "movq 24(%1), %%mm6 \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "movq 8(%1), %%mm3 \n\t"
+ "movq 16(%1), %%mm5 \n\t"
+ "movq 24(%1), %%mm7 \n\t"
+ "punpckldq (%2), %%mm0 \n\t"
+ "punpckldq 8(%2), %%mm2 \n\t"
+ "punpckldq 16(%2), %%mm4 \n\t"
+ "punpckldq 24(%2), %%mm6 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm2, 16(%0) \n\t"
+ "movq %%mm4, 32(%0) \n\t"
+ "movq %%mm6, 48(%0) \n\t"
+ "punpckhdq (%2), %%mm1 \n\t"
+ "punpckhdq 8(%2), %%mm3 \n\t"
+ "punpckhdq 16(%2), %%mm5 \n\t"
+ "punpckhdq 24(%2), %%mm7 \n\t"
+ "movq %%mm1, 8(%0) \n\t"
+ "movq %%mm3, 24(%0) \n\t"
+ "movq %%mm5, 40(%0) \n\t"
+ "movq %%mm7, 56(%0) \n\t"
+ :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1])
+ : "memory"
+ );
+ }
+ }
+}
+
+#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
+ ""op" (%%"r",%%"REG_d",4), %%"t0" \n\t"\
+ ""op" 16(%%"r",%%"REG_d",4), %%"t1" \n\t"\
+ ""op" 32(%%"r",%%"REG_d",4), %%"t2" \n\t"\
+ ""op" 48(%%"r",%%"REG_d",4), %%"t3" \n\t"
+
+#define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\
+ snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3)
+
+#define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\
+ snow_vertical_compose_sse2_load_add("paddd",r,t0,t1,t2,t3)
+
+#define snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
+ "psubd %%"s0", %%"t0" \n\t"\
+ "psubd %%"s1", %%"t1" \n\t"\
+ "psubd %%"s2", %%"t2" \n\t"\
+ "psubd %%"s3", %%"t3" \n\t"
+
+#define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\
+ "movdqa %%"s0", (%%"w",%%"REG_d",4) \n\t"\
+ "movdqa %%"s1", 16(%%"w",%%"REG_d",4) \n\t"\
+ "movdqa %%"s2", 32(%%"w",%%"REG_d",4) \n\t"\
+ "movdqa %%"s3", 48(%%"w",%%"REG_d",4) \n\t"
+
+#define snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)\
+ "psrad $"n", %%"t0" \n\t"\
+ "psrad $"n", %%"t1" \n\t"\
+ "psrad $"n", %%"t2" \n\t"\
+ "psrad $"n", %%"t3" \n\t"
+
+#define snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
+ "paddd %%"s0", %%"t0" \n\t"\
+ "paddd %%"s1", %%"t1" \n\t"\
+ "paddd %%"s2", %%"t2" \n\t"\
+ "paddd %%"s3", %%"t3" \n\t"
+
+#define snow_vertical_compose_sse2_sll(n,t0,t1,t2,t3)\
+ "pslld $"n", %%"t0" \n\t"\
+ "pslld $"n", %%"t1" \n\t"\
+ "pslld $"n", %%"t2" \n\t"\
+ "pslld $"n", %%"t3" \n\t"
+
+#define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\
+ "movdqa %%"s0", %%"t0" \n\t"\
+ "movdqa %%"s1", %%"t1" \n\t"\
+ "movdqa %%"s2", %%"t2" \n\t"\
+ "movdqa %%"s3", %%"t3" \n\t"
+
+void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width){
+ long i = width;
+
+ while(i & 0xF)
+ {
+ i--;
+ b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
+ b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
+ b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
+ b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
+ }
+
+ asm volatile (
+ "jmp 2f \n\t"
+ "1: \n\t"
+
+ "mov %6, %%"REG_a" \n\t"
+ "mov %4, %%"REG_S" \n\t"
+
+ snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_sse2_sll("1","xmm0","xmm2","xmm4","xmm6")\
+ snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
+
+ "pcmpeqd %%xmm1, %%xmm1 \n\t"
+ "pslld $31, %%xmm1 \n\t"
+ "psrld $29, %%xmm1 \n\t"
+ "mov %5, %%"REG_a" \n\t"
+
+ snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_sra("3","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_load(REG_a,"xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_sse2_sub("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_sse2_store(REG_a,"xmm1","xmm3","xmm5","xmm7")
+ "mov %3, %%"REG_c" \n\t"
+ snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add(REG_c,"xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_sse2_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_store(REG_S,"xmm0","xmm2","xmm4","xmm6")
+ "mov %2, %%"REG_a" \n\t"
+ snow_vertical_compose_sse2_load(REG_c,"xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_sll("2","xmm1","xmm3","xmm5","xmm7")\
+ snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
+
+ "pcmpeqd %%xmm1, %%xmm1 \n\t"
+ "pslld $31, %%xmm1 \n\t"
+ "psrld $28, %%xmm1 \n\t"
+ "mov %1, %%"REG_S" \n\t"
+
+ snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_sra("4","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_store(REG_c,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add(REG_S,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_sse2_sll("1","xmm0","xmm2","xmm4","xmm6")\
+ snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_sra("1","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_store(REG_a,"xmm0","xmm2","xmm4","xmm6")
+
+ "2: \n\t"
+ "sub $16, %%"REG_d" \n\t"
+ "jge 1b \n\t"
+ :"+d"(i)
+ :
+ "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
+ "%"REG_a"","%"REG_S"","%"REG_c"");
+}
+
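The control flow of the asm block above (and of the MMX version that follows) mirrors the scalar prologue: the while() peels width down to a multiple of 16 (8 for MMX), then the vector loop walks the index downwards and applies the same four lifting updates per block:

    /* sketch of the main-loop structure; the body is the vectorized form of the
       four b4/b3/b2/b1 updates from the scalar while() above */
    for (i -= 16; i >= 0; i -= 16) {
        /* ... 16 elements of each lifting step ... */
    }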
+#define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\
+ ""op" (%%"r",%%"REG_d",4), %%"t0" \n\t"\
+ ""op" 8(%%"r",%%"REG_d",4), %%"t1" \n\t"\
+ ""op" 16(%%"r",%%"REG_d",4), %%"t2" \n\t"\
+ ""op" 24(%%"r",%%"REG_d",4), %%"t3" \n\t"
+
+#define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\
+ snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3)
+
+#define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\
+ snow_vertical_compose_mmx_load_add("paddd",r,t0,t1,t2,t3)
+
+#define snow_vertical_compose_mmx_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
+ snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)
+
+#define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\
+ "movq %%"s0", (%%"w",%%"REG_d",4) \n\t"\
+ "movq %%"s1", 8(%%"w",%%"REG_d",4) \n\t"\
+ "movq %%"s2", 16(%%"w",%%"REG_d",4) \n\t"\
+ "movq %%"s3", 24(%%"w",%%"REG_d",4) \n\t"
+
+#define snow_vertical_compose_mmx_sra(n,t0,t1,t2,t3)\
+ snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)
+
+#define snow_vertical_compose_mmx_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
+ snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)
+
+#define snow_vertical_compose_mmx_sll(n,t0,t1,t2,t3)\
+ snow_vertical_compose_sse2_sll(n,t0,t1,t2,t3)
+
+#define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\
+ "movq %%"s0", %%"t0" \n\t"\
+ "movq %%"s1", %%"t1" \n\t"\
+ "movq %%"s2", %%"t2" \n\t"\
+ "movq %%"s3", %%"t3" \n\t"
+
+void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width){
+ long i = width;
+ while(i & 0x7)
+ {
+ i--;
+ b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
+ b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
+ b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
+ b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
+ }
+
+ asm volatile(
+ "jmp 2f \n\t"
+ "1: \n\t"
+
+ "mov %6, %%"REG_a" \n\t"
+ "mov %4, %%"REG_S" \n\t"
+
+ snow_vertical_compose_mmx_load(REG_S,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_sll("1","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
+
+ "pcmpeqd %%mm1, %%mm1 \n\t"
+ "pslld $31, %%mm1 \n\t"
+ "psrld $29, %%mm1 \n\t"
+ "mov %5, %%"REG_a" \n\t"
+
+ snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_sra("3","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_load(REG_a,"mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_sub("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_store(REG_a,"mm1","mm3","mm5","mm7")
+ "mov %3, %%"REG_c" \n\t"
+ snow_vertical_compose_mmx_load(REG_S,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add(REG_c,"mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_store(REG_S,"mm0","mm2","mm4","mm6")
+ "mov %2, %%"REG_a" \n\t"
+ snow_vertical_compose_mmx_load(REG_c,"mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_sll("2","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
+
+ "pcmpeqd %%mm1, %%mm1 \n\t"
+ "pslld $31, %%mm1 \n\t"
+ "psrld $28, %%mm1 \n\t"
+ "mov %1, %%"REG_S" \n\t"
+
+ snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_sra("4","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add(REG_c,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_store(REG_c,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add(REG_S,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_sll("1","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_sra("1","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_store(REG_a,"mm0","mm2","mm4","mm6")
+
+ "2: \n\t"
+ "sub $8, %%"REG_d" \n\t"
+ "jge 1b \n\t"
+ :"+d"(i)
+ :
+ "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
+ "%"REG_a"","%"REG_S"","%"REG_c"");
+}
+
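+/* Common prologue for the OBMC add_yblock kernels below: %%REG_c is loaded
+ * with src_stride, %%REG_S walks the obmc weight table, %%REG_a holds the
+ * block[] pointer array and %2 counts the b_h output rows; xmm7 (mm7 in the
+ * MMX variant) is kept zero for byte unpacking and xmm3/mm3 holds the
+ * rounding constant added before the final arithmetic shift. */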
+#define snow_inner_add_yblock_sse2_header \
+ DWTELEM * * dst_array = sb->line + src_y;\
+ long tmp;\
+ asm volatile(\
+ "mov %7, %%"REG_c" \n\t"\
+ "mov %6, %2 \n\t"\
+ "mov %4, %%"REG_S" \n\t"\
+ "pxor %%xmm7, %%xmm7 \n\t" /* 0 */\
+ "pcmpeqd %%xmm3, %%xmm3 \n\t"\
+ "pslld $31, %%xmm3 \n\t"\
+ "psrld $24, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\
+ "1: \n\t"\
+ "mov %1, %%"REG_D" \n\t"\
+ "mov (%%"REG_D"), %%"REG_D" \n\t"\
+ "add %3, %%"REG_D" \n\t"
+
+#define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\
+ "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
+ "movq (%%"REG_d"), %%"out_reg1" \n\t"\
+ "movq (%%"REG_d", %%"REG_c"), %%"out_reg2" \n\t"\
+ "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
+ "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
+ "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
+ "movq "s_offset"+16(%%"REG_S"), %%xmm4 \n\t"\
+ "punpcklbw %%xmm7, %%xmm0 \n\t"\
+ "punpcklbw %%xmm7, %%xmm4 \n\t"\
+ "pmullw %%xmm0, %%"out_reg1" \n\t"\
+ "pmullw %%xmm4, %%"out_reg2" \n\t"
+
+#define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\
+ "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
+ "movq (%%"REG_d"), %%"out_reg1" \n\t"\
+ "movq 8(%%"REG_d"), %%"out_reg2" \n\t"\
+ "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
+ "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
+ "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
+ "movq "s_offset"+8(%%"REG_S"), %%xmm4 \n\t"\
+ "punpcklbw %%xmm7, %%xmm0 \n\t"\
+ "punpcklbw %%xmm7, %%xmm4 \n\t"\
+ "pmullw %%xmm0, %%"out_reg1" \n\t"\
+ "pmullw %%xmm4, %%"out_reg2" \n\t"
+
+#define snow_inner_add_yblock_sse2_accum_8(ptr_offset, s_offset) \
+ snow_inner_add_yblock_sse2_start_8("xmm2", "xmm6", ptr_offset, s_offset)\
+ "paddusw %%xmm2, %%xmm1 \n\t"\
+ "paddusw %%xmm6, %%xmm5 \n\t"
+
+#define snow_inner_add_yblock_sse2_accum_16(ptr_offset, s_offset) \
+ snow_inner_add_yblock_sse2_start_16("xmm2", "xmm6", ptr_offset, s_offset)\
+ "paddusw %%xmm2, %%xmm1 \n\t"\
+ "paddusw %%xmm6, %%xmm5 \n\t"
+
+#define snow_inner_add_yblock_sse2_end_common1\
+ "add $32, %%"REG_S" \n\t"\
+ "add %%"REG_c", %0 \n\t"\
+ "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
+ "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
+ "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
+ "add %%"REG_c", (%%"REG_a") \n\t"
+
+#define snow_inner_add_yblock_sse2_end_common2\
+ "jnz 1b \n\t"\
+ :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
+ :\
+ "rm"((long)(src_x<<2)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\
+ "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");
+
+#define snow_inner_add_yblock_sse2_end_8\
+ "sal $1, %%"REG_c" \n\t"\
+ "add $"PTR_SIZE"*2, %1 \n\t"\
+ snow_inner_add_yblock_sse2_end_common1\
+ "sar $1, %%"REG_c" \n\t"\
+ "sub $2, %2 \n\t"\
+ snow_inner_add_yblock_sse2_end_common2
+
+#define snow_inner_add_yblock_sse2_end_16\
+ "add $"PTR_SIZE"*1, %1 \n\t"\
+ snow_inner_add_yblock_sse2_end_common1\
+ "dec %2 \n\t"\
+ snow_inner_add_yblock_sse2_end_common2
+
+static void inner_add_yblock_bw_8_obmc_16_bh_even_sse2(uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
+ int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
+snow_inner_add_yblock_sse2_header
+snow_inner_add_yblock_sse2_start_8("xmm1", "xmm5", "3", "0")
+snow_inner_add_yblock_sse2_accum_8("2", "8")
+snow_inner_add_yblock_sse2_accum_8("1", "128")
+snow_inner_add_yblock_sse2_accum_8("0", "136")
+
+ "mov %0, %%"REG_d" \n\t"
+ "movdqa (%%"REG_D"), %%xmm0 \n\t"
+ "movdqa %%xmm1, %%xmm2 \n\t"
+
+ "punpckhwd %%xmm7, %%xmm1 \n\t"
+ "punpcklwd %%xmm7, %%xmm2 \n\t"
+ "paddd %%xmm2, %%xmm0 \n\t"
+ "movdqa 16(%%"REG_D"), %%xmm2 \n\t"
+ "paddd %%xmm1, %%xmm2 \n\t"
+ "paddd %%xmm3, %%xmm0 \n\t"
+ "paddd %%xmm3, %%xmm2 \n\t"
+
+ "mov %1, %%"REG_D" \n\t"
+ "mov "PTR_SIZE"(%%"REG_D"), %%"REG_D";\n\t"
+ "add %3, %%"REG_D" \n\t"
+
+ "movdqa (%%"REG_D"), %%xmm4 \n\t"
+ "movdqa %%xmm5, %%xmm6 \n\t"
+ "punpckhwd %%xmm7, %%xmm5 \n\t"
+ "punpcklwd %%xmm7, %%xmm6 \n\t"
+ "paddd %%xmm6, %%xmm4 \n\t"
+ "movdqa 16(%%"REG_D"), %%xmm6 \n\t"
+ "paddd %%xmm5, %%xmm6 \n\t"
+ "paddd %%xmm3, %%xmm4 \n\t"
+ "paddd %%xmm3, %%xmm6 \n\t"
+
+ "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */
+ "psrad $8, %%xmm2 \n\t" /* FRAC_BITS. */
+ "packssdw %%xmm2, %%xmm0 \n\t"
+ "packuswb %%xmm7, %%xmm0 \n\t"
+ "movq %%xmm0, (%%"REG_d") \n\t"
+
+ "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
+ "psrad $8, %%xmm6 \n\t" /* FRAC_BITS. */
+ "packssdw %%xmm6, %%xmm4 \n\t"
+ "packuswb %%xmm7, %%xmm4 \n\t"
+ "movq %%xmm4, (%%"REG_d",%%"REG_c");\n\t"
+snow_inner_add_yblock_sse2_end_8
+}
+
+static void inner_add_yblock_bw_16_obmc_32_sse2(uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
+ int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
+snow_inner_add_yblock_sse2_header
+snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0")
+snow_inner_add_yblock_sse2_accum_16("2", "16")
+snow_inner_add_yblock_sse2_accum_16("1", "512")
+snow_inner_add_yblock_sse2_accum_16("0", "528")
+
+ "mov %0, %%"REG_d" \n\t"
+ "movdqa %%xmm1, %%xmm0 \n\t"
+ "movdqa %%xmm5, %%xmm4 \n\t"
+ "punpcklwd %%xmm7, %%xmm0 \n\t"
+ "paddd (%%"REG_D"), %%xmm0 \n\t"
+ "punpckhwd %%xmm7, %%xmm1 \n\t"
+ "paddd 16(%%"REG_D"), %%xmm1 \n\t"
+ "punpcklwd %%xmm7, %%xmm4 \n\t"
+ "paddd 32(%%"REG_D"), %%xmm4 \n\t"
+ "punpckhwd %%xmm7, %%xmm5 \n\t"
+ "paddd 48(%%"REG_D"), %%xmm5 \n\t"
+ "paddd %%xmm3, %%xmm0 \n\t"
+ "paddd %%xmm3, %%xmm1 \n\t"
+ "paddd %%xmm3, %%xmm4 \n\t"
+ "paddd %%xmm3, %%xmm5 \n\t"
+ "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */
+ "psrad $8, %%xmm1 \n\t" /* FRAC_BITS. */
+ "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
+ "psrad $8, %%xmm5 \n\t" /* FRAC_BITS. */
+
+ "packssdw %%xmm1, %%xmm0 \n\t"
+ "packssdw %%xmm5, %%xmm4 \n\t"
+ "packuswb %%xmm4, %%xmm0 \n\t"
+
+ "movdqu %%xmm0, (%%"REG_d") \n\t"
+
+snow_inner_add_yblock_sse2_end_16
+}
+
+#define snow_inner_add_yblock_mmx_header \
+ DWTELEM * * dst_array = sb->line + src_y;\
+ long tmp;\
+ asm volatile(\
+ "mov %7, %%"REG_c" \n\t"\
+ "mov %6, %2 \n\t"\
+ "mov %4, %%"REG_S" \n\t"\
+ "pxor %%mm7, %%mm7 \n\t" /* 0 */\
+ "pcmpeqd %%mm3, %%mm3 \n\t"\
+ "pslld $31, %%mm3 \n\t"\
+ "psrld $24, %%mm3 \n\t" /* FRAC_BITS >> 1 */\
+ "1: \n\t"\
+ "mov %1, %%"REG_D" \n\t"\
+ "mov (%%"REG_D"), %%"REG_D" \n\t"\
+ "add %3, %%"REG_D" \n\t"
+
+#define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\
+ "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
+ "movd "d_offset"(%%"REG_d"), %%"out_reg1" \n\t"\
+ "movd "d_offset"+4(%%"REG_d"), %%"out_reg2" \n\t"\
+ "punpcklbw %%mm7, %%"out_reg1" \n\t"\
+ "punpcklbw %%mm7, %%"out_reg2" \n\t"\
+ "movd "s_offset"(%%"REG_S"), %%mm0 \n\t"\
+ "movd "s_offset"+4(%%"REG_S"), %%mm4 \n\t"\
+ "punpcklbw %%mm7, %%mm0 \n\t"\
+ "punpcklbw %%mm7, %%mm4 \n\t"\
+ "pmullw %%mm0, %%"out_reg1" \n\t"\
+ "pmullw %%mm4, %%"out_reg2" \n\t"
+
+#define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \
+ snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\
+ "paddusw %%mm2, %%mm1 \n\t"\
+ "paddusw %%mm6, %%mm5 \n\t"
+
+#define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\
+ "mov %0, %%"REG_d" \n\t"\
+ "movq %%mm1, %%mm0 \n\t"\
+ "movq %%mm5, %%mm4 \n\t"\
+ "punpcklwd %%mm7, %%mm0 \n\t"\
+ "paddd "read_offset"(%%"REG_D"), %%mm0 \n\t"\
+ "punpckhwd %%mm7, %%mm1 \n\t"\
+ "paddd "read_offset"+8(%%"REG_D"), %%mm1 \n\t"\
+ "punpcklwd %%mm7, %%mm4 \n\t"\
+ "paddd "read_offset"+16(%%"REG_D"), %%mm4 \n\t"\
+ "punpckhwd %%mm7, %%mm5 \n\t"\
+ "paddd "read_offset"+24(%%"REG_D"), %%mm5 \n\t"\
+ "paddd %%mm3, %%mm0 \n\t"\
+ "paddd %%mm3, %%mm1 \n\t"\
+ "paddd %%mm3, %%mm4 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "psrad $8, %%mm0 \n\t"\
+ "psrad $8, %%mm1 \n\t"\
+ "psrad $8, %%mm4 \n\t"\
+ "psrad $8, %%mm5 \n\t"\
+\
+ "packssdw %%mm1, %%mm0 \n\t"\
+ "packssdw %%mm5, %%mm4 \n\t"\
+ "packuswb %%mm4, %%mm0 \n\t"\
+ "movq %%mm0, "write_offset"(%%"REG_d") \n\t"
+
+#define snow_inner_add_yblock_mmx_end(s_step)\
+ "add $"s_step", %%"REG_S" \n\t"\
+ "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
+ "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
+ "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
+ "add %%"REG_c", (%%"REG_a") \n\t"\
+ "add $"PTR_SIZE"*1, %1 \n\t"\
+ "add %%"REG_c", %0 \n\t"\
+ "dec %2 \n\t"\
+ "jnz 1b \n\t"\
+ :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\
+ :\
+ "rm"((long)(src_x<<2)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\
+ "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");
+
+static void inner_add_yblock_bw_8_obmc_16_mmx(uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
+ int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
+snow_inner_add_yblock_mmx_header
+snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
+snow_inner_add_yblock_mmx_accum("2", "8", "0")
+snow_inner_add_yblock_mmx_accum("1", "128", "0")
+snow_inner_add_yblock_mmx_accum("0", "136", "0")
+snow_inner_add_yblock_mmx_mix("0", "0")
+snow_inner_add_yblock_mmx_end("16")
+}
+
+static void inner_add_yblock_bw_16_obmc_32_mmx(uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
+ int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
+snow_inner_add_yblock_mmx_header
+snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
+snow_inner_add_yblock_mmx_accum("2", "16", "0")
+snow_inner_add_yblock_mmx_accum("1", "512", "0")
+snow_inner_add_yblock_mmx_accum("0", "528", "0")
+snow_inner_add_yblock_mmx_mix("0", "0")
+
+snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "8", "8")
+snow_inner_add_yblock_mmx_accum("2", "24", "8")
+snow_inner_add_yblock_mmx_accum("1", "520", "8")
+snow_inner_add_yblock_mmx_accum("0", "536", "8")
+snow_inner_add_yblock_mmx_mix("32", "8")
+snow_inner_add_yblock_mmx_end("32")
+}
+
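+/* Dispatchers: pick a specialised OBMC kernel by block width. The SSE2
+ * bw==8 kernel writes two output rows per pass, so odd block heights fall
+ * back to the MMX kernel; any other geometry falls back to the generic
+ * ff_snow_inner_add_yblock() C implementation. */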
+void ff_snow_inner_add_yblock_sse2(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
+ int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
+
+ if (b_w == 16)
+ inner_add_yblock_bw_16_obmc_32_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
+ else if (b_w == 8 && obmc_stride == 16) {
+ if (!(b_h & 1))
+ inner_add_yblock_bw_8_obmc_16_bh_even_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
+ else
+ inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
+ } else
+ ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
+}
+
+void ff_snow_inner_add_yblock_mmx(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
+ int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
+ if (b_w == 16)
+ inner_add_yblock_bw_16_obmc_32_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
+ else if (b_w == 8 && obmc_stride == 16)
+ inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
+ else
+ ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
+}
diff --git a/src/libffmpeg/libavcodec/i386/vp3dsp_mmx.c b/contrib/ffmpeg/libavcodec/i386/vp3dsp_mmx.c
index 0684531ae..f715dc803 100644
--- a/src/libffmpeg/libavcodec/i386/vp3dsp_mmx.c
+++ b/contrib/ffmpeg/libavcodec/i386/vp3dsp_mmx.c
@@ -1,18 +1,20 @@
/*
* Copyright (C) 2004 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c b/contrib/ffmpeg/libavcodec/i386/vp3dsp_sse2.c
index cf822f7d4..bd2911d59 100644
--- a/src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c
+++ b/contrib/ffmpeg/libavcodec/i386/vp3dsp_sse2.c
@@ -1,18 +1,20 @@
/*
* Copyright (C) 2004 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/idcinvideo.c b/contrib/ffmpeg/libavcodec/idcinvideo.c
index 7e7e6aab1..112da7bc6 100644
--- a/src/libffmpeg/libavcodec/idcinvideo.c
+++ b/contrib/ffmpeg/libavcodec/idcinvideo.c
@@ -2,18 +2,20 @@
* Id Quake II CIN Video Decoder
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/contrib/ffmpeg/libavcodec/imc.c b/contrib/ffmpeg/libavcodec/imc.c
new file mode 100644
index 000000000..7360b6409
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/imc.c
@@ -0,0 +1,813 @@
+/*
+ * IMC compatible decoder
+ * Copyright (c) 2002-2004 Maxim Poliakovski
+ * Copyright (c) 2006 Benjamin Larsson
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/**
+ * @file imc.c IMC - Intel Music Coder
+ * An MDCT-based codec using a 256-point transform
+ * divided into 32 bands with per-band scale factors.
+ * Only mono is supported.
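+ *
+ * Frame layout, as parsed by imc_decode_frame() below: each frame is read
+ * as 512 bits starting with a 9-bit sync word (0x21) and a 3-bit
+ * stream_format_code, followed by a one-bit flag, the per-band level
+ * coefficients, band flags, skip flags and finally the variable-length
+ * codewords that are inverse quantised and fed to the 256-point IMDCT.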
+ *
+ */
+
+
+#include <math.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#define ALT_BITSTREAM_READER
+#include "avcodec.h"
+#include "bitstream.h"
+#include "dsputil.h"
+
+#include "imcdata.h"
+
+#define IMC_FRAME_ID 0x21
+#define BANDS 32
+#define COEFFS 256
+
+typedef struct {
+ float old_floor[BANDS];
+ float flcoeffs1[BANDS];
+ float flcoeffs2[BANDS];
+ float flcoeffs3[BANDS];
+ float flcoeffs4[BANDS];
+ float flcoeffs5[BANDS];
+ float flcoeffs6[BANDS];
+ float CWdecoded[COEFFS];
+
+ /** MDCT tables */
+ //@{
+ float mdct_sine_window[COEFFS];
+ float post_cos[COEFFS];
+ float post_sin[COEFFS];
+ float pre_coef1[COEFFS];
+ float pre_coef2[COEFFS];
+ float last_fft_im[COEFFS];
+ //@}
+
+ int bandWidthT[BANDS]; ///< codewords per band
+ int bitsBandT[BANDS]; ///< how many bits per codeword in band
+ int CWlengthT[COEFFS]; ///< how many bits in each codeword
+ int levlCoeffBuf[BANDS];
+ int bandFlagsBuf[BANDS]; ///< flags for each band
+ int sumLenArr[BANDS]; ///< bits for all coeffs in band
+ int skipFlagRaw[BANDS]; ///< whether a band's skip flags are stored in raw form
+ int skipFlagBits[BANDS]; ///< bits used to code skip flags
+ int skipFlagCount[BANDS]; ///< skipped coefficients per band
+ int skipFlags[COEFFS]; ///< whether decoding of this coefficient is skipped
+ int codewords[COEFFS]; ///< raw codewords read from bitstream
+ float sqrt_tab[30];
+ GetBitContext gb;
+ VLC huffman_vlc[4][4];
+ int decoder_reset;
+ float one_div_log2;
+
+ DSPContext dsp;
+ FFTContext fft;
+ DECLARE_ALIGNED_16(FFTComplex, samples[COEFFS/2]);
+ DECLARE_ALIGNED_16(float, out_samples[COEFFS]);
+} IMCContext;
+
+
+static int imc_decode_init(AVCodecContext * avctx)
+{
+ int i, j;
+ IMCContext *q = avctx->priv_data;
+ double r1, r2;
+
+ q->decoder_reset = 1;
+
+ for(i = 0; i < BANDS; i++)
+ q->old_floor[i] = 1.0;
+
+ /* Build the MDCT window, a simple sine window normalized with sqrt(2) */
+ for(i = 0; i < COEFFS; i++)
+ q->mdct_sine_window[i] = sin((i + 0.5) / 512.0 * M_PI) * sqrt(2.0);
+ for(i = 0; i < COEFFS/2; i++){
+ q->post_cos[i] = cos(i / 256.0 * M_PI);
+ q->post_sin[i] = sin(i / 256.0 * M_PI);
+
+ r1 = sin((i * 4.0 + 1.0) / 1024.0 * M_PI);
+ r2 = cos((i * 4.0 + 1.0) / 1024.0 * M_PI);
+
+ if (i & 0x1)
+ {
+ q->pre_coef1[i] = (r1 + r2) * sqrt(2.0);
+ q->pre_coef2[i] = -(r1 - r2) * sqrt(2.0);
+ }
+ else
+ {
+ q->pre_coef1[i] = -(r1 + r2) * sqrt(2.0);
+ q->pre_coef2[i] = (r1 - r2) * sqrt(2.0);
+ }
+
+ q->last_fft_im[i] = 0;
+ }
+
+ /* Generate a square root table */
+
+ for(i = 0; i < 30; i++) {
+ q->sqrt_tab[i] = sqrt(i);
+ }
+
+ /* initialize the VLC tables */
+ for(i = 0; i < 4 ; i++) {
+ for(j = 0; j < 4; j++) {
+ init_vlc (&q->huffman_vlc[i][j], 9, imc_huffman_sizes[i],
+ imc_huffman_lens[i][j], 1, 1,
+ imc_huffman_bits[i][j], 2, 2, 1);
+ }
+ }
+ q->one_div_log2 = 1/log(2);
+
+ ff_fft_init(&q->fft, 7, 1);
+ dsputil_init(&q->dsp, avctx);
+ return 0;
+}
+
+static void imc_calculate_coeffs(IMCContext* q, float* flcoeffs1, float* flcoeffs2, int* bandWidthT,
+ float* flcoeffs3, float* flcoeffs5)
+{
+ float workT1[BANDS];
+ float workT2[BANDS];
+ float workT3[BANDS];
+ float snr_limit = 1.e-30;
+ float accum = 0.0;
+ int i, cnt2;
+
+ for(i = 0; i < BANDS; i++) {
+ flcoeffs5[i] = workT2[i] = 0.0;
+ if (bandWidthT[i]){
+ workT1[i] = flcoeffs1[i] * flcoeffs1[i];
+ flcoeffs3[i] = 2.0 * flcoeffs2[i];
+ } else {
+ workT1[i] = 0.0;
+ flcoeffs3[i] = -30000.0;
+ }
+ workT3[i] = bandWidthT[i] * workT1[i] * 0.01;
+ if (workT3[i] <= snr_limit)
+ workT3[i] = 0.0;
+ }
+
+ for(i = 0; i < BANDS; i++) {
+ for(cnt2 = i; cnt2 < cyclTab[i]; cnt2++)
+ flcoeffs5[cnt2] = flcoeffs5[cnt2] + workT3[i];
+ workT2[cnt2-1] = workT2[cnt2-1] + workT3[i];
+ }
+
+ for(i = 1; i < BANDS; i++) {
+ accum = (workT2[i-1] + accum) * imc_weights1[i-1];
+ flcoeffs5[i] += accum;
+ }
+
+ for(i = 0; i < BANDS; i++)
+ workT2[i] = 0.0;
+
+ for(i = 0; i < BANDS; i++) {
+ for(cnt2 = i-1; cnt2 > cyclTab2[i]; cnt2--)
+ flcoeffs5[cnt2] += workT3[i];
+ workT2[cnt2+1] += workT3[i];
+ }
+
+ accum = 0.0;
+
+ for(i = BANDS-2; i >= 0; i--) {
+ accum = (workT2[i+1] + accum) * imc_weights2[i];
+ flcoeffs5[i] += accum;
+ //there is missing code here, but it seems to never be triggered
+ }
+}
+
+
+static void imc_read_level_coeffs(IMCContext* q, int stream_format_code, int* levlCoeffs)
+{
+ int i;
+ VLC *hufftab[4];
+ int start = 0;
+ const uint8_t *cb_sel;
+ int s;
+
+ s = stream_format_code >> 1;
+ hufftab[0] = &q->huffman_vlc[s][0];
+ hufftab[1] = &q->huffman_vlc[s][1];
+ hufftab[2] = &q->huffman_vlc[s][2];
+ hufftab[3] = &q->huffman_vlc[s][3];
+ cb_sel = imc_cb_select[s];
+
+ if(stream_format_code & 4)
+ start = 1;
+ if(start)
+ levlCoeffs[0] = get_bits(&q->gb, 7);
+ for(i = start; i < BANDS; i++){
+ levlCoeffs[i] = get_vlc2(&q->gb, hufftab[cb_sel[i]]->table, hufftab[cb_sel[i]]->bits, 2);
+ if(levlCoeffs[i] == 17)
+ levlCoeffs[i] += get_bits(&q->gb, 4);
+ }
+}
+
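+/* Expand the coded level indices into linear scale factors. The tables are
+ * base 10: one level step corresponds to a factor of 10^0.25 (imc_exp_tab),
+ * i.e. log2(10) * 0.25 ~= 0.83048 in the log2 domain kept in flcoeffs2,
+ * and the first band is derived directly as
+ *   flcoeffs1[0] = 20000 / 10^(levlCoeffBuf[0] * 0.05703125)
+ *                = 20000 / 2^(levlCoeffBuf[0] * 0.18945). */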
+static void imc_decode_level_coefficients(IMCContext* q, int* levlCoeffBuf, float* flcoeffs1,
+ float* flcoeffs2)
+{
+ int i, level;
+ float tmp, tmp2;
+ //maybe some frequency division thingy
+
+ flcoeffs1[0] = 20000.0 / pow (2, levlCoeffBuf[0] * 0.18945); // 0.18945 = log2(10) * 0.05703125
+ flcoeffs2[0] = log(flcoeffs1[0])/log(2);
+ tmp = flcoeffs1[0];
+ tmp2 = flcoeffs2[0];
+
+ for(i = 1; i < BANDS; i++) {
+ level = levlCoeffBuf[i];
+ if (level == 16) {
+ flcoeffs1[i] = 1.0;
+ flcoeffs2[i] = 0.0;
+ } else {
+ if (level < 17)
+ level -=7;
+ else if (level <= 24)
+ level -=32;
+ else
+ level -=16;
+
+ tmp *= imc_exp_tab[15 + level];
+ tmp2 += 0.83048 * level; // 0.83048 = log2(10) * 0.25
+ flcoeffs1[i] = tmp;
+ flcoeffs2[i] = tmp2;
+ }
+ }
+}
+
+
+static void imc_decode_level_coefficients2(IMCContext* q, int* levlCoeffBuf, float* old_floor, float* flcoeffs1,
+ float* flcoeffs2) {
+ int i;
+ //FIXME maybe flag_buf = noise coding and flcoeffs1 = new scale factors
+ // and flcoeffs2 old scale factors
+ // might be incomplete due to a missing table that is in the binary code
+ for(i = 0; i < BANDS; i++) {
+ flcoeffs1[i] = 0;
+ if(levlCoeffBuf[i] < 16) {
+ flcoeffs1[i] = imc_exp_tab2[levlCoeffBuf[i]] * old_floor[i];
+ flcoeffs2[i] = (levlCoeffBuf[i]-7) * 0.83048 + flcoeffs2[i]; // 0.83048 = log2(10) * 0.25
+ } else {
+ flcoeffs1[i] = old_floor[i];
+ }
+ }
+}
+
+/**
+ * Perform bit allocation depending on bits available
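+ *
+ * A per-band priority measure (flcoeffs4) is derived first; the main loop
+ * then adjusts the global threshold "summa" until the number of bits handed
+ * out ("summer") is within 8 bits of "freebits", and any remaining surplus
+ * or deficit is corrected band by band afterwards.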
+ */
+static int bit_allocation (IMCContext* q, int stream_format_code, int freebits, int flag) {
+ int i, j;
+ const float limit = -1.e20;
+ float highest = 0.0;
+ int indx;
+ int t1 = 0;
+ int t2 = 1;
+ float summa = 0.0;
+ int iacc = 0;
+ int summer = 0;
+ int rres, cwlen;
+ float lowest = 1.e10;
+ int low_indx = 0;
+ float workT[32];
+ int flg;
+ int found_indx = 0;
+
+ for(i = 0; i < BANDS; i++)
+ highest = FFMAX(highest, q->flcoeffs1[i]);
+
+ for(i = 0; i < BANDS-1; i++) {
+ q->flcoeffs4[i] = q->flcoeffs3[i] - log(q->flcoeffs5[i])/log(2);
+ }
+ q->flcoeffs4[BANDS - 1] = limit;
+
+ highest = highest * 0.25;
+
+ for(i = 0; i < BANDS; i++) {
+ indx = -1;
+ if ((band_tab[i+1] - band_tab[i]) == q->bandWidthT[i])
+ indx = 0;
+
+ if ((band_tab[i+1] - band_tab[i]) > q->bandWidthT[i])
+ indx = 1;
+
+ if (((band_tab[i+1] - band_tab[i])/2) >= q->bandWidthT[i])
+ indx = 2;
+
+ if (indx == -1)
+ return -1;
+
+ q->flcoeffs4[i] = q->flcoeffs4[i] + xTab[(indx*2 + (q->flcoeffs1[i] < highest)) * 2 + flag];
+ }
+
+ if (stream_format_code & 0x2) {
+ q->flcoeffs4[0] = limit;
+ q->flcoeffs4[1] = limit;
+ q->flcoeffs4[2] = limit;
+ q->flcoeffs4[3] = limit;
+ }
+
+ for(i = (stream_format_code & 0x2)?4:0; i < BANDS-1; i++) {
+ iacc += q->bandWidthT[i];
+ summa += q->bandWidthT[i] * q->flcoeffs4[i];
+ }
+ q->bandWidthT[BANDS-1] = 0;
+ summa = (summa * 0.5 - freebits) / iacc;
+
+
+ for(i = 0; i < BANDS/2; i++) {
+ rres = summer - freebits;
+ if((rres >= -8) && (rres <= 8)) break;
+
+ summer = 0;
+ iacc = 0;
+
+ for(j = (stream_format_code & 0x2)?4:0; j < BANDS; j++) {
+ cwlen = clip((int)((q->flcoeffs4[j] * 0.5) - summa + 0.5), 0, 6);
+
+ q->bitsBandT[j] = cwlen;
+ summer += q->bandWidthT[j] * cwlen;
+
+ if (cwlen > 0)
+ iacc += q->bandWidthT[j];
+ }
+
+ flg = t2;
+ t2 = 1;
+ if (freebits < summer)
+ t2 = -1;
+ if (i == 0)
+ flg = t2;
+ if(flg != t2)
+ t1++;
+
+ summa = (float)(summer - freebits) / ((t1 + 1) * iacc) + summa;
+ }
+
+ for(i = (stream_format_code & 0x2)?4:0; i < BANDS; i++) {
+ for(j = band_tab[i]; j < band_tab[i+1]; j++)
+ q->CWlengthT[j] = q->bitsBandT[i];
+ }
+
+ if (freebits > summer) {
+ for(i = 0; i < BANDS; i++) {
+ workT[i] = (q->bitsBandT[i] == 6) ? -1.e20 : (q->bitsBandT[i] * -2 + q->flcoeffs4[i] - 0.415);
+ }
+
+ highest = 0.0;
+
+ do{
+ if (highest <= -1.e20)
+ break;
+
+ found_indx = 0;
+ highest = -1.e20;
+
+ for(i = 0; i < BANDS; i++) {
+ if (workT[i] > highest) {
+ highest = workT[i];
+ found_indx = i;
+ }
+ }
+
+ if (highest > -1.e20) {
+ workT[found_indx] -= 2.0;
+ if (++(q->bitsBandT[found_indx]) == 6)
+ workT[found_indx] = -1.e20;
+
+ for(j = band_tab[found_indx]; j < band_tab[found_indx+1] && (freebits > summer); j++){
+ q->CWlengthT[j]++;
+ summer++;
+ }
+ }
+ }while (freebits > summer);
+ }
+ if (freebits < summer) {
+ for(i = 0; i < BANDS; i++) {
+ workT[i] = q->bitsBandT[i] ? (q->bitsBandT[i] * -2 + q->flcoeffs4[i] + 1.585) : 1.e20;
+ }
+ if (stream_format_code & 0x2) {
+ workT[0] = 1.e20;
+ workT[1] = 1.e20;
+ workT[2] = 1.e20;
+ workT[3] = 1.e20;
+ }
+ while (freebits < summer){
+ lowest = 1.e10;
+ low_indx = 0;
+ for(i = 0; i < BANDS; i++) {
+ if (workT[i] < lowest) {
+ lowest = workT[i];
+ low_indx = i;
+ }
+ }
+ //if(lowest >= 1.e10) break;
+ workT[low_indx] = lowest + 2.0;
+
+ if (!(--q->bitsBandT[low_indx]))
+ workT[low_indx] = 1.e20;
+
+ for(j = band_tab[low_indx]; j < band_tab[low_indx+1] && (freebits < summer); j++){
+ if(q->CWlengthT[j] > 0){
+ q->CWlengthT[j]--;
+ summer--;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
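+/* Read the per-coefficient skip flags: depending on skipFlagRaw[] a band
+ * either stores one bit per coefficient or codes coefficients in pairs with
+ * the prefix code 0 -> both skipped, 11 -> only the second skipped,
+ * 100 -> only the first skipped, 101 -> neither (see the bit comments below). */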
+static void imc_get_skip_coeff(IMCContext* q) {
+ int i, j;
+
+ memset(q->skipFlagBits, 0, sizeof(q->skipFlagBits));
+ memset(q->skipFlagCount, 0, sizeof(q->skipFlagCount));
+ for(i = 0; i < BANDS; i++) {
+ if (!q->bandFlagsBuf[i] || !q->bandWidthT[i])
+ continue;
+
+ if (!q->skipFlagRaw[i]) {
+ q->skipFlagBits[i] = band_tab[i+1] - band_tab[i];
+
+ for(j = band_tab[i]; j < band_tab[i+1]; j++) {
+ if ((q->skipFlags[j] = get_bits(&q->gb,1)))
+ q->skipFlagCount[i]++;
+ }
+ } else {
+ for(j = band_tab[i]; j < (band_tab[i+1]-1); j += 2) {
+ if(!get_bits1(&q->gb)){//0
+ q->skipFlagBits[i]++;
+ q->skipFlags[j]=1;
+ q->skipFlags[j+1]=1;
+ q->skipFlagCount[i] += 2;
+ }else{
+ if(get_bits1(&q->gb)){//11
+ q->skipFlagBits[i] +=2;
+ q->skipFlags[j]=0;
+ q->skipFlags[j+1]=1;
+ q->skipFlagCount[i]++;
+ }else{
+ q->skipFlagBits[i] +=3;
+ q->skipFlags[j+1]=0;
+ if(!get_bits1(&q->gb)){//100
+ q->skipFlags[j]=1;
+ q->skipFlagCount[i]++;
+ }else{//101
+ q->skipFlags[j]=0;
+ }
+ }
+ }
+ }
+
+ if (j < band_tab[i+1]) {
+ q->skipFlagBits[i]++;
+ if ((q->skipFlags[j] = get_bits(&q->gb,1)))
+ q->skipFlagCount[i]++;
+ }
+ }
+ }
+}
+
+/**
+ * Increase the coefficient sizes of the highest-priority bands, as some bits would otherwise go unused
+ */
+static void imc_adjust_bit_allocation (IMCContext* q, int summer) {
+ float workT[32];
+ int corrected = 0;
+ int i, j;
+ float highest = 0;
+ int found_indx=0;
+
+ for(i = 0; i < BANDS; i++) {
+ workT[i] = (q->bitsBandT[i] == 6) ? -1.e20 : (q->bitsBandT[i] * -2 + q->flcoeffs4[i] - 0.415);
+ }
+
+ while (corrected < summer) {
+ if(highest <= -1.e20)
+ break;
+
+ highest = -1.e20;
+
+ for(i = 0; i < BANDS; i++) {
+ if (workT[i] > highest) {
+ highest = workT[i];
+ found_indx = i;
+ }
+ }
+
+ if (highest > -1.e20) {
+ workT[found_indx] -= 2.0;
+ if (++(q->bitsBandT[found_indx]) == 6)
+ workT[found_indx] = -1.e20;
+
+ for(j = band_tab[found_indx]; j < band_tab[found_indx+1] && (corrected < summer); j++) {
+ if (!q->skipFlags[j] && (q->CWlengthT[j] < 6)) {
+ q->CWlengthT[j]++;
+ corrected++;
+ }
+ }
+ }
+ }
+}
+
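+/* 256-point inverse MDCT built on a 128-point complex FFT (ff_fft_init with
+ * nbits = 7): pre-rotate the decoded coefficients, run the FFT, post-rotate,
+ * then window the result against the imaginary parts kept from the previous
+ * frame (last_fft_im) to produce the overlapped output samples. */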
+static void imc_imdct256(IMCContext *q) {
+ int i;
+ float re, im;
+
+ /* prerotation */
+ for(i=0; i < COEFFS/2; i++){
+ q->samples[i].re = -(q->pre_coef1[i] * q->CWdecoded[COEFFS-1-i*2]) -
+ (q->pre_coef2[i] * q->CWdecoded[i*2]);
+ q->samples[i].im = (q->pre_coef2[i] * q->CWdecoded[COEFFS-1-i*2]) -
+ (q->pre_coef1[i] * q->CWdecoded[i*2]);
+ }
+
+ /* FFT */
+ ff_fft_permute(&q->fft, q->samples);
+ ff_fft_calc (&q->fft, q->samples);
+
+ /* postrotation, window and reorder */
+ for(i = 0; i < COEFFS/2; i++){
+ re = (q->samples[i].re * q->post_cos[i]) + (-q->samples[i].im * q->post_sin[i]);
+ im = (-q->samples[i].im * q->post_cos[i]) - (q->samples[i].re * q->post_sin[i]);
+ q->out_samples[i*2] = (q->mdct_sine_window[COEFFS-1-i*2] * q->last_fft_im[i]) + (q->mdct_sine_window[i*2] * re);
+ q->out_samples[COEFFS-1-i*2] = (q->mdct_sine_window[i*2] * q->last_fft_im[i]) - (q->mdct_sine_window[COEFFS-1-i*2] * re);
+ q->last_fft_im[i] = im;
+ }
+}
+
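+/* Map the raw codewords back to spectral values: a codeword below the
+ * midpoint of its range selects a negative quantiser entry, the rest select
+ * positive ones; codewords shorter than 4 bits use imc_quantizer1, longer
+ * ones imc_quantizer2, and the result is scaled by the band factor
+ * flcoeffs6[]. */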
+static int inverse_quant_coeff (IMCContext* q, int stream_format_code) {
+ int i, j;
+ int middle_value, cw_len, max_size;
+ const float* quantizer;
+
+ for(i = 0; i < BANDS; i++) {
+ for(j = band_tab[i]; j < band_tab[i+1]; j++) {
+ q->CWdecoded[j] = 0;
+ cw_len = q->CWlengthT[j];
+
+ if (cw_len <= 0 || q->skipFlags[j])
+ continue;
+
+ max_size = 1 << cw_len;
+ middle_value = max_size >> 1;
+
+ if (q->codewords[j] >= max_size || q->codewords[j] < 0)
+ return -1;
+
+ if (cw_len >= 4){
+ quantizer = imc_quantizer2[(stream_format_code & 2) >> 1];
+ if (q->codewords[j] >= middle_value)
+ q->CWdecoded[j] = quantizer[q->codewords[j] - 8] * q->flcoeffs6[i];
+ else
+ q->CWdecoded[j] = -quantizer[max_size - q->codewords[j] - 8 - 1] * q->flcoeffs6[i];
+ }else{
+ quantizer = imc_quantizer1[((stream_format_code & 2) >> 1) | (q->bandFlagsBuf[i] << 1)];
+ if (q->codewords[j] >= middle_value)
+ q->CWdecoded[j] = quantizer[q->codewords[j] - 1] * q->flcoeffs6[i];
+ else
+ q->CWdecoded[j] = -quantizer[max_size - 2 - q->codewords[j]] * q->flcoeffs6[i];
+ }
+ }
+ }
+ return 0;
+}
+
+
+static int imc_get_coeffs (IMCContext* q) {
+ int i, j, cw_len, cw;
+
+ for(i = 0; i < BANDS; i++) {
+ if(!q->sumLenArr[i]) continue;
+ if (q->bandFlagsBuf[i] || q->bandWidthT[i]) {
+ for(j = band_tab[i]; j < band_tab[i+1]; j++) {
+ cw_len = q->CWlengthT[j];
+ cw = 0;
+
+ if (get_bits_count(&q->gb) + cw_len > 512){
+//av_log(NULL,0,"Band %i coeff %i cw_len %i\n",i,j,cw_len);
+ return -1;
+ }
+
+ if(cw_len && (!q->bandFlagsBuf[i] || !q->skipFlags[j]))
+ cw = get_bits(&q->gb, cw_len);
+
+ q->codewords[j] = cw;
+ }
+ }
+ }
+ return 0;
+}
+
+static int imc_decode_frame(AVCodecContext * avctx,
+ void *data, int *data_size,
+ uint8_t * buf, int buf_size)
+{
+
+ IMCContext *q = avctx->priv_data;
+
+ int stream_format_code;
+ int imc_hdr, i, j;
+ int flag;
+ int bits, summer;
+ int counter, bitscount;
+ uint16_t *buf16 = (uint16_t *) buf;
+
+ /* FIXME: input should not be modified */
+ for(i = 0; i < FFMIN(buf_size, avctx->block_align) / 2; i++)
+ buf16[i] = bswap_16(buf16[i]);
+
+ init_get_bits(&q->gb, buf, 512);
+
+ /* Check the frame header */
+ imc_hdr = get_bits(&q->gb, 9);
+ if (imc_hdr != IMC_FRAME_ID) {
+ av_log(avctx, AV_LOG_ERROR, "imc frame header check failed!\n");
+ av_log(avctx, AV_LOG_ERROR, "got %x instead of 0x21.\n", imc_hdr);
+ return -1;
+ }
+ stream_format_code = get_bits(&q->gb, 3);
+
+ if(stream_format_code & 1){
+ av_log(avctx, AV_LOG_ERROR, "Stream code format %X is not supported\n", stream_format_code);
+ return -1;
+ }
+
+// av_log(avctx, AV_LOG_DEBUG, "stream_format_code = %d\n", stream_format_code);
+
+ if (stream_format_code & 0x04)
+ q->decoder_reset = 1;
+
+ if(q->decoder_reset) {
+ memset(q->out_samples, 0, sizeof(q->out_samples));
+ for(i = 0; i < BANDS; i++)q->old_floor[i] = 1.0;
+ for(i = 0; i < COEFFS; i++)q->CWdecoded[i] = 0;
+ q->decoder_reset = 0;
+ }
+
+ flag = get_bits1(&q->gb);
+ imc_read_level_coeffs(q, stream_format_code, q->levlCoeffBuf);
+
+ if (stream_format_code & 0x4)
+ imc_decode_level_coefficients(q, q->levlCoeffBuf, q->flcoeffs1, q->flcoeffs2);
+ else
+ imc_decode_level_coefficients2(q, q->levlCoeffBuf, q->old_floor, q->flcoeffs1, q->flcoeffs2);
+
+ memcpy(q->old_floor, q->flcoeffs1, 32 * sizeof(float));
+
+ counter = 0;
+ for (i=0 ; i<BANDS ; i++) {
+ if (q->levlCoeffBuf[i] == 16) {
+ q->bandWidthT[i] = 0;
+ counter++;
+ } else
+ q->bandWidthT[i] = band_tab[i+1] - band_tab[i];
+ }
+ memset(q->bandFlagsBuf, 0, BANDS * sizeof(int));
+ for(i = 0; i < BANDS-1; i++) {
+ if (q->bandWidthT[i])
+ q->bandFlagsBuf[i] = get_bits1(&q->gb);
+ }
+
+ imc_calculate_coeffs(q, q->flcoeffs1, q->flcoeffs2, q->bandWidthT, q->flcoeffs3, q->flcoeffs5);
+
+ bitscount = 0;
+ /* first 4 bands will be assigned 5 bits per coefficient */
+ if (stream_format_code & 0x2) {
+ bitscount += 15;
+
+ q->bitsBandT[0] = 5;
+ q->CWlengthT[0] = 5;
+ q->CWlengthT[1] = 5;
+ q->CWlengthT[2] = 5;
+ for(i = 1; i < 4; i++){
+ bits = (q->levlCoeffBuf[i] == 16) ? 0 : 5;
+ q->bitsBandT[i] = bits;
+ for(j = band_tab[i]; j < band_tab[i+1]; j++) {
+ q->CWlengthT[j] = bits;
+ bitscount += bits;
+ }
+ }
+ }
+
+ if(bit_allocation (q, stream_format_code, 512 - bitscount - get_bits_count(&q->gb), flag) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Bit allocations failed\n");
+ q->decoder_reset = 1;
+ return -1;
+ }
+
+ for(i = 0; i < BANDS; i++) {
+ q->sumLenArr[i] = 0;
+ q->skipFlagRaw[i] = 0;
+ for(j = band_tab[i]; j < band_tab[i+1]; j++)
+ q->sumLenArr[i] += q->CWlengthT[j];
+ if (q->bandFlagsBuf[i])
+ if( (((band_tab[i+1] - band_tab[i]) * 1.5) > q->sumLenArr[i]) && (q->sumLenArr[i] > 0))
+ q->skipFlagRaw[i] = 1;
+ }
+
+ imc_get_skip_coeff(q);
+
+ for(i = 0; i < BANDS; i++) {
+ q->flcoeffs6[i] = q->flcoeffs1[i];
+ /* band has flag set and at least one coded coefficient */
+ if (q->bandFlagsBuf[i] && (band_tab[i+1] - band_tab[i]) != q->skipFlagCount[i]){
+ q->flcoeffs6[i] *= q->sqrt_tab[band_tab[i+1] - band_tab[i]] /
+ q->sqrt_tab[(band_tab[i+1] - band_tab[i] - q->skipFlagCount[i])];
+ }
+ }
+
+ /* calculate bits left, bits needed and adjust bit allocation */
+ bits = summer = 0;
+
+ for(i = 0; i < BANDS; i++) {
+ if (q->bandFlagsBuf[i]) {
+ for(j = band_tab[i]; j < band_tab[i+1]; j++) {
+ if(q->skipFlags[j]) {
+ summer += q->CWlengthT[j];
+ q->CWlengthT[j] = 0;
+ }
+ }
+ bits += q->skipFlagBits[i];
+ summer -= q->skipFlagBits[i];
+ }
+ }
+ imc_adjust_bit_allocation(q, summer);
+
+ for(i = 0; i < BANDS; i++) {
+ q->sumLenArr[i] = 0;
+
+ for(j = band_tab[i]; j < band_tab[i+1]; j++)
+ if (!q->skipFlags[j])
+ q->sumLenArr[i] += q->CWlengthT[j];
+ }
+
+ memset(q->codewords, 0, sizeof(q->codewords));
+
+ if(imc_get_coeffs(q) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Read coefficients failed\n");
+ q->decoder_reset = 1;
+ return 0;
+ }
+
+ if(inverse_quant_coeff(q, stream_format_code) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Inverse quantization of coefficients failed\n");
+ q->decoder_reset = 1;
+ return 0;
+ }
+
+ memset(q->skipFlags, 0, sizeof(q->skipFlags));
+
+ imc_imdct256(q);
+
+ q->dsp.float_to_int16(data, q->out_samples, COEFFS);
+
+ *data_size = COEFFS * sizeof(int16_t);
+
+ return avctx->block_align;
+}
+
+
+static int imc_decode_close(AVCodecContext * avctx)
+{
+ IMCContext *q = avctx->priv_data;
+
+ ff_fft_end(&q->fft);
+ return 0;
+}
+
+
+AVCodec imc_decoder = {
+ .name = "imc",
+ .type = CODEC_TYPE_AUDIO,
+ .id = CODEC_ID_IMC,
+ .priv_data_size = sizeof(IMCContext),
+ .init = imc_decode_init,
+ .close = imc_decode_close,
+ .decode = imc_decode_frame,
+};
diff --git a/contrib/ffmpeg/libavcodec/imcdata.h b/contrib/ffmpeg/libavcodec/imcdata.h
new file mode 100644
index 000000000..92ed275f1
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/imcdata.h
@@ -0,0 +1,164 @@
+/*
+ * IMC compatible decoder
+ * Copyright (c) 2002-2004 Maxim Poliakovski
+ * Copyright (c) 2006 Benjamin Larsson
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+static const uint16_t band_tab[33] = {
+ 0, 3, 6, 9, 12, 16, 20, 24, 29, 34, 40,
+ 46, 53, 60, 68, 76, 84, 93, 102, 111, 121, 131,
+ 141, 151, 162, 173, 184, 195, 207, 219, 231, 243, 256,
+};
+
+
+static const int8_t cyclTab[32] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31, 32, 32,
+};
+
+static const int8_t cyclTab2[32] = {
+ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+};
+
+static const float imc_weights1[31] = {
+ 0.119595, 0.123124, 0.129192, 9.97377e-2, 8.1923e-2, 9.61153e-2, 8.77885e-2, 8.61174e-2,
+ 9.00882e-2, 9.91658e-2, 0.112991, 0.131126, 0.152886, 0.177292, 0.221782, 0.244917, 0.267386,
+ 0.306816, 0.323046, 0.33729, 0.366773, 0.392557, 0.398076, 0.403302, 0.42451, 0.444777,
+ 0.449188, 0.455445, 0.477853, 0.500669, 0.510395};
+
+static const float imc_weights2[31] = {
+ 3.23466e-3, 3.49886e-3, 3.98413e-3, 1.98116e-3, 1.16465e-3, 1.79283e-3, 1.40372e-3, 1.33274e-3,
+ 1.50523e-3, 1.95064e-3, 2.77472e-3, 4.14725e-3, 6.2776e-3, 9.36401e-3, 1.71397e-2, 2.24052e-2,
+ 2.83971e-2, 4.11689e-2, 4.73165e-2, 5.31631e-2, 6.66614e-2, 8.00824e-2, 8.31588e-2, 8.61397e-2,
+ 9.89229e-2, 0.112197, 0.115227, 0.119613, 0.136174, 0.15445, 0.162685};
+
+static const float imc_quantizer1[4][8] = {
+ { 8.4431201e-1, 4.7358301e-1, 1.448354, 2.7073899e-1, 7.4449003e-1, 1.241991, 1.845484, 0.0},
+ { 8.6876702e-1, 4.7659001e-1, 1.478224, 2.5672799e-1, 7.55777e-1, 1.3229851, 2.03438, 0.0},
+ { 7.5891501e-1, 6.2272799e-1, 1.271322, 3.47904e-1, 7.5317699e-1, 1.150767, 1.628476, 0.0},
+ { 7.65257e-1, 6.44647e-1, 1.263824, 3.4548101e-1, 7.6384902e-1, 1.214466, 1.7638789, 0.0},
+};
+
+static const float imc_quantizer2[2][56] = {
+ { 1.39236e-1, 3.50548e-1, 5.9547901e-1, 8.5772401e-1, 1.121545, 1.3882281, 1.695882, 2.1270809,
+ 7.2221003e-2, 1.85177e-1, 2.9521701e-1, 4.12568e-1, 5.4068601e-1, 6.7679501e-1, 8.1196898e-1, 9.4765198e-1,
+ 1.0779999, 1.203415, 1.337265, 1.481871, 1.639982, 1.814766, 2.0701399, 2.449862,
+ 3.7533998e-2, 1.02722e-1, 1.6021401e-1, 2.16043e-1, 2.7231601e-1, 3.3025399e-1, 3.9022601e-1, 4.52849e-1,
+ 5.1794899e-1, 5.8529502e-1, 6.53956e-1, 7.2312802e-1, 7.9150802e-1, 8.5891002e-1, 9.28141e-1, 9.9706203e-1,
+ 1.062153, 1.12564, 1.189834, 1.256122, 1.324469, 1.3955311, 1.468906, 1.545084,
+ 1.6264729, 1.711524, 1.802705, 1.91023, 2.0533991, 2.22333, 2.4830019, 3.253329 },
+ { 1.11654e-1, 3.54469e-1, 6.4232099e-1, 9.6128798e-1, 1.295053, 1.61777, 1.989839, 2.51107,
+ 5.7721999e-2, 1.69879e-1, 2.97589e-1, 4.3858799e-1, 5.9039903e-1, 7.4934798e-1, 9.1628098e-1, 1.087297,
+ 1.262751, 1.4288321, 1.6040879, 1.79067, 2.000668, 2.2394669, 2.649332, 5.2760072,
+ 2.9722e-2, 8.7316997e-2, 1.4445201e-1, 2.04247e-1, 2.6879501e-1, 3.3716801e-1, 4.08811e-1, 4.8306999e-1,
+ 5.6049401e-1, 6.3955498e-1, 7.2044599e-1, 8.0427998e-1, 8.8933599e-1, 9.7537601e-1, 1.062461, 1.1510431,
+ 1.240236, 1.326715, 1.412513, 1.500502, 1.591749, 1.686413, 1.785239, 1.891233,
+ 2.0051291, 2.127681, 2.2709141, 2.475826, 2.7219379, 3.101985, 4.686213, 6.2287788},
+};
+
+
+static const float xTab[14] = {7.6, 3.6, 4.4, 3.7, 6.1, 5.1, 2.3, 1.6, 6.2, 1.5, 1.8, 1.2, 0, 0}; //10014048
+
+/* precomputed table for 10^(i/4), i=-15..16 */
+static const float imc_exp_tab[32] = {
+ 1.778280e-4, 3.162278e-4, 5.623413e-4, 1.000000e-3,
+ 1.778280e-3, 3.162278e-3, 5.623413e-3, 1.000000e-2,
+ 1.778280e-2, 3.162278e-2, 5.623413e-2, 1.000000e-1,
+ 1.778280e-1, 3.162278e-1, 5.623413e-1, 1.000000e00,
+ 1.778280e00, 3.162278e00, 5.623413e00, 1.000000e01,
+ 1.778280e01, 3.162278e01, 5.623413e01, 1.000000e02,
+ 1.778280e02, 3.162278e02, 5.623413e02, 1.000000e03,
+ 1.778280e03, 3.162278e03, 5.623413e03, 1.000000e04
+};
+static const float *imc_exp_tab2 = imc_exp_tab + 8;
+
+
+static const uint8_t imc_cb_select[4][32] = {
+ { 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2 },
+ { 0, 2, 0, 3, 2, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ { 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2 },
+ { 0, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+};
+
+static const uint8_t imc_huffman_sizes[4] = {
+ 17, 17, 18, 18
+};
+
+static const uint8_t imc_huffman_lens[4][4][18] = {
+ {
+ { 16, 15, 13, 11, 8, 5, 3, 1, 2, 4, 6, 9, 10, 12, 14, 16, 7, 0 },
+ { 10, 8, 7, 6, 4, 4, 3, 2, 2, 3, 4, 6, 7, 9, 11, 11, 7, 0 },
+ { 15, 15, 14, 11, 8, 6, 4, 2, 1, 4, 5, 7, 9, 10, 12, 13, 4, 0 },
+ { 13, 11, 10, 8, 6, 4, 2, 2, 2, 3, 5, 7, 9, 12, 15, 15, 14, 0 },
+ },
+ {
+ { 14, 12, 10, 8, 7, 4, 2, 2, 2, 3, 5, 7, 9, 11, 13, 14, 7, 0 },
+ { 14, 13, 11, 8, 6, 4, 3, 2, 2, 3, 5, 7, 9, 10, 12, 14, 3, 0 },
+ { 13, 12, 10, 7, 5, 4, 3, 2, 2, 3, 4, 6, 8, 9, 11, 13, 4, 0 },
+ { 13, 12, 10, 7, 5, 4, 3, 2, 2, 3, 4, 6, 8, 9, 11, 13, 4, 0 },
+ },
+ {
+ { 16, 14, 12, 10, 8, 5, 3, 1, 2, 4, 7, 9, 11, 13, 15, 17, 6, 17 },
+ { 15, 13, 11, 8, 6, 4, 2, 2, 2, 3, 5, 7, 10, 12, 14, 16, 9, 16 },
+ { 14, 12, 11, 9, 8, 6, 3, 1, 2, 5, 7, 10, 13, 15, 16, 17, 4, 17 },
+ { 16, 14, 12, 9, 7, 5, 2, 2, 2, 3, 4, 6, 8, 11, 13, 15, 10, 16 },
+ },
+ {
+ { 13, 11, 10, 8, 7, 5, 2, 2, 2, 4, 6, 9, 12, 14, 15, 16, 3, 16 },
+ { 11, 11, 10, 9, 8, 7, 5, 4, 3, 3, 3, 3, 3, 3, 4, 5, 6, 5 },
+ { 9, 9, 7, 6, 5, 4, 3, 3, 2, 3, 4, 5, 4, 5, 5, 6, 8, 6 },
+ { 13, 12, 10, 8, 5, 3, 3, 2, 2, 3, 4, 7, 9, 11, 14, 15, 6, 15 },
+ }
+};
+
+static const uint16_t imc_huffman_bits[4][4][18] = {
+ {
+ { 0xCC32, 0x6618, 0x1987, 0x0660, 0x00CD, 0x0018, 0x0007, 0x0000, 0x0002, 0x000D, 0x0032, 0x0199, 0x0331, 0x0CC2, 0x330D, 0xCC33, 0x0067, 0x0000 },
+ { 0x02FE, 0x00BE, 0x005E, 0x002D, 0x000A, 0x0009, 0x0003, 0x0003, 0x0000, 0x0002, 0x0008, 0x002C, 0x005D, 0x017E, 0x05FE, 0x05FF, 0x005C, 0x0000 },
+ { 0x5169, 0x5168, 0x28B5, 0x0517, 0x00A3, 0x0029, 0x0008, 0x0003, 0x0000, 0x0009, 0x0015, 0x0050, 0x0144, 0x028A, 0x0A2C, 0x145B, 0x000B, 0x0000 },
+ { 0x1231, 0x048D, 0x0247, 0x0090, 0x0025, 0x0008, 0x0001, 0x0003, 0x0000, 0x0005, 0x0013, 0x0049, 0x0122, 0x0919, 0x48C3, 0x48C2, 0x2460, 0x0000 },
+ },
+ {
+ { 0x2D1D, 0x0B46, 0x02D0, 0x00B5, 0x0059, 0x000A, 0x0003, 0x0001, 0x0000, 0x0004, 0x0017, 0x005B, 0x0169, 0x05A2, 0x168F, 0x2D1C, 0x0058, 0x0000 },
+ { 0x1800, 0x0C01, 0x0301, 0x0061, 0x0019, 0x0007, 0x0004, 0x0003, 0x0000, 0x0005, 0x000D, 0x0031, 0x00C1, 0x0181, 0x0601, 0x1801, 0x0002, 0x0000 },
+ { 0x1556, 0x0AAA, 0x02AB, 0x0054, 0x0014, 0x000B, 0x0002, 0x0003, 0x0000, 0x0003, 0x0008, 0x002B, 0x00AB, 0x0154, 0x0554, 0x1557, 0x0009, 0x0000 },
+ { 0x1556, 0x0AAA, 0x02AB, 0x0054, 0x0014, 0x000B, 0x0002, 0x0003, 0x0000, 0x0003, 0x0008, 0x002B, 0x00AB, 0x0154, 0x0554, 0x1557, 0x0009, 0x0000 },
+ },
+ {
+ { 0x2993, 0x0A65, 0x0298, 0x00A7, 0x0028, 0x0004, 0x0000, 0x0001, 0x0001, 0x0003, 0x0015, 0x0052, 0x014D, 0x0533, 0x14C8, 0x5324, 0x000B, 0x5325 },
+ { 0x09B8, 0x026F, 0x009A, 0x0012, 0x0005, 0x0000, 0x0001, 0x0002, 0x0003, 0x0001, 0x0003, 0x0008, 0x004C, 0x0136, 0x04DD, 0x1373, 0x0027, 0x1372 },
+ { 0x0787, 0x01E0, 0x00F1, 0x003D, 0x001F, 0x0006, 0x0001, 0x0001, 0x0001, 0x0002, 0x000E, 0x0079, 0x03C2, 0x0F0D, 0x1E19, 0x3C30, 0x0000, 0x3C31 },
+ { 0x4B06, 0x12C0, 0x04B1, 0x0097, 0x0024, 0x0008, 0x0002, 0x0003, 0x0000, 0x0003, 0x0005, 0x0013, 0x004A, 0x0259, 0x0961, 0x2582, 0x012D, 0x4B07 },
+ },
+ {
+ { 0x0A5A, 0x0297, 0x014A, 0x0053, 0x0028, 0x000B, 0x0003, 0x0000, 0x0002, 0x0004, 0x0015, 0x00A4, 0x052C, 0x14B7, 0x296C, 0x52DB, 0x0003, 0x52DA },
+ { 0x0193, 0x0192, 0x00C8, 0x0065, 0x0033, 0x0018, 0x0007, 0x0004, 0x0000, 0x0004, 0x0005, 0x0007, 0x0006, 0x0003, 0x0005, 0x0005, 0x000D, 0x0004 },
+ { 0x0012, 0x0013, 0x0005, 0x0003, 0x0000, 0x0003, 0x0005, 0x0004, 0x0003, 0x0003, 0x0005, 0x0005, 0x0004, 0x0004, 0x0003, 0x0005, 0x0008, 0x0004 },
+ { 0x0D66, 0x06B2, 0x01AD, 0x006A, 0x000C, 0x0005, 0x0004, 0x0000, 0x0003, 0x0002, 0x0007, 0x0034, 0x00D7, 0x0358, 0x1ACF, 0x359C, 0x001B, 0x359D },
+ }
+};
+
diff --git a/src/libffmpeg/libavcodec/imgconvert.c b/contrib/ffmpeg/libavcodec/imgconvert.c
index cc1a825fc..d5b4cdca0 100644
--- a/src/libffmpeg/libavcodec/imgconvert.c
+++ b/contrib/ffmpeg/libavcodec/imgconvert.c
@@ -2,18 +2,20 @@
* Misc image convertion routines
* Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -32,7 +34,7 @@
#include "dsputil.h"
#ifdef USE_FASTMEMCPY
-#include "fastmemcpy.h"
+#include "libvo/fastmemcpy.h"
#endif
#ifdef HAVE_MMX
@@ -183,7 +185,7 @@ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
},
[PIX_FMT_RGB555] = {
.name = "rgb555",
- .nb_channels = 4, .is_alpha = 1,
+ .nb_channels = 3,
.color_type = FF_COLOR_RGB,
.pixel_type = FF_PIXEL_PACKED,
.depth = 5,
@@ -191,6 +193,20 @@ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
},
/* gray / mono formats */
+ [PIX_FMT_GRAY16BE] = {
+ .name = "gray16be",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_GRAY,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 16,
+ },
+ [PIX_FMT_GRAY16LE] = {
+ .name = "gray16le",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_GRAY,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 16,
+ },
[PIX_FMT_GRAY8] = {
.name = "gray",
.nb_channels = 1,
@@ -235,6 +251,111 @@ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
.depth = 8,
.x_chroma_shift = 2, .y_chroma_shift = 0,
},
+ [PIX_FMT_BGR32] = {
+ .name = "bgr32",
+ .nb_channels = 4, .is_alpha = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR565] = {
+ .name = "bgr565",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 5,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR555] = {
+ .name = "bgr555",
+ .nb_channels = 3,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 5,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB8] = {
+ .name = "rgb8",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB4] = {
+ .name = "rgb4",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 4,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB4_BYTE] = {
+ .name = "rgb4_byte",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR8] = {
+ .name = "bgr8",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR4] = {
+ .name = "bgr4",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 4,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_BGR4_BYTE] = {
+ .name = "bgr4_byte",
+ .nb_channels = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_NV12] = {
+ .name = "nv12",
+ .nb_channels = 2,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 1,
+ },
+ [PIX_FMT_NV21] = {
+ .name = "nv12",
+ .nb_channels = 2,
+ .color_type = FF_COLOR_YUV,
+ .pixel_type = FF_PIXEL_PLANAR,
+ .depth = 8,
+ .x_chroma_shift = 1, .y_chroma_shift = 1,
+ },
+
+ [PIX_FMT_BGR32_1] = {
+ .name = "bgr32_1",
+ .nb_channels = 4, .is_alpha = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
+ [PIX_FMT_RGB32_1] = {
+ .name = "rgb32_1",
+ .nb_channels = 4, .is_alpha = 1,
+ .color_type = FF_COLOR_RGB,
+ .pixel_type = FF_PIXEL_PACKED,
+ .depth = 8,
+ .x_chroma_shift = 0, .y_chroma_shift = 0,
+ },
};
void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
@@ -292,6 +413,18 @@ int avpicture_fill(AVPicture *picture, uint8_t *ptr,
picture->linesize[1] = w2;
picture->linesize[2] = w2;
return size + 2 * size2;
+ case PIX_FMT_NV12:
+ case PIX_FMT_NV21:
+ w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
+ h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
+ size2 = w2 * h2 * 2;
+ picture->data[0] = ptr;
+ picture->data[1] = picture->data[0] + size;
+ picture->data[2] = NULL;
+ picture->linesize[0] = width;
+ picture->linesize[1] = w2;
+ picture->linesize[2] = 0;
+ return size + 2 * size2;
case PIX_FMT_RGB24:
case PIX_FMT_BGR24:
picture->data[0] = ptr;
@@ -300,11 +433,18 @@ int avpicture_fill(AVPicture *picture, uint8_t *ptr,
picture->linesize[0] = width * 3;
return size * 3;
case PIX_FMT_RGBA32:
+ case PIX_FMT_BGR32:
+ case PIX_FMT_RGB32_1:
+ case PIX_FMT_BGR32_1:
picture->data[0] = ptr;
picture->data[1] = NULL;
picture->data[2] = NULL;
picture->linesize[0] = width * 4;
return size * 4;
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
+ case PIX_FMT_BGR555:
+ case PIX_FMT_BGR565:
case PIX_FMT_RGB555:
case PIX_FMT_RGB565:
case PIX_FMT_YUV422:
@@ -325,12 +465,23 @@ int avpicture_fill(AVPicture *picture, uint8_t *ptr,
picture->data[2] = NULL;
picture->linesize[0] = width + width/2;
return size + size/2;
+ case PIX_FMT_RGB8:
+ case PIX_FMT_BGR8:
+ case PIX_FMT_RGB4_BYTE:
+ case PIX_FMT_BGR4_BYTE:
case PIX_FMT_GRAY8:
picture->data[0] = ptr;
picture->data[1] = NULL;
picture->data[2] = NULL;
picture->linesize[0] = width;
return size;
+ case PIX_FMT_RGB4:
+ case PIX_FMT_BGR4:
+ picture->data[0] = ptr;
+ picture->data[1] = NULL;
+ picture->data[2] = NULL;
+ picture->linesize[0] = width / 2;
+ return size / 2;
case PIX_FMT_MONOWHITE:
case PIX_FMT_MONOBLACK:
picture->data[0] = ptr;
@@ -370,6 +521,8 @@ int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
if (pix_fmt == PIX_FMT_YUV422 ||
pix_fmt == PIX_FMT_UYVY422 ||
+ pix_fmt == PIX_FMT_BGR565 ||
+ pix_fmt == PIX_FMT_BGR555 ||
pix_fmt == PIX_FMT_RGB565 ||
pix_fmt == PIX_FMT_RGB555)
w = width * 2;
@@ -484,6 +637,8 @@ static int avg_bits_per_pixel(int pix_fmt)
case PIX_FMT_UYVY422:
case PIX_FMT_RGB565:
case PIX_FMT_RGB555:
+ case PIX_FMT_BGR565:
+ case PIX_FMT_BGR555:
bits = 16;
break;
case PIX_FMT_UYVY411:
@@ -602,6 +757,8 @@ void img_copy(AVPicture *dst, const AVPicture *src,
case PIX_FMT_UYVY422:
case PIX_FMT_RGB565:
case PIX_FMT_RGB555:
+ case PIX_FMT_BGR565:
+ case PIX_FMT_BGR555:
bits = 16;
break;
case PIX_FMT_UYVY411:
@@ -1084,7 +1241,7 @@ static uint8_t c_jpeg_to_ccir[256];
static void img_convert_init(void)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
for(i = 0;i < 256; i++) {
y_ccir_to_jpeg[i] = Y_CCIR_TO_JPEG(i);
@@ -1472,19 +1629,10 @@ static inline unsigned int bitcopy_n(unsigned int a, int n)
b = bitcopy_n(v << 3, 3);\
}
-#define RGBA_IN(r, g, b, a, s)\
-{\
- unsigned int v = ((const uint16_t *)(s))[0];\
- r = bitcopy_n(v >> (10 - 3), 3);\
- g = bitcopy_n(v >> (5 - 3), 3);\
- b = bitcopy_n(v << 3, 3);\
- a = (-(v >> 15)) & 0xff;\
-}
-#define RGBA_OUT(d, r, g, b, a)\
+#define RGB_OUT(d, r, g, b)\
{\
- ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | \
- ((a << 8) & 0x8000);\
+ ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
}
#define BPP 2
@@ -1701,6 +1849,75 @@ static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
gray_to_mono(dst, src, width, height, 0x00);
}
+static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ int x, y, src_wrap, dst_wrap;
+ uint8_t *s, *d;
+ s = src->data[0];
+ src_wrap = src->linesize[0] - width;
+ d = dst->data[0];
+ dst_wrap = dst->linesize[0] - width * 2;
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ *d++ = *s;
+ *d++ = *s++;
+ }
+ s += src_wrap;
+ d += dst_wrap;
+ }
+}
+
+static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ int x, y, src_wrap, dst_wrap;
+ uint8_t *s, *d;
+ s = src->data[0];
+ src_wrap = src->linesize[0] - width * 2;
+ d = dst->data[0];
+ dst_wrap = dst->linesize[0] - width;
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ *d++ = *s;
+ s += 2;
+ }
+ s += src_wrap;
+ d += dst_wrap;
+ }
+}
+
+static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ gray16_to_gray(dst, src, width, height);
+}
+
+static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ gray16_to_gray(dst, src + 1, width, height);
+}
+
+static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
+ int width, int height)
+{
+ int x, y, src_wrap, dst_wrap;
+ uint16_t *s, *d;
+ s = src->data[0];
+ src_wrap = (src->linesize[0] - width * 2)/2;
+ d = dst->data[0];
+ dst_wrap = (dst->linesize[0] - width * 2)/2;
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ *d++ = bswap_16(*s++);
+ }
+ s += src_wrap;
+ d += dst_wrap;
+ }
+}
+
+
typedef struct ConvertEntry {
void (*convert)(AVPicture *dst,
const AVPicture *src, int width, int height);
@@ -1834,6 +2051,12 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
[PIX_FMT_RGB24] = {
.convert = rgba32_to_rgb24
},
+ [PIX_FMT_BGR24] = {
+ .convert = rgba32_to_bgr24
+ },
+ [PIX_FMT_RGB565] = {
+ .convert = rgba32_to_rgb565
+ },
[PIX_FMT_RGB555] = {
.convert = rgba32_to_rgb555
},
@@ -1848,6 +2071,9 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
},
},
[PIX_FMT_BGR24] = {
+ [PIX_FMT_RGBA32] = {
+ .convert = bgr24_to_rgba32
+ },
[PIX_FMT_RGB24] = {
.convert = bgr24_to_rgb24
},
@@ -1873,6 +2099,9 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
},
},
[PIX_FMT_RGB565] = {
+ [PIX_FMT_RGBA32] = {
+ .convert = rgb565_to_rgba32
+ },
[PIX_FMT_RGB24] = {
.convert = rgb565_to_rgb24
},
@@ -1883,6 +2112,22 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
.convert = rgb565_to_gray
},
},
+ [PIX_FMT_GRAY16BE] = {
+ [PIX_FMT_GRAY8] = {
+ .convert = gray16be_to_gray
+ },
+ [PIX_FMT_GRAY16LE] = {
+ .convert = gray16_to_gray16
+ },
+ },
+ [PIX_FMT_GRAY16LE] = {
+ [PIX_FMT_GRAY8] = {
+ .convert = gray16le_to_gray
+ },
+ [PIX_FMT_GRAY16BE] = {
+ .convert = gray16_to_gray16
+ },
+ },
[PIX_FMT_GRAY8] = {
[PIX_FMT_RGB555] = {
.convert = gray_to_rgb555
@@ -1905,6 +2150,12 @@ static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
[PIX_FMT_MONOBLACK] = {
.convert = gray_to_monoblack
},
+ [PIX_FMT_GRAY16LE] = {
+ .convert = gray_to_gray16
+ },
+ [PIX_FMT_GRAY16BE] = {
+ .convert = gray_to_gray16
+ },
},
[PIX_FMT_MONOWHITE] = {
[PIX_FMT_GRAY8] = {
@@ -2048,6 +2299,7 @@ int img_pad(AVPicture *dst, const AVPicture *src, int height, int width, int pix
return 0;
}
+#ifndef CONFIG_SWSCALER
/* XXX: always use linesize. Return -1 if not supported */
int img_convert(AVPicture *dst, int dst_pix_fmt,
const AVPicture *src, int src_pix_fmt,
@@ -2289,6 +2541,7 @@ int img_convert(AVPicture *dst, int dst_pix_fmt,
avpicture_free(tmp);
return ret;
}
+#endif
/* NOTE: we scan all the pixels to have an exact information */
static int get_alpha_info_pal8(const AVPicture *src, int width, int height)
@@ -2334,9 +2587,6 @@ int img_get_alpha_info(const AVPicture *src,
case PIX_FMT_RGBA32:
ret = get_alpha_info_rgba32(src, width, height);
break;
- case PIX_FMT_RGB555:
- ret = get_alpha_info_rgb555(src, width, height);
- break;
case PIX_FMT_PAL8:
ret = get_alpha_info_pal8(src, width, height);
break;
@@ -2403,7 +2653,7 @@ static void deinterlace_line(uint8_t *dst,
int size)
{
#ifndef HAVE_MMX
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int sum;
for(;size > 0;size--) {
@@ -2446,7 +2696,7 @@ static void deinterlace_line_inplace(uint8_t *lum_m4, uint8_t *lum_m3, uint8_t *
int size)
{
#ifndef HAVE_MMX
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int sum;
for(;size > 0;size--) {
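The gray_to_gray16()/gray16_to_gray() helpers added above convert between 8- and 16-bit grayscale by replicating each 8-bit sample into both bytes of the 16-bit sample (so 0x00 maps to 0x0000 and 0xff to 0xffff) and by keeping only the high byte of each 16-bit sample on the way back; gray16_to_gray16() byte-swaps to move between the BE and LE variants. A minimal standalone sketch of the same replication idea, independent of AVPicture (the helper name is illustrative, not from the patch):

/* Illustrative sketch only, not part of the patch: expand 8-bit gray to
 * 16-bit gray by byte replication, matching the behaviour of the
 * gray_to_gray16() converter added in imgconvert.c above. */
#include <stdint.h>
#include <stddef.h>

static void gray8_expand_to_gray16(const uint8_t *src, uint16_t *dst, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++)
        dst[i] = (uint16_t)((src[i] << 8) | src[i]); /* 0xab -> 0xabab */
}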
diff --git a/src/libffmpeg/libavcodec/imgconvert_template.h b/contrib/ffmpeg/libavcodec/imgconvert_template.h
index e58b0cae2..4cc898bab 100644
--- a/src/libffmpeg/libavcodec/imgconvert_template.h
+++ b/contrib/ffmpeg/libavcodec/imgconvert_template.h
@@ -2,18 +2,20 @@
* Templates for image conversion routines
* Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +29,7 @@ static void glue(yuv420p_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
const uint8_t *y1_ptr, *y2_ptr, *cb_ptr, *cr_ptr;
uint8_t *d, *d1, *d2;
int w, y, cb, cr, r_add, g_add, b_add, width2;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
unsigned int r, g, b;
d = dst->data[0];
@@ -121,7 +123,7 @@ static void glue(yuvj420p_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
const uint8_t *y1_ptr, *y2_ptr, *cb_ptr, *cr_ptr;
uint8_t *d, *d1, *d2;
int w, y, cb, cr, r_add, g_add, b_add, width2;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
unsigned int r, g, b;
d = dst->data[0];
@@ -408,7 +410,8 @@ static void glue(pal8_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
}
}
-#if !defined(FMT_RGBA32) && defined(RGBA_OUT)
+// RGB24 has optimised routines
+#if !defined(FMT_RGBA32) && !defined(FMT_RGB24)
/* alpha support */
static void glue(rgba32_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
@@ -417,7 +420,10 @@ static void glue(rgba32_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
const uint8_t *s;
uint8_t *d;
int src_wrap, dst_wrap, j, y;
- unsigned int v, r, g, b, a;
+ unsigned int v, r, g, b;
+#ifdef RGBA_OUT
+ unsigned int a;
+#endif
s = src->data[0];
src_wrap = src->linesize[0] - width * 4;
@@ -428,11 +434,15 @@ static void glue(rgba32_to_, RGB_NAME)(AVPicture *dst, const AVPicture *src,
for(y=0;y<height;y++) {
for(j = 0;j < width; j++) {
v = ((const uint32_t *)(s))[0];
- a = (v >> 24) & 0xff;
r = (v >> 16) & 0xff;
g = (v >> 8) & 0xff;
b = v & 0xff;
+#ifdef RGBA_OUT
+ a = (v >> 24) & 0xff;
RGBA_OUT(d, r, g, b, a);
+#else
+ RGB_OUT(d, r, g, b);
+#endif
s += 4;
d += BPP;
}
@@ -447,7 +457,10 @@ static void glue(RGB_NAME, _to_rgba32)(AVPicture *dst, const AVPicture *src,
const uint8_t *s;
uint8_t *d;
int src_wrap, dst_wrap, j, y;
- unsigned int r, g, b, a;
+ unsigned int r, g, b;
+#ifdef RGBA_IN
+ unsigned int a;
+#endif
s = src->data[0];
src_wrap = src->linesize[0] - width * BPP;
@@ -457,8 +470,13 @@ static void glue(RGB_NAME, _to_rgba32)(AVPicture *dst, const AVPicture *src,
for(y=0;y<height;y++) {
for(j = 0;j < width; j++) {
+#ifdef RGBA_IN
RGBA_IN(r, g, b, a, s);
((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;
+#else
+ RGB_IN(r, g, b, s);
+ ((uint32_t *)(d))[0] = (0xff << 24) | (r << 16) | (g << 8) | b;
+#endif
d += 4;
s += BPP;
}
@@ -467,7 +485,7 @@ static void glue(RGB_NAME, _to_rgba32)(AVPicture *dst, const AVPicture *src,
}
}
-#endif /* !defined(FMT_RGBA32) && defined(RGBA_IN) */
+#endif /* !defined(FMT_RGBA32) */
#ifndef FMT_RGB24
@@ -537,7 +555,7 @@ static void yuv444p_to_rgb24(AVPicture *dst, const AVPicture *src,
const uint8_t *y1_ptr, *cb_ptr, *cr_ptr;
uint8_t *d, *d1;
int w, y, cb, cr, r_add, g_add, b_add;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
unsigned int r, g, b;
d = dst->data[0];
@@ -570,7 +588,7 @@ static void yuvj444p_to_rgb24(AVPicture *dst, const AVPicture *src,
const uint8_t *y1_ptr, *cb_ptr, *cr_ptr;
uint8_t *d, *d1;
int w, y, cb, cr, r_add, g_add, b_add;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
unsigned int r, g, b;
d = dst->data[0];
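The template changes above fall back to RGB_IN()/RGB_OUT() when the destination format has no alpha channel, and the new RGB555 RGB_OUT() keeps only the top five bits of each 8-bit component. A small sketch of that packing with a worked value (the helper name is illustrative, not from the patch):

/* Illustrative sketch only, not part of the patch: pack 8-bit R/G/B into an
 * RGB555 word the way the RGB_OUT() macro above does.  For r=0xff, g=0x80,
 * b=0x00 this yields (0x1f << 10) | (0x10 << 5) | 0x00 = 0x7e00. */
#include <stdint.h>

static inline uint16_t pack_rgb555(unsigned r, unsigned g, unsigned b)
{
    return (uint16_t)(((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3));
}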
diff --git a/src/libffmpeg/libavcodec/imgresample.c b/contrib/ffmpeg/libavcodec/imgresample.c
index 8ffcd7960..ce1a05ce4 100644
--- a/src/libffmpeg/libavcodec/imgresample.c
+++ b/contrib/ffmpeg/libavcodec/imgresample.c
@@ -2,18 +2,20 @@
* High quality image resampling with polyphase filters
* Copyright (c) 2001 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +29,7 @@
#include "dsputil.h"
#ifdef USE_FASTMEMCPY
-#include "fastmemcpy.h"
+#include "libvo/fastmemcpy.h"
#endif
#define NB_COMPONENTS 3
@@ -45,6 +47,11 @@
#define LINE_BUF_HEIGHT (NB_TAPS * 4)
+struct SwsContext {
+ struct ImgReSampleContext *resampling_ctx;
+ enum PixelFormat src_pix_fmt, dst_pix_fmt;
+};
+
struct ImgReSampleContext {
int iwidth, iheight, owidth, oheight;
int topBand, bottomBand, leftBand, rightBand;
@@ -164,7 +171,7 @@ static void v_resample(uint8_t *dst, int dst_width, const uint8_t *src,
src_pos += src_incr;\
}
-#define DUMP(reg) movq_r2m(reg, tmp); printf(#reg "=%016Lx\n", tmp.uq);
+#define DUMP(reg) movq_r2m(reg, tmp); printf(#reg "=%016"PRIx64"\n", tmp.uq);
/* XXX: do four pixels at a time */
static void h_resample_fast4_mmx(uint8_t *dst, int dst_width,
@@ -674,6 +681,42 @@ void sws_freeContext(struct SwsContext *ctx)
av_free(ctx);
}
+
+/**
+ * Checks if context can be reused, otherwise reallocates a new one.
+ * If context is NULL, just calls sws_getContext() to get a new one.
+ * Otherwise, checks if the parameters are the same as those already saved in context.
+ * If that is the case, returns the current context.
+ * Otherwise, frees context and gets a new one.
+ *
+ * Be warned that srcFilter and dstFilter are not checked; they are
+ * assumed to remain valid.
+ */
+struct SwsContext *sws_getCachedContext(struct SwsContext *ctx,
+ int srcW, int srcH, int srcFormat,
+ int dstW, int dstH, int dstFormat, int flags,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param)
+{
+ if (ctx != NULL) {
+ if ((ctx->resampling_ctx->iwidth != srcW) ||
+ (ctx->resampling_ctx->iheight != srcH) ||
+ (ctx->src_pix_fmt != srcFormat) ||
+ (ctx->resampling_ctx->owidth != dstW) ||
+ (ctx->resampling_ctx->oheight != dstH) ||
+ (ctx->dst_pix_fmt != dstFormat))
+ {
+ sws_freeContext(ctx);
+ ctx = NULL;
+ }
+ }
+ if (ctx == NULL) {
+ return sws_getContext(srcW, srcH, srcFormat,
+ dstW, dstH, dstFormat, flags,
+ srcFilter, dstFilter, param);
+ }
+ return ctx;
+}
+
int sws_scale(struct SwsContext *ctx, uint8_t* src[], int srcStride[],
int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[])
{
@@ -684,7 +727,7 @@ int sws_scale(struct SwsContext *ctx, uint8_t* src[], int srcStride[],
uint8_t *buf1 = NULL, *buf2 = NULL;
enum PixelFormat current_pix_fmt;
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < 4; i++) {
src_pict.data[i] = src[i];
src_pict.linesize[i] = srcStride[i];
dst_pict.data[i] = dst[i];
diff --git a/src/libffmpeg/libavcodec/indeo2.c b/contrib/ffmpeg/libavcodec/indeo2.c
index 3814e5250..f3917ff3a 100644
--- a/src/libffmpeg/libavcodec/indeo2.c
+++ b/contrib/ffmpeg/libavcodec/indeo2.c
@@ -1,19 +1,21 @@
/*
- * Indel Indeo 2 codec
+ * Intel Indeo 2 codec
* Copyright (c) 2005 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/indeo2data.h b/contrib/ffmpeg/libavcodec/indeo2data.h
index 2430b53c3..71d250af7 100644
--- a/src/libffmpeg/libavcodec/indeo2data.h
+++ b/contrib/ffmpeg/libavcodec/indeo2data.h
@@ -1,3 +1,24 @@
+/*
+ * Intel Indeo 2 codec
+ * copyright (c) 2005 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#define IR2_CODES 143
static const uint16_t ir2_codes[IR2_CODES][2] = {
#ifdef ALT_BITSTREAM_READER_LE
diff --git a/src/libffmpeg/libavcodec/indeo3.c b/contrib/ffmpeg/libavcodec/indeo3.c
index 90eb37150..33dcff820 100644
--- a/src/libffmpeg/libavcodec/indeo3.c
+++ b/contrib/ffmpeg/libavcodec/indeo3.c
@@ -2,18 +2,20 @@
* Intel Indeo 3 (IV31, IV32, etc.) video decoder for ffmpeg
* written, produced, and directed by Alan Smithee
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -381,7 +383,7 @@ static void iv_Decode_Chunk(Indeo3DecodeContext *s,
} else if(cmd == 3) {
if(strip->usl7 == 0) {
strip->usl7 = 1;
- ref_vectors = buf2 + (*buf1 * 2);
+ ref_vectors = (signed char*)buf2 + (*buf1 * 2);
buf1++;
continue;
}
diff --git a/src/libffmpeg/libavcodec/indeo3data.h b/contrib/ffmpeg/libavcodec/indeo3data.h
index 77bbc07ba..e69a09f0e 100644
--- a/src/libffmpeg/libavcodec/indeo3data.h
+++ b/contrib/ffmpeg/libavcodec/indeo3data.h
@@ -1,3 +1,23 @@
+/*
+ * Intel Indeo 3 (IV31, IV32, etc.) video decoder for ffmpeg
+ * written, produced, and directed by Alan Smithee
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
static const uint32_t correction[] = {
0x00000000, 0x00000202, 0xfffffdfe, 0x000002ff, 0xfffffd01, 0xffffff03, 0x000000fd, 0x00000404,
diff --git a/src/libffmpeg/libavcodec/interplayvideo.c b/contrib/ffmpeg/libavcodec/interplayvideo.c
index 73165e795..95059c365 100644
--- a/src/libffmpeg/libavcodec/interplayvideo.c
+++ b/contrib/ffmpeg/libavcodec/interplayvideo.c
@@ -2,18 +2,20 @@
* Interplay MVE Video Decoder
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/jfdctfst.c b/contrib/ffmpeg/libavcodec/jfdctfst.c
index 804fd5766..38424563d 100644
--- a/src/libffmpeg/libavcodec/jfdctfst.c
+++ b/contrib/ffmpeg/libavcodec/jfdctfst.c
@@ -1,9 +1,42 @@
/*
* jfdctfst.c
*
- * Copyright (C) 1994-1996, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
- * For conditions of distribution and use, see the accompanying README file.
+ *
+ * The authors make NO WARRANTY or representation, either express or implied,
+ * with respect to this software, its quality, accuracy, merchantability, or
+ * fitness for a particular purpose. This software is provided "AS IS", and
+ * you, its user, assume the entire risk as to its quality and accuracy.
+ *
+ * This software is copyright (C) 1994-1996, Thomas G. Lane.
+ * All Rights Reserved except as specified below.
+ *
+ * Permission is hereby granted to use, copy, modify, and distribute this
+ * software (or portions thereof) for any purpose, without fee, subject to
+ * these conditions:
+ * (1) If any part of the source code for this software is distributed, then
+ * this README file must be included, with this copyright and no-warranty
+ * notice unaltered; and any additions, deletions, or changes to the original
+ * files must be clearly indicated in accompanying documentation.
+ * (2) If only executable code is distributed, then the accompanying
+ * documentation must state that "this software is based in part on the work
+ * of the Independent JPEG Group".
+ * (3) Permission for use of this software is granted only if the user accepts
+ * full responsibility for any undesirable consequences; the authors accept
+ * NO LIABILITY for damages of any kind.
+ *
+ * These conditions apply to any software derived from or based on the IJG
+ * code, not just to the unmodified library. If you use our work, you ought
+ * to acknowledge us.
+ *
+ * Permission is NOT granted for the use of any IJG author's name or company
+ * name in advertising or publicity relating to this software or products
+ * derived from it. This software may be referred to only as "the Independent
+ * JPEG Group's software".
+ *
+ * We specifically permit and encourage the use of this software as the basis
+ * of commercial products, provided that all warranty or liability claims are
+ * assumed by the product vendor.
*
* This file contains a fast, not so accurate integer implementation of the
* forward DCT (Discrete Cosine Transform).
diff --git a/src/libffmpeg/libavcodec/jfdctint.c b/contrib/ffmpeg/libavcodec/jfdctint.c
index 41d274991..58f3a1446 100644
--- a/src/libffmpeg/libavcodec/jfdctint.c
+++ b/contrib/ffmpeg/libavcodec/jfdctint.c
@@ -1,9 +1,42 @@
/*
* jfdctint.c
*
- * Copyright (C) 1991-1996, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
- * For conditions of distribution and use, see the accompanying README file.
+ *
+ * The authors make NO WARRANTY or representation, either express or implied,
+ * with respect to this software, its quality, accuracy, merchantability, or
+ * fitness for a particular purpose. This software is provided "AS IS", and
+ * you, its user, assume the entire risk as to its quality and accuracy.
+ *
+ * This software is copyright (C) 1991-1996, Thomas G. Lane.
+ * All Rights Reserved except as specified below.
+ *
+ * Permission is hereby granted to use, copy, modify, and distribute this
+ * software (or portions thereof) for any purpose, without fee, subject to
+ * these conditions:
+ * (1) If any part of the source code for this software is distributed, then
+ * this README file must be included, with this copyright and no-warranty
+ * notice unaltered; and any additions, deletions, or changes to the original
+ * files must be clearly indicated in accompanying documentation.
+ * (2) If only executable code is distributed, then the accompanying
+ * documentation must state that "this software is based in part on the work
+ * of the Independent JPEG Group".
+ * (3) Permission for use of this software is granted only if the user accepts
+ * full responsibility for any undesirable consequences; the authors accept
+ * NO LIABILITY for damages of any kind.
+ *
+ * These conditions apply to any software derived from or based on the IJG
+ * code, not just to the unmodified library. If you use our work, you ought
+ * to acknowledge us.
+ *
+ * Permission is NOT granted for the use of any IJG author's name or company
+ * name in advertising or publicity relating to this software or products
+ * derived from it. This software may be referred to only as "the Independent
+ * JPEG Group's software".
+ *
+ * We specifically permit and encourage the use of this software as the basis
+ * of commercial products, provided that all warranty or liability claims are
+ * assumed by the product vendor.
*
* This file contains a slow-but-accurate integer implementation of the
* forward DCT (Discrete Cosine Transform).
diff --git a/src/libffmpeg/libavcodec/jpeg_ls.c b/contrib/ffmpeg/libavcodec/jpeg_ls.c
index 862a3b422..1b4df2b1a 100644
--- a/src/libffmpeg/libavcodec/jpeg_ls.c
+++ b/contrib/ffmpeg/libavcodec/jpeg_ls.c
@@ -3,18 +3,20 @@
* Copyright (c) 2003 Michael Niedermayer
* Copyright (c) 2006 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -75,9 +77,7 @@ static void ls_init_state(JLSState *state){
state->limit = (4 * state->bpp) - state->qbpp;
for(i = 0; i < 367; i++) {
- state->A[i] = (state->range + 32) >> 6;
- if(state->A[i] < 2)
- state->A[i] = 2;
+ state->A[i] = FFMAX((state->range + 32) >> 6, 2);
state->N[i] = 1;
}
@@ -187,6 +187,34 @@ static int decode_lse(MJpegDecodeContext *s)
return 0;
}
+static void inline downscale_state(JLSState *state, int Q){
+ if(state->N[Q] == state->reset){
+ state->A[Q] >>=1;
+ state->B[Q] >>=1;
+ state->N[Q] >>=1;
+ }
+ state->N[Q]++;
+}
+
+static inline int update_state_regular(JLSState *state, int Q, int err){
+ state->A[Q] += FFABS(err);
+ err *= state->twonear;
+ state->B[Q] += err;
+
+ downscale_state(state, Q);
+
+ if(state->B[Q] <= -state->N[Q]) {
+ state->B[Q]= FFMAX(state->B[Q] + state->N[Q], 1-state->N[Q]);
+ if(state->C[Q] > -128)
+ state->C[Q]--;
+ }else if(state->B[Q] > 0){
+ state->B[Q]= FFMIN(state->B[Q] - state->N[Q], 0);
+ if(state->C[Q] < 127)
+ state->C[Q]++;
+ }
+
+ return err;
+}
/**
* Get context-dependent Golomb code, decode it and update context
@@ -211,30 +239,7 @@ static inline int ls_get_code_regular(GetBitContext *gb, JLSState *state, int Q)
if(!state->near && !k && (2 * state->B[Q] <= -state->N[Q]))
ret = -(ret + 1);
- state->A[Q] += ABS(ret);
- ret *= state->twonear;
- state->B[Q] += ret;
-
- if(state->N[Q] == state->reset) {
- state->A[Q] >>= 1;
- state->B[Q] >>= 1;
- state->N[Q] >>= 1;
- }
- state->N[Q]++;
-
- if(state->B[Q] <= -state->N[Q]) {
- state->B[Q] += state->N[Q];
- if(state->C[Q] > -128)
- state->C[Q]--;
- if(state->B[Q] <= -state->N[Q])
- state->B[Q] = -state->N[Q] + 1;
- }else if(state->B[Q] > 0){
- state->B[Q] -= state->N[Q];
- if(state->C[Q] < 127)
- state->C[Q]++;
- if(state->B[Q] > 0)
- state->B[Q] = 0;
- }
+ ret= update_state_regular(state, Q, ret);
return ret;
}
@@ -246,10 +251,9 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state, int RI
int k, ret, temp, map;
int Q = 365 + RItype;
- if(!RItype)
- temp = state->A[Q];
- else
- temp = state->A[Q] + (state->N[Q] >> 1);
+ temp= state->A[Q];
+ if(RItype)
+ temp += state->N[Q] >> 1;
for(k = 0; (state->N[Q] << k) < temp; k++);
@@ -272,22 +276,19 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state, int RI
}
/* update state */
- state->A[Q] += ABS(ret) - RItype;
+ state->A[Q] += FFABS(ret) - RItype;
ret *= state->twonear;
- if(state->N[Q] == state->reset){
- state->A[Q] >>=1;
- state->B[Q] >>=1;
- state->N[Q] >>=1;
- }
- state->N[Q]++;
+ downscale_state(state, Q);
return ret;
}
+#define R(a, i ) (bits == 8 ? ((uint8_t*)(a))[i] : ((uint16_t*)(a))[i] )
+#define W(a, i, v) (bits == 8 ? (((uint8_t*)(a))[i]=v) : (((uint16_t*)(a))[i]=v))
/**
* Decode one line of image
*/
-static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, uint8_t *last, uint8_t *dst, int last2, int w, int stride, int comp){
+static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, void *last, void *dst, int last2, int w, int stride, int comp, int bits){
int i, x = 0;
int Ra, Rb, Rc, Rd;
int D0, D1, D2;
@@ -296,15 +297,15 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, uint8_
int err, pred;
/* compute gradients */
- Ra = x ? dst[x - stride] : last[x];
- Rb = last[x];
- Rc = x ? last[x - stride] : last2;
- Rd = (x >= w - stride) ? last[x] : last[x + stride];
+ Ra = x ? R(dst, x - stride) : R(last, x);
+ Rb = R(last, x);
+ Rc = x ? R(last, x - stride) : last2;
+ Rd = (x >= w - stride) ? R(last, x) : R(last, x + stride);
D0 = Rd - Rb;
D1 = Rb - Rc;
D2 = Rc - Ra;
/* run mode */
- if((ABS(D0) <= state->near) && (ABS(D1) <= state->near) && (ABS(D2) <= state->near)) {
+ if((FFABS(D0) <= state->near) && (FFABS(D1) <= state->near) && (FFABS(D2) <= state->near)) {
int r;
int RItype;
@@ -316,7 +317,7 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, uint8_
r = (w - x) / stride;
}
for(i = 0; i < r; i++) {
- dst[x] = Ra;
+ W(dst, x, Ra);
x += stride;
}
/* if EOL reached, we stop decoding */
@@ -332,13 +333,13 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, uint8_
if(r)
r = get_bits_long(&s->gb, r);
for(i = 0; i < r; i++) {
- dst[x] = Ra;
+ W(dst, x, Ra);
x += stride;
}
/* decode run termination value */
- Rb = last[x];
- RItype = (ABS(Ra - Rb) <= state->near) ? 1 : 0;
+ Rb = R(last, x);
+ RItype = (FFABS(Ra - Rb) <= state->near) ? 1 : 0;
err = ls_get_code_runterm(&s->gb, state, RItype, log2_run[state->run_index[comp]]);
if(state->run_index[comp])
state->run_index[comp]--;
@@ -351,17 +352,6 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, uint8_
else
pred = Rb + err;
}
-
- if(state->near){
- if(pred < -state->near)
- pred += state->range * state->twonear;
- else if(pred > state->maxval + state->near)
- pred -= state->range * state->twonear;
- pred = clip(pred, 0, state->maxval);
- }
-
- dst[x] = pred;
- x += stride;
} else { /* regular mode */
int context, sign;
@@ -385,17 +375,18 @@ static inline void ls_decode_line(JLSState *state, MJpegDecodeContext *s, uint8_
/* we have to do something more for near-lossless coding */
pred += err;
- if(state->near) {
- if(pred < -state->near)
- pred += state->range * state->twonear;
- else if(pred > state->maxval + state->near)
- pred -= state->range * state->twonear;
- pred = clip(pred, 0, state->maxval);
- }
-
- dst[x] = pred;
- x += stride;
}
+ if(state->near){
+ if(pred < -state->near)
+ pred += state->range * state->twonear;
+ else if(pred > state->maxval + state->near)
+ pred -= state->range * state->twonear;
+ pred = clip(pred, 0, state->maxval);
+ }
+
+ pred &= state->maxval;
+ W(dst, x, pred);
+ x += stride;
}
}
@@ -403,7 +394,7 @@ static int ls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
int i, t = 0;
uint8_t *zero, *last, *cur;
JLSState *state;
- int off, stride, width;
+ int off = 0, stride = 1, width, shift;
zero = av_mallocz(s->picture.linesize[0]);
last = zero;
@@ -421,6 +412,11 @@ static int ls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
reset_ls_coding_parameters(state, 0);
ls_init_state(state);
+ if(s->bits <= 8)
+ shift = point_transform + (8 - s->bits);
+ else
+ shift = point_transform + (16 - s->bits);
+
// av_log(s->avctx, AV_LOG_DEBUG, "JPEG-LS params: %ix%i NEAR=%i MV=%i T(%i,%i,%i) RESET=%i, LIMIT=%i, qbpp=%i, RANGE=%i\n",s->width,s->height,state->near,state->maxval,state->T1,state->T2,state->T3,state->reset,state->limit,state->qbpp, state->range);
// av_log(s->avctx, AV_LOG_DEBUG, "JPEG params: ILV=%i Pt=%i BPP=%i, scan = %i\n", ilv, point_transform, s->bits, s->cur_scan);
if(ilv == 0) { /* separate planes */
@@ -429,8 +425,13 @@ static int ls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
width = s->width * stride;
cur += off;
for(i = 0; i < s->height; i++) {
- ls_decode_line(state, s, last, cur, t, width, stride, off);
- t = last[0];
+ if(s->bits <= 8){
+ ls_decode_line(state, s, last, cur, t, width, stride, off, 8);
+ t = last[0];
+ }else{
+ ls_decode_line(state, s, last, cur, t, width, stride, off, 16);
+ t = *((uint16_t*)last);
+ }
last = cur;
cur += s->picture.linesize[0];
@@ -446,7 +447,7 @@ static int ls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
width = s->width * 3;
for(i = 0; i < s->height; i++) {
for(j = 0; j < 3; j++) {
- ls_decode_line(state, s, last + j, cur + j, Rc[j], width, 3, j);
+ ls_decode_line(state, s, last + j, cur + j, Rc[j], width, 3, j, 8);
Rc[j] = last[j];
if (s->restart_interval && !--s->restart_count) {
@@ -464,6 +465,31 @@ static int ls_decode_picture(MJpegDecodeContext *s, int near, int point_transfor
return -1;
}
+ if(shift){ /* we need to do point transform or normalize samples */
+ int x, w;
+
+ w = s->width * s->nb_components;
+
+ if(s->bits <= 8){
+ uint8_t *src = s->picture.data[0];
+
+ for(i = 0; i < s->height; i++){
+ for(x = off; x < w; x+= stride){
+ src[x] <<= shift;
+ }
+ src += s->picture.linesize[0];
+ }
+ }else{
+ uint16_t *src = s->picture.data[0];
+
+ for(i = 0; i < s->height; i++){
+ for(x = 0; x < w; x++){
+ src[x] <<= shift;
+ }
+ src += s->picture.linesize[0]/2;
+ }
+ }
+ }
av_free(state);
av_free(zero);
@@ -489,35 +515,13 @@ static inline void ls_encode_regular(JLSState *state, PutBitContext *pb, int Q,
err += state->range;
if(err >= ((state->range + 1) >> 1)) {
err -= state->range;
- val = 2 * ABS(err) - 1 - map;
+ val = 2 * FFABS(err) - 1 - map;
} else
val = 2 * err + map;
set_ur_golomb_jpegls(pb, val, k, state->limit, state->qbpp);
- state->A[Q] += ABS(err);
- state->B[Q] += err * state->twonear;
-
- if(state->N[Q] == state->reset) {
- state->A[Q] >>= 1;
- state->B[Q] >>= 1;
- state->N[Q] >>= 1;
- }
- state->N[Q]++;
-
- if(state->B[Q] <= -state->N[Q]) {
- state->B[Q] += state->N[Q];
- if(state->C[Q] > -128)
- state->C[Q]--;
- if(state->B[Q] <= -state->N[Q])
- state->B[Q] = -state->N[Q] + 1;
- }else if(state->B[Q] > 0){
- state->B[Q] -= state->N[Q];
- if(state->C[Q] < 127)
- state->C[Q]++;
- if(state->B[Q] > 0)
- state->B[Q] = 0;
- }
+ update_state_regular(state, Q, err);
}
/**
@@ -547,12 +551,7 @@ static inline void ls_encode_runterm(JLSState *state, PutBitContext *pb, int RIt
state->B[Q]++;
state->A[Q] += (val + 1 - RItype) >> 1;
- if(state->N[Q] == state->reset) {
- state->A[Q] >>= 1;
- state->B[Q] >>= 1;
- state->N[Q] >>= 1;
- }
- state->N[Q]++;
+ downscale_state(state, Q);
}
/**
@@ -578,7 +577,7 @@ static inline void ls_encode_run(JLSState *state, PutBitContext *pb, int run, in
/**
* Encode one line of image
*/
-static inline void ls_encode_line(JLSState *state, PutBitContext *pb, uint8_t *last, uint8_t *cur, int last2, int w, int stride, int comp){
+static inline void ls_encode_line(JLSState *state, PutBitContext *pb, void *last, void *cur, int last2, int w, int stride, int comp, int bits){
int x = 0;
int Ra, Rb, Rc, Rd;
int D0, D1, D2;
@@ -587,32 +586,32 @@ static inline void ls_encode_line(JLSState *state, PutBitContext *pb, uint8_t *l
int err, pred, sign;
/* compute gradients */
- Ra = x ? cur[x - stride] : last[x];
- Rb = last[x];
- Rc = x ? last[x - stride] : last2;
- Rd = (x >= w - stride) ? last[x] : last[x + stride];
+ Ra = x ? R(cur, x - stride) : R(last, x);
+ Rb = R(last, x);
+ Rc = x ? R(last, x - stride) : last2;
+ Rd = (x >= w - stride) ? R(last, x) : R(last, x + stride);
D0 = Rd - Rb;
D1 = Rb - Rc;
D2 = Rc - Ra;
/* run mode */
- if((ABS(D0) <= state->near) && (ABS(D1) <= state->near) && (ABS(D2) <= state->near)) {
+ if((FFABS(D0) <= state->near) && (FFABS(D1) <= state->near) && (FFABS(D2) <= state->near)) {
int RUNval, RItype, run;
run = 0;
RUNval = Ra;
- while(x < w && (ABS(cur[x] - RUNval) <= state->near)){
+ while(x < w && (FFABS(R(cur, x) - RUNval) <= state->near)){
run++;
- cur[x] = Ra;
+ W(cur, x, Ra);
x += stride;
}
ls_encode_run(state, pb, run, comp, x < w);
if(x >= w)
return;
- Rb = last[x];
- RItype = (ABS(Ra - Rb) <= state->near);
+ Rb = R(last, x);
+ RItype = (FFABS(Ra - Rb) <= state->near);
pred = RItype ? Ra : Rb;
- err = cur[x] - pred;
+ err = R(cur, x) - pred;
if(!RItype && Ra > Rb)
err = -err;
@@ -627,7 +626,7 @@ static inline void ls_encode_line(JLSState *state, PutBitContext *pb, uint8_t *l
Ra = clip(pred + err * state->twonear, 0, state->maxval);
else
Ra = clip(pred - err * state->twonear, 0, state->maxval);
- cur[x] = Ra;
+ W(cur, x, Ra);
}
if(err < 0)
err += state->range;
@@ -638,7 +637,6 @@ static inline void ls_encode_line(JLSState *state, PutBitContext *pb, uint8_t *l
if(state->run_index[comp] > 0)
state->run_index[comp]--;
- x += stride;
} else { /* regular mode */
int context;
@@ -649,11 +647,11 @@ static inline void ls_encode_line(JLSState *state, PutBitContext *pb, uint8_t *l
context = -context;
sign = 1;
pred = clip(pred - state->C[context], 0, state->maxval);
- err = pred - cur[x];
+ err = pred - R(cur, x);
}else{
sign = 0;
pred = clip(pred + state->C[context], 0, state->maxval);
- err = cur[x] - pred;
+ err = R(cur, x) - pred;
}
if(state->near){
@@ -665,12 +663,12 @@ static inline void ls_encode_line(JLSState *state, PutBitContext *pb, uint8_t *l
Ra = clip(pred + err * state->twonear, 0, state->maxval);
else
Ra = clip(pred - err * state->twonear, 0, state->maxval);
- cur[x] = Ra;
+ W(cur, x, Ra);
}
ls_encode_regular(state, pb, context, err);
- x += stride;
}
+ x += stride;
}
}
@@ -678,7 +676,7 @@ static void ls_store_lse(JLSState *state, PutBitContext *pb){
/* Test if we have default params and don't need to store LSE */
JLSState state2;
memset(&state2, 0, sizeof(JLSState));
- state2.bpp = 8;
+ state2.bpp = state->bpp;
state2.near = state->near;
reset_ls_coding_parameters(&state2, 1);
if(state->T1 == state2.T1 && state->T2 == state2.T2 && state->T3 == state2.T3 && state->reset == state2.reset)
@@ -715,13 +713,16 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_
p->pict_type= FF_I_TYPE;
p->key_frame= 1;
- comps = (avctx->pix_fmt == PIX_FMT_GRAY8) ? 1 : 3;
+ if(avctx->pix_fmt == PIX_FMT_GRAY8 || avctx->pix_fmt == PIX_FMT_GRAY16)
+ comps = 1;
+ else
+ comps = 3;
/* write our own JPEG header, can't use mjpeg_picture_header */
put_marker(&pb, SOI);
put_marker(&pb, SOF48);
put_bits(&pb, 16, 8 + comps * 3); // header size depends on components
- put_bits(&pb, 8, 8); // bpp
+ put_bits(&pb, 8, (avctx->pix_fmt == PIX_FMT_GRAY16) ? 16 : 8); // bpp
put_bits(&pb, 16, avctx->height);
put_bits(&pb, 16, avctx->width);
put_bits(&pb, 8, comps); // components
@@ -745,7 +746,7 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_
state = av_mallocz(sizeof(JLSState));
/* initialize JPEG-LS state from JPEG parameters */
state->near = near;
- state->bpp = 8;
+ state->bpp = (avctx->pix_fmt == PIX_FMT_GRAY16) ? 16 : 8;
reset_ls_coding_parameters(state, 0);
ls_init_state(state);
@@ -758,11 +759,20 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_
int t = 0;
for(i = 0; i < avctx->height; i++) {
- ls_encode_line(state, &pb2, last, cur, t, avctx->width, 1, 0);
+ ls_encode_line(state, &pb2, last, cur, t, avctx->width, 1, 0, 8);
t = last[0];
last = cur;
cur += p->linesize[0];
}
+ }else if(avctx->pix_fmt == PIX_FMT_GRAY16){
+ int t = 0;
+
+ for(i = 0; i < avctx->height; i++) {
+ ls_encode_line(state, &pb2, last, cur, t, avctx->width, 1, 0, 16);
+ t = *((uint16_t*)last);
+ last = cur;
+ cur += p->linesize[0];
+ }
}else if(avctx->pix_fmt == PIX_FMT_RGB24){
int j, width;
int Rc[3] = {0, 0, 0};
@@ -770,7 +780,7 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_
width = avctx->width * 3;
for(i = 0; i < avctx->height; i++) {
for(j = 0; j < 3; j++) {
- ls_encode_line(state, &pb2, last + j, cur + j, Rc[j], width, 3, j);
+ ls_encode_line(state, &pb2, last + j, cur + j, Rc[j], width, 3, j, 8);
Rc[j] = last[j];
}
last = cur;
@@ -783,7 +793,7 @@ static int encode_picture_ls(AVCodecContext *avctx, unsigned char *buf, int buf_
width = avctx->width * 3;
for(i = 0; i < avctx->height; i++) {
for(j = 2; j >= 0; j--) {
- ls_encode_line(state, &pb2, last + j, cur + j, Rc[j], width, 3, j);
+ ls_encode_line(state, &pb2, last + j, cur + j, Rc[j], width, 3, j, 8);
Rc[j] = last[j];
}
last = cur;
@@ -825,7 +835,7 @@ static int encode_init_ls(AVCodecContext *ctx) {
c->avctx = ctx;
ctx->coded_frame = &c->picture;
- if(ctx->pix_fmt != PIX_FMT_GRAY8 && ctx->pix_fmt != PIX_FMT_RGB24 && ctx->pix_fmt != PIX_FMT_BGR24){
+ if(ctx->pix_fmt != PIX_FMT_GRAY8 && ctx->pix_fmt != PIX_FMT_GRAY16 && ctx->pix_fmt != PIX_FMT_RGB24 && ctx->pix_fmt != PIX_FMT_BGR24){
av_log(ctx, AV_LOG_ERROR, "Only grayscale and RGB24/BGR24 images are supported\n");
return -1;
}
@@ -840,6 +850,6 @@ AVCodec jpegls_encoder = { //FIXME avoid MPV_* lossless jpeg shouldnt need them
encode_init_ls,
encode_picture_ls,
NULL,
- .pix_fmts= (enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_RGB24, PIX_FMT_GRAY8, -1},
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_BGR24, PIX_FMT_RGB24, PIX_FMT_GRAY8, PIX_FMT_GRAY16, -1},
};
#endif
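The jpeg_ls.c changes above template ls_decode_line()/ls_encode_line() on a `bits' parameter: the R()/W() macros select uint8_t or uint16_t sample access, and after decoding every sample is shifted left by `shift' to undo the point transform and normalise the sample depth. For example, a 12-bit scan stored in 16-bit samples with no point transform gives shift = 0 + (16 - 12) = 4, so a decoded 0x0abc is stored as 0xabc0, while a plain 8-bit scan gives shift = 0 and is left untouched. A minimal sketch of that computation (the helper name is illustrative, not from the patch):

/* Illustrative sketch only, not part of the patch: the per-sample shift the
 * decoder applies, as computed in ls_decode_picture() above. */
static inline int jpegls_sample_shift(int bits, int point_transform)
{
    return point_transform + ((bits <= 8 ? 8 : 16) - bits);
}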
diff --git a/src/libffmpeg/libavcodec/jrevdct.c b/contrib/ffmpeg/libavcodec/jrevdct.c
index dc2ffaff7..f055cc1ac 100644
--- a/src/libffmpeg/libavcodec/jrevdct.c
+++ b/contrib/ffmpeg/libavcodec/jrevdct.c
@@ -1,9 +1,42 @@
/*
* jrevdct.c
*
- * Copyright (C) 1991, 1992, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
- * For conditions of distribution and use, see the accompanying README file.
+ *
+ * The authors make NO WARRANTY or representation, either express or implied,
+ * with respect to this software, its quality, accuracy, merchantability, or
+ * fitness for a particular purpose. This software is provided "AS IS", and
+ * you, its user, assume the entire risk as to its quality and accuracy.
+ *
+ * This software is copyright (C) 1991, 1992, Thomas G. Lane.
+ * All Rights Reserved except as specified below.
+ *
+ * Permission is hereby granted to use, copy, modify, and distribute this
+ * software (or portions thereof) for any purpose, without fee, subject to
+ * these conditions:
+ * (1) If any part of the source code for this software is distributed, then
+ * this README file must be included, with this copyright and no-warranty
+ * notice unaltered; and any additions, deletions, or changes to the original
+ * files must be clearly indicated in accompanying documentation.
+ * (2) If only executable code is distributed, then the accompanying
+ * documentation must state that "this software is based in part on the work
+ * of the Independent JPEG Group".
+ * (3) Permission for use of this software is granted only if the user accepts
+ * full responsibility for any undesirable consequences; the authors accept
+ * NO LIABILITY for damages of any kind.
+ *
+ * These conditions apply to any software derived from or based on the IJG
+ * code, not just to the unmodified library. If you use our work, you ought
+ * to acknowledge us.
+ *
+ * Permission is NOT granted for the use of any IJG author's name or company
+ * name in advertising or publicity relating to this software or products
+ * derived from it. This software may be referred to only as "the Independent
+ * JPEG Group's software".
+ *
+ * We specifically permit and encourage the use of this software as the basis
+ * of commercial products, provided that all warranty or liability claims are
+ * assumed by the product vendor.
*
* This file contains the basic inverse-DCT transformation subroutine.
*
diff --git a/src/libffmpeg/libavcodec/kmvc.c b/contrib/ffmpeg/libavcodec/kmvc.c
index 036efa559..e8f39fca1 100644
--- a/src/libffmpeg/libavcodec/kmvc.c
+++ b/contrib/ffmpeg/libavcodec/kmvc.c
@@ -2,18 +2,20 @@
* KMVC decoder
* Copyright (c) 2006 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -395,10 +397,8 @@ static int decode_end(AVCodecContext * avctx)
{
KmvcContext *const c = (KmvcContext *) avctx->priv_data;
- if (c->frm0)
- av_free(c->frm0);
- if (c->frm1)
- av_free(c->frm1);
+ av_freep(&c->frm0);
+ av_freep(&c->frm1);
if (c->pic.data[0])
avctx->release_buffer(avctx, &c->pic);
diff --git a/src/libffmpeg/libavcodec/lcl.c b/contrib/ffmpeg/libavcodec/lcl.c
index 0bc118af2..b02ea1543 100644
--- a/src/libffmpeg/libavcodec/lcl.c
+++ b/contrib/ffmpeg/libavcodec/lcl.c
@@ -2,18 +2,20 @@
* LCL (LossLess Codec Library) Codec
* Copyright (c) 2002-2004 Roberto Togni
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -190,7 +192,7 @@ static unsigned int mszh_decomp(unsigned char * srcptr, int srclen, unsigned cha
-
+#ifdef CONFIG_DECODERS
/*
*
* Decode a frame
@@ -544,9 +546,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8
/* always report that the buffer was completely consumed */
return buf_size;
}
+#endif
-
-
+#ifdef CONFIG_ENCODERS
/*
*
* Encode a frame
@@ -605,9 +607,9 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
return c->zstream.total_out;
#endif
}
+#endif /* CONFIG_ENCODERS */
-
-
+#ifdef CONFIG_DECODERS
/*
*
* Init lcl decoder
@@ -769,9 +771,9 @@ static int decode_init(AVCodecContext *avctx)
return 0;
}
+#endif /* CONFIG_DECODERS */
-
-
+#ifdef CONFIG_ENCODERS
/*
*
* Init lcl encoder
@@ -839,11 +841,11 @@ static int encode_init(AVCodecContext *avctx)
return 0;
#endif
}
+#endif /* CONFIG_ENCODERS */
-
-
+#ifdef CONFIG_DECODERS
/*
*
* Uninit lcl decoder
@@ -861,9 +863,9 @@ static int decode_end(AVCodecContext *avctx)
return 0;
}
+#endif
-
-
+#ifdef CONFIG_ENCODERS
/*
*
* Uninit lcl encoder
@@ -881,7 +883,9 @@ static int encode_end(AVCodecContext *avctx)
return 0;
}
+#endif
+#ifdef CONFIG_MSZH_DECODER
AVCodec mszh_decoder = {
"mszh",
CODEC_TYPE_VIDEO,
@@ -893,8 +897,9 @@ AVCodec mszh_decoder = {
decode_frame,
CODEC_CAP_DR1,
};
+#endif
-
+#ifdef CONFIG_ZLIB_DECODER
AVCodec zlib_decoder = {
"zlib",
CODEC_TYPE_VIDEO,
@@ -906,6 +911,7 @@ AVCodec zlib_decoder = {
decode_frame,
CODEC_CAP_DR1,
};
+#endif
#ifdef CONFIG_ENCODERS
diff --git a/contrib/ffmpeg/libavcodec/liba52/a52.h b/contrib/ffmpeg/libavcodec/liba52/a52.h
new file mode 100644
index 000000000..f2ea5f836
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/a52.h
@@ -0,0 +1,73 @@
+/*
+ * a52.h
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef A52_H
+#define A52_H
+
+#include "../avcodec.h"
+
+#undef malloc
+#undef free
+#undef realloc
+
+#if defined(LIBA52_FIXED)
+typedef int32_t sample_t;
+typedef int32_t level_t;
+#elif defined(LIBA52_DOUBLE)
+typedef double sample_t;
+typedef double level_t;
+#else
+typedef float sample_t;
+typedef float level_t;
+#endif
+
+typedef struct a52_state_s a52_state_t;
+
+#define A52_CHANNEL 0
+#define A52_MONO 1
+#define A52_STEREO 2
+#define A52_3F 3
+#define A52_2F1R 4
+#define A52_3F1R 5
+#define A52_2F2R 6
+#define A52_3F2R 7
+#define A52_CHANNEL1 8
+#define A52_CHANNEL2 9
+#define A52_DOLBY 10
+#define A52_CHANNEL_MASK 15
+
+#define A52_LFE 16
+#define A52_ADJUST_LEVEL 32
+
+a52_state_t * a52_init (uint32_t mm_accel);
+sample_t * a52_samples (a52_state_t * state);
+int a52_syncinfo (uint8_t * buf, int * flags,
+ int * sample_rate, int * bit_rate);
+int a52_frame (a52_state_t * state, uint8_t * buf, int * flags,
+ level_t * level, sample_t bias);
+void a52_dynrng (a52_state_t * state,
+ level_t (* call) (level_t, void *), void * data);
+int a52_block (a52_state_t * state);
+void a52_free (a52_state_t * state);
+
+#endif /* A52_H */
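a52.h above declares the public liba52 entry points used by the A/52 decoder wrapper. A hedged sketch of the usual decode loop for one already-synchronised frame (the function name, the A52_STEREO output mode, the level of 1 and the bias of 0 are example values only; error handling and PCM conversion are omitted):

/* Illustrative sketch only, not part of the patch: decode one A/52 frame.
 * An A/52 frame carries six blocks of 256 samples per channel; after each
 * successful a52_block() call, a52_samples(state) points at the decoded
 * samples for that block. */
static void decode_a52_frame(a52_state_t *state, uint8_t *frame)
{
    int flags = A52_STEREO | A52_ADJUST_LEVEL;
    level_t level = 1;
    sample_t bias = 0;
    int i;

    if (a52_frame(state, frame, &flags, &level, bias))
        return; /* broken frame */
    for (i = 0; i < 6; i++) {
        if (a52_block(state))
            break; /* decode error */
        /* consume a52_samples(state) here */
    }
}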
diff --git a/contrib/ffmpeg/libavcodec/liba52/a52_internal.h b/contrib/ffmpeg/libavcodec/liba52/a52_internal.h
new file mode 100644
index 000000000..49fd4ef99
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/a52_internal.h
@@ -0,0 +1,162 @@
+/*
+ * a52_internal.h
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+typedef struct {
+ uint8_t bai; /* fine SNR offset, fast gain */
+ uint8_t deltbae; /* delta bit allocation exists */
+ int8_t deltba[50]; /* per-band delta bit allocation */
+} ba_t;
+
+typedef struct {
+ uint8_t exp[256]; /* decoded channel exponents */
+ int8_t bap[256]; /* derived channel bit allocation */
+} expbap_t;
+
+struct a52_state_s {
+ uint8_t fscod; /* sample rate */
+ uint8_t halfrate; /* halfrate factor */
+ uint8_t acmod; /* coded channels */
+ uint8_t lfeon; /* coded lfe channel */
+ level_t clev; /* centre channel mix level */
+ level_t slev; /* surround channels mix level */
+
+ int output; /* type of output */
+ level_t level; /* output level */
+ sample_t bias; /* output bias */
+
+ int dynrnge; /* apply dynamic range */
+ level_t dynrng; /* dynamic range */
+ void * dynrngdata; /* dynamic range callback function and data */
+ level_t (* dynrngcall) (level_t range, void * dynrngdata);
+
+ uint8_t chincpl; /* channel coupled */
+ uint8_t phsflginu; /* phase flags in use (stereo only) */
+ uint8_t cplstrtmant; /* coupling channel start mantissa */
+ uint8_t cplendmant; /* coupling channel end mantissa */
+ uint32_t cplbndstrc; /* coupling band structure */
+ level_t cplco[5][18]; /* coupling coordinates */
+
+ /* derived information */
+ uint8_t cplstrtbnd; /* coupling start band (for bit allocation) */
+ uint8_t ncplbnd; /* number of coupling bands */
+
+ uint8_t rematflg; /* stereo rematrixing */
+
+ uint8_t endmant[5]; /* channel end mantissa */
+
+ uint16_t bai; /* bit allocation information */
+
+ uint32_t * buffer_start;
+ uint16_t lfsr_state; /* dither state */
+ uint32_t bits_left;
+ uint32_t current_word;
+
+ uint8_t csnroffst; /* coarse SNR offset */
+ ba_t cplba; /* coupling bit allocation parameters */
+ ba_t ba[5]; /* channel bit allocation parameters */
+ ba_t lfeba; /* lfe bit allocation parameters */
+
+ uint8_t cplfleak; /* coupling fast leak init */
+ uint8_t cplsleak; /* coupling slow leak init */
+
+ expbap_t cpl_expbap;
+ expbap_t fbw_expbap[5];
+ expbap_t lfe_expbap;
+
+ sample_t * samples;
+ int downmixed;
+};
+
+#define LEVEL_PLUS6DB 2.0
+#define LEVEL_PLUS3DB 1.4142135623730951
+#define LEVEL_3DB 0.7071067811865476
+#define LEVEL_45DB 0.5946035575013605
+#define LEVEL_6DB 0.5
+
+#define EXP_REUSE (0)
+#define EXP_D15 (1)
+#define EXP_D25 (2)
+#define EXP_D45 (3)
+
+#define DELTA_BIT_REUSE (0)
+#define DELTA_BIT_NEW (1)
+#define DELTA_BIT_NONE (2)
+#define DELTA_BIT_RESERVED (3)
+
+void a52_bit_allocate (a52_state_t * state, ba_t * ba, int bndstart,
+ int start, int end, int fastleak, int slowleak,
+ expbap_t * expbap);
+
+int a52_downmix_init (int input, int flags, level_t * level,
+ level_t clev, level_t slev);
+int a52_downmix_coeff (level_t * coeff, int acmod, int output, level_t level,
+ level_t clev, level_t slev);
+void a52_downmix (sample_t * samples, int acmod, int output, sample_t bias,
+ level_t clev, level_t slev);
+void a52_upmix (sample_t * samples, int acmod, int output);
+
+void a52_imdct_init (uint32_t mm_accel);
+void a52_imdct_256 (sample_t * data, sample_t * delay, sample_t bias);
+void a52_imdct_512 (sample_t * data, sample_t * delay, sample_t bias);
+//extern void (* a52_imdct_256) (sample_t data[], sample_t delay[], sample_t bias);
+//extern void (* a52_imdct_512) (sample_t data[], sample_t delay[], sample_t bias);
+
+#define ROUND(x) ((int)((x) + ((x) > 0 ? 0.5 : -0.5)))
+
+#ifndef LIBA52_FIXED
+
+typedef sample_t quantizer_t;
+#define SAMPLE(x) (x)
+#define LEVEL(x) (x)
+#define MUL(a,b) ((a) * (b))
+#define MUL_L(a,b) ((a) * (b))
+#define MUL_C(a,b) ((a) * (b))
+#define DIV(a,b) ((a) / (b))
+#define BIAS(x) ((x) + bias)
+
+#else /* LIBA52_FIXED */
+
+typedef int16_t quantizer_t;
+#define SAMPLE(x) (sample_t)((x) * (1 << 30))
+#define LEVEL(x) (level_t)((x) * (1 << 26))
+
+#if 0
+#define MUL(a,b) ((int)(((int64_t)(a) * (b) + (1 << 29)) >> 30))
+#define MUL_L(a,b) ((int)(((int64_t)(a) * (b) + (1 << 25)) >> 26))
+#elif 1
+#define MUL(a,b) \
+({ int32_t _ta=(a), _tb=(b), _tc; \
+ _tc=(_ta & 0xffff)*(_tb >> 16)+(_ta >> 16)*(_tb & 0xffff); (int32_t)(((_tc >> 14))+ (((_ta >> 16)*(_tb >> 16)) << 2 )); })
+#define MUL_L(a,b) \
+({ int32_t _ta=(a), _tb=(b), _tc; \
+ _tc=(_ta & 0xffff)*(_tb >> 16)+(_ta >> 16)*(_tb & 0xffff); (int32_t)((_tc >> 10) + (((_ta >> 16)*(_tb >> 16)) << 6)); })
+#else
+#define MUL(a,b) (((a) >> 15) * ((b) >> 15))
+#define MUL_L(a,b) (((a) >> 13) * ((b) >> 13))
+#endif
+
+#define MUL_C(a,b) MUL_L (a, LEVEL (b))
+#define DIV(a,b) ((((int64_t)LEVEL (a)) << 26) / (b))
+#define BIAS(x) (x)
+
+#endif
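
The LIBA52_FIXED branch above fixes the number formats implicitly: SAMPLE() produces Q30 samples, LEVEL() produces Q26 levels, MUL() keeps a Q30-by-Q30 product in Q30, and MUL_L() keeps a Q26-by-Q26 product in Q26 by shifting the 64-bit product back down. A minimal standalone sketch of that arithmetic (editorial illustration only, not part of this commit; plain int32_t stand-ins for sample_t/level_t):

/* Editorial illustration of the Q26 "level" arithmetic implied by the
 * LIBA52_FIXED macros above (reference shift form, no rounding). */
#include <stdint.h>
#include <stdio.h>

static int32_t level_q26 (double x)             { return (int32_t)(x * (1 << 26)); }
static int32_t mul_l_ref (int32_t a, int32_t b) { return (int32_t)(((int64_t)a * b) >> 26); }

int main (void)
{
    int32_t half    = level_q26 (0.5);          /* LEVEL(0.5)  */
    int32_t quarter = level_q26 (0.25);         /* LEVEL(0.25) */
    int32_t out     = mul_l_ref (half, quarter);

    printf ("%f\n", out / (double)(1 << 26));   /* prints 0.125 */
    return 0;
}
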
diff --git a/contrib/ffmpeg/libavcodec/liba52/a52_util.h b/contrib/ffmpeg/libavcodec/liba52/a52_util.h
new file mode 100644
index 000000000..8ef2cece9
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/a52_util.h
@@ -0,0 +1,32 @@
+/*
+ * a52_util.h
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef A52_UTIL_H
+#define A52_UTIL_H
+
+uint16_t a52_crc16_block(uint8_t *data,uint32_t num_bytes);
+
+void* a52_resample_init(uint32_t mm_accel,int flags,int chans);
+extern int (* a52_resample) (float * _f, int16_t * s16);
+
+#endif /* A52_UTIL_H */
diff --git a/contrib/ffmpeg/libavcodec/liba52/bit_allocate.c b/contrib/ffmpeg/libavcodec/liba52/bit_allocate.c
new file mode 100644
index 000000000..415a08d21
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/bit_allocate.c
@@ -0,0 +1,260 @@
+/*
+ * bit_allocate.c
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "a52.h"
+#include "a52_internal.h"
+
+static int hthtab[3][50] = {
+ {0x730, 0x730, 0x7c0, 0x800, 0x820, 0x840, 0x850, 0x850, 0x860, 0x860,
+ 0x860, 0x860, 0x860, 0x870, 0x870, 0x870, 0x880, 0x880, 0x890, 0x890,
+ 0x8a0, 0x8a0, 0x8b0, 0x8b0, 0x8c0, 0x8c0, 0x8d0, 0x8e0, 0x8f0, 0x900,
+ 0x910, 0x910, 0x910, 0x910, 0x900, 0x8f0, 0x8c0, 0x870, 0x820, 0x7e0,
+ 0x7a0, 0x770, 0x760, 0x7a0, 0x7c0, 0x7c0, 0x6e0, 0x400, 0x3c0, 0x3c0},
+ {0x710, 0x710, 0x7a0, 0x7f0, 0x820, 0x830, 0x840, 0x850, 0x850, 0x860,
+ 0x860, 0x860, 0x860, 0x860, 0x870, 0x870, 0x870, 0x880, 0x880, 0x880,
+ 0x890, 0x890, 0x8a0, 0x8a0, 0x8b0, 0x8b0, 0x8c0, 0x8c0, 0x8e0, 0x8f0,
+ 0x900, 0x910, 0x910, 0x910, 0x910, 0x900, 0x8e0, 0x8b0, 0x870, 0x820,
+ 0x7e0, 0x7b0, 0x760, 0x770, 0x7a0, 0x7c0, 0x780, 0x5d0, 0x3c0, 0x3c0},
+ {0x680, 0x680, 0x750, 0x7b0, 0x7e0, 0x810, 0x820, 0x830, 0x840, 0x850,
+ 0x850, 0x850, 0x860, 0x860, 0x860, 0x860, 0x860, 0x860, 0x860, 0x860,
+ 0x870, 0x870, 0x870, 0x870, 0x880, 0x880, 0x880, 0x890, 0x8a0, 0x8b0,
+ 0x8c0, 0x8d0, 0x8e0, 0x8f0, 0x900, 0x910, 0x910, 0x910, 0x900, 0x8f0,
+ 0x8d0, 0x8b0, 0x840, 0x7f0, 0x790, 0x760, 0x7a0, 0x7c0, 0x7b0, 0x720}
+};
+
+static int8_t baptab[305] = {
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, /* 93 padding elems */
+
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 14, 14, 14, 14, 14, 14, 14,
+ 14, 12, 12, 12, 12, 11, 11, 11, 11, 10, 10, 10, 10, 9, 9, 9,
+ 9, 8, 8, 8, 8, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 5,
+ 5, 4, 4, -3, -3, 3, 3, 3, -2, -2, -1, -1, -1, -1, -1, 0,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0 /* 148 padding elems */
+};
+
+static int bndtab[30] = {21, 22, 23, 24, 25, 26, 27, 28, 31, 34,
+ 37, 40, 43, 46, 49, 55, 61, 67, 73, 79,
+ 85, 97, 109, 121, 133, 157, 181, 205, 229, 253};
+
+static int8_t latab[256] = {
+ -64, -63, -62, -61, -60, -59, -58, -57, -56, -55, -54, -53,
+ -52, -52, -51, -50, -49, -48, -47, -47, -46, -45, -44, -44,
+ -43, -42, -41, -41, -40, -39, -38, -38, -37, -36, -36, -35,
+ -35, -34, -33, -33, -32, -32, -31, -30, -30, -29, -29, -28,
+ -28, -27, -27, -26, -26, -25, -25, -24, -24, -23, -23, -22,
+ -22, -21, -21, -21, -20, -20, -19, -19, -19, -18, -18, -18,
+ -17, -17, -17, -16, -16, -16, -15, -15, -15, -14, -14, -14,
+ -13, -13, -13, -13, -12, -12, -12, -12, -11, -11, -11, -11,
+ -10, -10, -10, -10, -10, -9, -9, -9, -9, -9, -8, -8,
+ -8, -8, -8, -8, -7, -7, -7, -7, -7, -7, -6, -6,
+ -6, -6, -6, -6, -6, -6, -5, -5, -5, -5, -5, -5,
+ -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4,
+ -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3,
+ -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2,
+ -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0
+};
+
+#define UPDATE_LEAK() \
+do { \
+ fastleak += fdecay; \
+ if (fastleak > psd + fgain) \
+ fastleak = psd + fgain; \
+ slowleak += sdecay; \
+ if (slowleak > psd + sgain) \
+ slowleak = psd + sgain; \
+} while (0)
+
+#define COMPUTE_MASK() \
+do { \
+ if (psd > dbknee) \
+ mask -= (psd - dbknee) >> 2; \
+ if (mask > hth [i >> halfrate]) \
+ mask = hth [i >> halfrate]; \
+ mask -= snroffset + 128 * deltba[i]; \
+ mask = (mask > 0) ? 0 : ((-mask) >> 5); \
+ mask -= floor; \
+} while (0)
+
+void a52_bit_allocate (a52_state_t * state, ba_t * ba, int bndstart,
+ int start, int end, int fastleak, int slowleak,
+ expbap_t * expbap)
+{
+ static int slowgain[4] = {0x540, 0x4d8, 0x478, 0x410};
+ static int dbpbtab[4] = {0xc00, 0x500, 0x300, 0x100};
+ static int floortab[8] = {0x910, 0x950, 0x990, 0x9d0,
+ 0xa10, 0xa90, 0xb10, 0x1400};
+
+ int i, j;
+ uint8_t * exp;
+ int8_t * bap;
+ int fdecay, fgain, sdecay, sgain, dbknee, floor, snroffset;
+ int psd, mask;
+ int8_t * deltba;
+ int * hth;
+ int halfrate;
+
+ halfrate = state->halfrate;
+ fdecay = (63 + 20 * ((state->bai >> 7) & 3)) >> halfrate; /* fdcycod */
+ fgain = 128 + 128 * (ba->bai & 7); /* fgaincod */
+ sdecay = (15 + 2 * (state->bai >> 9)) >> halfrate; /* sdcycod */
+ sgain = slowgain[(state->bai >> 5) & 3]; /* sgaincod */
+ dbknee = dbpbtab[(state->bai >> 3) & 3]; /* dbpbcod */
+ hth = hthtab[state->fscod];
+ /*
+ * if there is no delta bit allocation, make deltba point to an area
+ * known to contain zeroes. baptab+156 here.
+ */
+ deltba = (ba->deltbae == DELTA_BIT_NONE) ? baptab + 156 : ba->deltba;
+ floor = floortab[state->bai & 7]; /* floorcod */
+ snroffset = 960 - 64 * state->csnroffst - 4 * (ba->bai >> 3) + floor;
+ floor >>= 5;
+
+ exp = expbap->exp;
+ bap = expbap->bap;
+
+ i = bndstart;
+ j = start;
+ if (start == 0) { /* not the coupling channel */
+ int lowcomp;
+
+ lowcomp = 0;
+ j = end - 1;
+ do {
+ if (i < j) {
+ if (exp[i+1] == exp[i] - 2)
+ lowcomp = 384;
+ else if (lowcomp && (exp[i+1] > exp[i]))
+ lowcomp -= 64;
+ }
+ psd = 128 * exp[i];
+ mask = psd + fgain + lowcomp;
+ COMPUTE_MASK ();
+ bap[i] = (baptab+156)[mask + 4 * exp[i]];
+ i++;
+ } while ((i < 3) || ((i < 7) && (exp[i] > exp[i-1])));
+ fastleak = psd + fgain;
+ slowleak = psd + sgain;
+
+ while (i < 7) {
+ if (i < j) {
+ if (exp[i+1] == exp[i] - 2)
+ lowcomp = 384;
+ else if (lowcomp && (exp[i+1] > exp[i]))
+ lowcomp -= 64;
+ }
+ psd = 128 * exp[i];
+ UPDATE_LEAK ();
+ mask = ((fastleak + lowcomp < slowleak) ?
+ fastleak + lowcomp : slowleak);
+ COMPUTE_MASK ();
+ bap[i] = (baptab+156)[mask + 4 * exp[i]];
+ i++;
+ }
+
+ if (end == 7) /* lfe channel */
+ return;
+
+ do {
+ if (exp[i+1] == exp[i] - 2)
+ lowcomp = 320;
+ else if (lowcomp && (exp[i+1] > exp[i]))
+ lowcomp -= 64;
+ psd = 128 * exp[i];
+ UPDATE_LEAK ();
+ mask = ((fastleak + lowcomp < slowleak) ?
+ fastleak + lowcomp : slowleak);
+ COMPUTE_MASK ();
+ bap[i] = (baptab+156)[mask + 4 * exp[i]];
+ i++;
+ } while (i < 20);
+
+ while (lowcomp > 128) { /* two iterations maximum */
+ lowcomp -= 128;
+ psd = 128 * exp[i];
+ UPDATE_LEAK ();
+ mask = ((fastleak + lowcomp < slowleak) ?
+ fastleak + lowcomp : slowleak);
+ COMPUTE_MASK ();
+ bap[i] = (baptab+156)[mask + 4 * exp[i]];
+ i++;
+ }
+ j = i;
+ }
+
+ do {
+ int startband, endband;
+
+ startband = j;
+ endband = (bndtab[i-20] < end) ? bndtab[i-20] : end;
+ psd = 128 * exp[j++];
+ while (j < endband) {
+ int next, delta;
+
+ next = 128 * exp[j++];
+ delta = next - psd;
+ switch (delta >> 9) {
+ case -6: case -5: case -4: case -3: case -2:
+ psd = next;
+ break;
+ case -1:
+ psd = next + latab[(-delta) >> 1];
+ break;
+ case 0:
+ psd += latab[delta >> 1];
+ break;
+ }
+ }
+ /* minpsd = -289 */
+ UPDATE_LEAK ();
+ mask = (fastleak < slowleak) ? fastleak : slowleak;
+ COMPUTE_MASK ();
+ i++;
+ j = startband;
+ do {
+ /* max(mask+4*exp)=147=-(minpsd+fgain-deltba-snroffset)>>5+4*exp */
+ /* min(mask+4*exp)=-156=-(sgain-deltba-snroffset)>>5 */
+ bap[j] = (baptab+156)[mask + 4 * exp[j]];
+ } while (++j < endband);
+ } while (j < end);
+}
diff --git a/contrib/ffmpeg/libavcodec/liba52/bitstream.c b/contrib/ffmpeg/libavcodec/liba52/bitstream.c
new file mode 100644
index 000000000..f6b05c5e6
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/bitstream.c
@@ -0,0 +1,91 @@
+/*
+ * bitstream.c
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "a52.h"
+#include "a52_internal.h"
+#include "bitstream.h"
+
+#define BUFFER_SIZE 4096
+
+void a52_bitstream_set_ptr (a52_state_t * state, uint8_t * buf)
+{
+ int align;
+
+ align = (long)buf & 3;
+ state->buffer_start = (uint32_t *) (buf - align);
+ state->bits_left = 0;
+ state->current_word = 0;
+ bitstream_get (state, align * 8);
+}
+
+static inline void bitstream_fill_current (a52_state_t * state)
+{
+ uint32_t tmp;
+
+ tmp = *(state->buffer_start++);
+ state->current_word = swab32 (tmp);
+}
+
+/*
+ * The fast paths for _get are in the
+ * bitstream.h header file so they can be inlined.
+ *
+ * The "bottom half" of this routine is suffixed _bh
+ *
+ * -ah
+ */
+
+uint32_t a52_bitstream_get_bh (a52_state_t * state, uint32_t num_bits)
+{
+ uint32_t result;
+
+ num_bits -= state->bits_left;
+ result = ((state->current_word << (32 - state->bits_left)) >>
+ (32 - state->bits_left));
+
+ bitstream_fill_current (state);
+
+ if (num_bits != 0)
+ result = (result << num_bits) | (state->current_word >> (32 - num_bits));
+
+ state->bits_left = 32 - num_bits;
+
+ return result;
+}
+
+int32_t a52_bitstream_get_bh_2 (a52_state_t * state, uint32_t num_bits)
+{
+ int32_t result;
+
+ num_bits -= state->bits_left;
+ result = ((((int32_t)state->current_word) << (32 - state->bits_left)) >>
+ (32 - state->bits_left));
+
+ bitstream_fill_current(state);
+
+ if (num_bits != 0)
+ result = (result << num_bits) | (state->current_word >> (32 - num_bits));
+
+ state->bits_left = 32 - num_bits;
+
+ return result;
+}
diff --git a/contrib/ffmpeg/libavcodec/liba52/bitstream.h b/contrib/ffmpeg/libavcodec/liba52/bitstream.h
new file mode 100644
index 000000000..4a64bf3d9
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/bitstream.h
@@ -0,0 +1,77 @@
+/*
+ * bitstream.h
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* (stolen from the kernel) */
+#ifdef WORDS_BIGENDIAN
+
+# define swab32(x) (x)
+
+#else
+
+# if 0 && defined (__i386__)
+
+# define swab32(x) __i386_swab32(x)
+ static inline const uint32_t __i386_swab32(uint32_t x)
+ {
+ __asm__("bswap %0" : "=r" (x) : "0" (x));
+ return x;
+ }
+
+# else
+
+# define swab32(x)\
+((((uint8_t*)&x)[0] << 24) | (((uint8_t*)&x)[1] << 16) | \
+ (((uint8_t*)&x)[2] << 8) | (((uint8_t*)&x)[3]))
+
+# endif
+#endif
+
+void a52_bitstream_set_ptr (a52_state_t * state, uint8_t * buf);
+uint32_t a52_bitstream_get_bh (a52_state_t * state, uint32_t num_bits);
+int32_t a52_bitstream_get_bh_2 (a52_state_t * state, uint32_t num_bits);
+
+static inline uint32_t bitstream_get (a52_state_t * state, uint32_t num_bits)
+{
+ uint32_t result;
+
+ if (num_bits < state->bits_left) {
+ result = (state->current_word << (32 - state->bits_left)) >> (32 - num_bits);
+ state->bits_left -= num_bits;
+ return result;
+ }
+
+ return a52_bitstream_get_bh (state, num_bits);
+}
+
+static inline int32_t bitstream_get_2 (a52_state_t * state, uint32_t num_bits)
+{
+ int32_t result;
+
+ if (num_bits < state->bits_left) {
+ result = (((int32_t)state->current_word) << (32 - state->bits_left)) >> (32 - num_bits);
+ state->bits_left -= num_bits;
+ return result;
+ }
+
+ return a52_bitstream_get_bh_2 (state, num_bits);
+}
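
The split described in the bitstream.c comment above is the usual inlined-fast-path / out-of-line-refill pattern: bitstream_get() in this header serves any request that fits in the cached word and only calls the "_bh" bottom half when the cache runs dry. A self-contained sketch of the same pattern, using hypothetical names (mini_reader, mini_get) rather than the liba52 types, and refilling one byte at a time to stay short:

/* Editorial sketch with hypothetical names -- not liba52 API. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    const uint8_t *buf;      /* next byte to cache */
    uint32_t current_word;   /* cached byte; low 'bits_left' bits are unread */
    uint32_t bits_left;
} mini_reader;

/* slow path ("bottom half"): refill the cache as needed */
static uint32_t mini_get_bh (mini_reader *r, uint32_t n)
{
    uint32_t result = 0;

    while (n--) {
        if (r->bits_left == 0) {
            r->current_word = *r->buf++;
            r->bits_left = 8;
        }
        result = (result << 1) | ((r->current_word >> --r->bits_left) & 1);
    }
    return result;
}

/* fast path: small and inlinable at every call site, like bitstream_get() */
static inline uint32_t mini_get (mini_reader *r, uint32_t n)
{
    if (n < r->bits_left) {
        uint32_t result = (r->current_word >> (r->bits_left - n)) & ((1u << n) - 1);
        r->bits_left -= n;
        return result;
    }
    return mini_get_bh (r, n);
}

int main (void)
{
    const uint8_t frame[] = { 0x0b, 0x77 };      /* A/52 syncword */
    mini_reader r = { frame, 0, 0 };

    printf ("0x%04x\n", mini_get (&r, 16));      /* prints 0x0b77 */
    return 0;
}
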
diff --git a/contrib/ffmpeg/libavcodec/liba52/crc.c b/contrib/ffmpeg/libavcodec/liba52/crc.c
new file mode 100644
index 000000000..1ec4b085f
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/crc.c
@@ -0,0 +1,73 @@
+/*
+ * crc.c
+ *
+ * Copyright (C) Aaron Holtzman - May 1999
+ *
+ * This file is part of ac3dec, a free Dolby AC-3 stream decoder.
+ *
+ * ac3dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * ac3dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Make; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <inttypes.h>
+
+static const uint16_t crc_lut[256] =
+{
+ 0x0000,0x8005,0x800f,0x000a,0x801b,0x001e,0x0014,0x8011,
+ 0x8033,0x0036,0x003c,0x8039,0x0028,0x802d,0x8027,0x0022,
+ 0x8063,0x0066,0x006c,0x8069,0x0078,0x807d,0x8077,0x0072,
+ 0x0050,0x8055,0x805f,0x005a,0x804b,0x004e,0x0044,0x8041,
+ 0x80c3,0x00c6,0x00cc,0x80c9,0x00d8,0x80dd,0x80d7,0x00d2,
+ 0x00f0,0x80f5,0x80ff,0x00fa,0x80eb,0x00ee,0x00e4,0x80e1,
+ 0x00a0,0x80a5,0x80af,0x00aa,0x80bb,0x00be,0x00b4,0x80b1,
+ 0x8093,0x0096,0x009c,0x8099,0x0088,0x808d,0x8087,0x0082,
+ 0x8183,0x0186,0x018c,0x8189,0x0198,0x819d,0x8197,0x0192,
+ 0x01b0,0x81b5,0x81bf,0x01ba,0x81ab,0x01ae,0x01a4,0x81a1,
+ 0x01e0,0x81e5,0x81ef,0x01ea,0x81fb,0x01fe,0x01f4,0x81f1,
+ 0x81d3,0x01d6,0x01dc,0x81d9,0x01c8,0x81cd,0x81c7,0x01c2,
+ 0x0140,0x8145,0x814f,0x014a,0x815b,0x015e,0x0154,0x8151,
+ 0x8173,0x0176,0x017c,0x8179,0x0168,0x816d,0x8167,0x0162,
+ 0x8123,0x0126,0x012c,0x8129,0x0138,0x813d,0x8137,0x0132,
+ 0x0110,0x8115,0x811f,0x011a,0x810b,0x010e,0x0104,0x8101,
+ 0x8303,0x0306,0x030c,0x8309,0x0318,0x831d,0x8317,0x0312,
+ 0x0330,0x8335,0x833f,0x033a,0x832b,0x032e,0x0324,0x8321,
+ 0x0360,0x8365,0x836f,0x036a,0x837b,0x037e,0x0374,0x8371,
+ 0x8353,0x0356,0x035c,0x8359,0x0348,0x834d,0x8347,0x0342,
+ 0x03c0,0x83c5,0x83cf,0x03ca,0x83db,0x03de,0x03d4,0x83d1,
+ 0x83f3,0x03f6,0x03fc,0x83f9,0x03e8,0x83ed,0x83e7,0x03e2,
+ 0x83a3,0x03a6,0x03ac,0x83a9,0x03b8,0x83bd,0x83b7,0x03b2,
+ 0x0390,0x8395,0x839f,0x039a,0x838b,0x038e,0x0384,0x8381,
+ 0x0280,0x8285,0x828f,0x028a,0x829b,0x029e,0x0294,0x8291,
+ 0x82b3,0x02b6,0x02bc,0x82b9,0x02a8,0x82ad,0x82a7,0x02a2,
+ 0x82e3,0x02e6,0x02ec,0x82e9,0x02f8,0x82fd,0x82f7,0x02f2,
+ 0x02d0,0x82d5,0x82df,0x02da,0x82cb,0x02ce,0x02c4,0x82c1,
+ 0x8243,0x0246,0x024c,0x8249,0x0258,0x825d,0x8257,0x0252,
+ 0x0270,0x8275,0x827f,0x027a,0x826b,0x026e,0x0264,0x8261,
+ 0x0220,0x8225,0x822f,0x022a,0x823b,0x023e,0x0234,0x8231,
+ 0x8213,0x0216,0x021c,0x8219,0x0208,0x820d,0x8207,0x0202
+};
+
+uint16_t a52_crc16_block(uint8_t *data,uint32_t num_bytes)
+{
+ uint32_t i;
+ uint16_t state=0;
+
+ for(i=0;i<num_bytes;i++)
+ state = crc_lut[data[i] ^ (state>>8)] ^ (state<<8);
+
+ return state;
+}
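
a52_crc16_block() above is a plain MSB-first, zero-initialised CRC-16 over polynomial 0x8005. One consequence, useful for frame checking, is that a buffer with its own CRC appended big-endian hashes back to zero; which byte ranges of an A/52 frame the CRCs actually cover is defined by the spec and is outside this sketch. A small check program (assumes it is linked against crc.c above):

/* Editorial sketch: the append-CRC-then-rehash-to-zero property. */
#include <stdint.h>
#include <stdio.h>

uint16_t a52_crc16_block (uint8_t *data, uint32_t num_bytes);  /* a52_util.h */

int main (void)
{
    uint8_t buf[8] = { 0x0b, 0x77, 0x12, 0x34, 0x56, 0x78 };   /* 6 payload bytes */
    uint16_t crc = a52_crc16_block (buf, 6);

    buf[6] = crc >> 8;          /* append the CRC, high byte first */
    buf[7] = crc & 0xff;

    printf ("%s\n", a52_crc16_block (buf, 8) == 0 ? "ok" : "corrupt");
    return 0;
}
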
diff --git a/contrib/ffmpeg/libavcodec/liba52/downmix.c b/contrib/ffmpeg/libavcodec/liba52/downmix.c
new file mode 100644
index 000000000..7999b7db0
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/downmix.c
@@ -0,0 +1,679 @@
+/*
+ * downmix.c
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "a52.h"
+#include "a52_internal.h"
+
+#define CONVERT(acmod,output) (((output) << 3) + (acmod))
+
+int a52_downmix_init (int input, int flags, level_t * level,
+ level_t clev, level_t slev)
+{
+ static uint8_t table[11][8] = {
+ {A52_CHANNEL, A52_DOLBY, A52_STEREO, A52_STEREO,
+ A52_STEREO, A52_STEREO, A52_STEREO, A52_STEREO},
+ {A52_MONO, A52_MONO, A52_MONO, A52_MONO,
+ A52_MONO, A52_MONO, A52_MONO, A52_MONO},
+ {A52_CHANNEL, A52_DOLBY, A52_STEREO, A52_STEREO,
+ A52_STEREO, A52_STEREO, A52_STEREO, A52_STEREO},
+ {A52_CHANNEL, A52_DOLBY, A52_STEREO, A52_3F,
+ A52_STEREO, A52_3F, A52_STEREO, A52_3F},
+ {A52_CHANNEL, A52_DOLBY, A52_STEREO, A52_STEREO,
+ A52_2F1R, A52_2F1R, A52_2F1R, A52_2F1R},
+ {A52_CHANNEL, A52_DOLBY, A52_STEREO, A52_STEREO,
+ A52_2F1R, A52_3F1R, A52_2F1R, A52_3F1R},
+ {A52_CHANNEL, A52_DOLBY, A52_STEREO, A52_3F,
+ A52_2F2R, A52_2F2R, A52_2F2R, A52_2F2R},
+ {A52_CHANNEL, A52_DOLBY, A52_STEREO, A52_3F,
+ A52_2F2R, A52_3F2R, A52_2F2R, A52_3F2R},
+ {A52_CHANNEL1, A52_MONO, A52_MONO, A52_MONO,
+ A52_MONO, A52_MONO, A52_MONO, A52_MONO},
+ {A52_CHANNEL2, A52_MONO, A52_MONO, A52_MONO,
+ A52_MONO, A52_MONO, A52_MONO, A52_MONO},
+ {A52_CHANNEL, A52_DOLBY, A52_STEREO, A52_DOLBY,
+ A52_DOLBY, A52_DOLBY, A52_DOLBY, A52_DOLBY}
+ };
+ int output;
+
+ output = flags & A52_CHANNEL_MASK;
+ if (output > A52_DOLBY)
+ return -1;
+
+ output = table[output][input & 7];
+
+ if (output == A52_STEREO &&
+ (input == A52_DOLBY || (input == A52_3F && clev == LEVEL (LEVEL_3DB))))
+ output = A52_DOLBY;
+
+ if (flags & A52_ADJUST_LEVEL) {
+ level_t adjust;
+
+ switch (CONVERT (input & 7, output)) {
+
+ case CONVERT (A52_3F, A52_MONO):
+ adjust = DIV (LEVEL_3DB, LEVEL (1) + clev);
+ break;
+
+ case CONVERT (A52_STEREO, A52_MONO):
+ case CONVERT (A52_2F2R, A52_2F1R):
+ case CONVERT (A52_3F2R, A52_3F1R):
+ level_3db:
+ adjust = LEVEL (LEVEL_3DB);
+ break;
+
+ case CONVERT (A52_3F2R, A52_2F1R):
+ if (clev < LEVEL (LEVEL_PLUS3DB - 1))
+ goto level_3db;
+ /* fall through */
+ case CONVERT (A52_3F, A52_STEREO):
+ case CONVERT (A52_3F1R, A52_2F1R):
+ case CONVERT (A52_3F1R, A52_2F2R):
+ case CONVERT (A52_3F2R, A52_2F2R):
+ adjust = DIV (1, LEVEL (1) + clev);
+ break;
+
+ case CONVERT (A52_2F1R, A52_MONO):
+ adjust = DIV (LEVEL_PLUS3DB, LEVEL (2) + slev);
+ break;
+
+ case CONVERT (A52_2F1R, A52_STEREO):
+ case CONVERT (A52_3F1R, A52_3F):
+ adjust = DIV (1, LEVEL (1) + MUL_C (slev, LEVEL_3DB));
+ break;
+
+ case CONVERT (A52_3F1R, A52_MONO):
+ adjust = DIV (LEVEL_3DB, LEVEL (1) + clev + MUL_C (slev, 0.5));
+ break;
+
+ case CONVERT (A52_3F1R, A52_STEREO):
+ adjust = DIV (1, LEVEL (1) + clev + MUL_C (slev, LEVEL_3DB));
+ break;
+
+ case CONVERT (A52_2F2R, A52_MONO):
+ adjust = DIV (LEVEL_3DB, LEVEL (1) + slev);
+ break;
+
+ case CONVERT (A52_2F2R, A52_STEREO):
+ case CONVERT (A52_3F2R, A52_3F):
+ adjust = DIV (1, LEVEL (1) + slev);
+ break;
+
+ case CONVERT (A52_3F2R, A52_MONO):
+ adjust = DIV (LEVEL_3DB, LEVEL (1) + clev + slev);
+ break;
+
+ case CONVERT (A52_3F2R, A52_STEREO):
+ adjust = DIV (1, LEVEL (1) + clev + slev);
+ break;
+
+ case CONVERT (A52_MONO, A52_DOLBY):
+ adjust = LEVEL (LEVEL_PLUS3DB);
+ break;
+
+ case CONVERT (A52_3F, A52_DOLBY):
+ case CONVERT (A52_2F1R, A52_DOLBY):
+ adjust = LEVEL (1 / (1 + LEVEL_3DB));
+ break;
+
+ case CONVERT (A52_3F1R, A52_DOLBY):
+ case CONVERT (A52_2F2R, A52_DOLBY):
+ adjust = LEVEL (1 / (1 + 2 * LEVEL_3DB));
+ break;
+
+ case CONVERT (A52_3F2R, A52_DOLBY):
+ adjust = LEVEL (1 / (1 + 3 * LEVEL_3DB));
+ break;
+
+ default:
+ return output;
+ }
+
+ *level = MUL_L (*level, adjust);
+ }
+
+ return output;
+}
+
+int a52_downmix_coeff (level_t * coeff, int acmod, int output, level_t level,
+ level_t clev, level_t slev)
+{
+ level_t level_3db;
+
+ level_3db = MUL_C (level, LEVEL_3DB);
+
+ switch (CONVERT (acmod, output & A52_CHANNEL_MASK)) {
+
+ case CONVERT (A52_CHANNEL, A52_CHANNEL):
+ case CONVERT (A52_MONO, A52_MONO):
+ case CONVERT (A52_STEREO, A52_STEREO):
+ case CONVERT (A52_3F, A52_3F):
+ case CONVERT (A52_2F1R, A52_2F1R):
+ case CONVERT (A52_3F1R, A52_3F1R):
+ case CONVERT (A52_2F2R, A52_2F2R):
+ case CONVERT (A52_3F2R, A52_3F2R):
+ case CONVERT (A52_STEREO, A52_DOLBY):
+ coeff[0] = coeff[1] = coeff[2] = coeff[3] = coeff[4] = level;
+ return 0;
+
+ case CONVERT (A52_CHANNEL, A52_MONO):
+ coeff[0] = coeff[1] = MUL_C (level, LEVEL_6DB);
+ return 3;
+
+ case CONVERT (A52_STEREO, A52_MONO):
+ coeff[0] = coeff[1] = level_3db;
+ return 3;
+
+ case CONVERT (A52_3F, A52_MONO):
+ coeff[0] = coeff[2] = level_3db;
+ coeff[1] = MUL_C (MUL_L (level_3db, clev), LEVEL_PLUS6DB);
+ return 7;
+
+ case CONVERT (A52_2F1R, A52_MONO):
+ coeff[0] = coeff[1] = level_3db;
+ coeff[2] = MUL_L (level_3db, slev);
+ return 7;
+
+ case CONVERT (A52_2F2R, A52_MONO):
+ coeff[0] = coeff[1] = level_3db;
+ coeff[2] = coeff[3] = MUL_L (level_3db, slev);
+ return 15;
+
+ case CONVERT (A52_3F1R, A52_MONO):
+ coeff[0] = coeff[2] = level_3db;
+ coeff[1] = MUL_C (MUL_L (level_3db, clev), LEVEL_PLUS6DB);
+ coeff[3] = MUL_L (level_3db, slev);
+ return 15;
+
+ case CONVERT (A52_3F2R, A52_MONO):
+ coeff[0] = coeff[2] = level_3db;
+ coeff[1] = MUL_C (MUL_L (level_3db, clev), LEVEL_PLUS6DB);
+ coeff[3] = coeff[4] = MUL_L (level_3db, slev);
+ return 31;
+
+ case CONVERT (A52_MONO, A52_DOLBY):
+ coeff[0] = level_3db;
+ return 0;
+
+ case CONVERT (A52_3F, A52_DOLBY):
+ coeff[0] = coeff[2] = coeff[3] = coeff[4] = level;
+ coeff[1] = level_3db;
+ return 7;
+
+ case CONVERT (A52_3F, A52_STEREO):
+ case CONVERT (A52_3F1R, A52_2F1R):
+ case CONVERT (A52_3F2R, A52_2F2R):
+ coeff[0] = coeff[2] = coeff[3] = coeff[4] = level;
+ coeff[1] = MUL_L (level, clev);
+ return 7;
+
+ case CONVERT (A52_2F1R, A52_DOLBY):
+ coeff[0] = coeff[1] = level;
+ coeff[2] = level_3db;
+ return 7;
+
+ case CONVERT (A52_2F1R, A52_STEREO):
+ coeff[0] = coeff[1] = level;
+ coeff[2] = MUL_L (level_3db, slev);
+ return 7;
+
+ case CONVERT (A52_3F1R, A52_DOLBY):
+ coeff[0] = coeff[2] = level;
+ coeff[1] = coeff[3] = level_3db;
+ return 15;
+
+ case CONVERT (A52_3F1R, A52_STEREO):
+ coeff[0] = coeff[2] = level;
+ coeff[1] = MUL_L (level, clev);
+ coeff[3] = MUL_L (level_3db, slev);
+ return 15;
+
+ case CONVERT (A52_2F2R, A52_DOLBY):
+ coeff[0] = coeff[1] = level;
+ coeff[2] = coeff[3] = level_3db;
+ return 15;
+
+ case CONVERT (A52_2F2R, A52_STEREO):
+ coeff[0] = coeff[1] = level;
+ coeff[2] = coeff[3] = MUL_L (level, slev);
+ return 15;
+
+ case CONVERT (A52_3F2R, A52_DOLBY):
+ coeff[0] = coeff[2] = level;
+ coeff[1] = coeff[3] = coeff[4] = level_3db;
+ return 31;
+
+ case CONVERT (A52_3F2R, A52_2F1R):
+ coeff[0] = coeff[2] = level;
+ coeff[1] = MUL_L (level, clev);
+ coeff[3] = coeff[4] = level_3db;
+ return 31;
+
+ case CONVERT (A52_3F2R, A52_STEREO):
+ coeff[0] = coeff[2] = level;
+ coeff[1] = MUL_L (level, clev);
+ coeff[3] = coeff[4] = MUL_L (level, slev);
+ return 31;
+
+ case CONVERT (A52_3F1R, A52_3F):
+ coeff[0] = coeff[1] = coeff[2] = level;
+ coeff[3] = MUL_L (level_3db, slev);
+ return 13;
+
+ case CONVERT (A52_3F2R, A52_3F):
+ coeff[0] = coeff[1] = coeff[2] = level;
+ coeff[3] = coeff[4] = MUL_L (level, slev);
+ return 29;
+
+ case CONVERT (A52_2F2R, A52_2F1R):
+ coeff[0] = coeff[1] = level;
+ coeff[2] = coeff[3] = level_3db;
+ return 12;
+
+ case CONVERT (A52_3F2R, A52_3F1R):
+ coeff[0] = coeff[1] = coeff[2] = level;
+ coeff[3] = coeff[4] = level_3db;
+ return 24;
+
+ case CONVERT (A52_2F1R, A52_2F2R):
+ coeff[0] = coeff[1] = level;
+ coeff[2] = level_3db;
+ return 0;
+
+ case CONVERT (A52_3F1R, A52_2F2R):
+ coeff[0] = coeff[2] = level;
+ coeff[1] = MUL_L (level, clev);
+ coeff[3] = level_3db;
+ return 7;
+
+ case CONVERT (A52_3F1R, A52_3F2R):
+ coeff[0] = coeff[1] = coeff[2] = level;
+ coeff[3] = level_3db;
+ return 0;
+
+ case CONVERT (A52_CHANNEL, A52_CHANNEL1):
+ coeff[0] = level;
+ coeff[1] = 0;
+ return 0;
+
+ case CONVERT (A52_CHANNEL, A52_CHANNEL2):
+ coeff[0] = 0;
+ coeff[1] = level;
+ return 0;
+ }
+
+ return -1; /* NOTREACHED */
+}
+
+static void mix2to1 (sample_t * dest, sample_t * src, sample_t bias)
+{
+ int i;
+
+ for (i = 0; i < 256; i++)
+ dest[i] += BIAS (src[i]);
+}
+
+static void mix3to1 (sample_t * samples, sample_t bias)
+{
+ int i;
+
+ for (i = 0; i < 256; i++)
+ samples[i] += BIAS (samples[i + 256] + samples[i + 512]);
+}
+
+static void mix4to1 (sample_t * samples, sample_t bias)
+{
+ int i;
+
+ for (i = 0; i < 256; i++)
+ samples[i] += BIAS (samples[i + 256] + samples[i + 512] +
+ samples[i + 768]);
+}
+
+static void mix5to1 (sample_t * samples, sample_t bias)
+{
+ int i;
+
+ for (i = 0; i < 256; i++)
+ samples[i] += BIAS (samples[i + 256] + samples[i + 512] +
+ samples[i + 768] + samples[i + 1024]);
+}
+
+static void mix3to2 (sample_t * samples, sample_t bias)
+{
+ int i;
+ sample_t common;
+
+ for (i = 0; i < 256; i++) {
+ common = BIAS (samples[i + 256]);
+ samples[i] += common;
+ samples[i + 256] = samples[i + 512] + common;
+ }
+}
+
+static void mix21to2 (sample_t * left, sample_t * right, sample_t bias)
+{
+ int i;
+ sample_t common;
+
+ for (i = 0; i < 256; i++) {
+ common = BIAS (right[i + 256]);
+ left[i] += common;
+ right[i] += common;
+ }
+}
+
+static void mix21toS (sample_t * samples, sample_t bias)
+{
+ int i;
+ sample_t surround;
+
+ for (i = 0; i < 256; i++) {
+ surround = samples[i + 512];
+ samples[i] += BIAS (-surround);
+ samples[i + 256] += BIAS (surround);
+ }
+}
+
+static void mix31to2 (sample_t * samples, sample_t bias)
+{
+ int i;
+ sample_t common;
+
+ for (i = 0; i < 256; i++) {
+ common = BIAS (samples[i + 256] + samples[i + 768]);
+ samples[i] += common;
+ samples[i + 256] = samples[i + 512] + common;
+ }
+}
+
+static void mix31toS (sample_t * samples, sample_t bias)
+{
+ int i;
+ sample_t common, surround;
+
+ for (i = 0; i < 256; i++) {
+ common = BIAS (samples[i + 256]);
+ surround = samples[i + 768];
+ samples[i] += common - surround;
+ samples[i + 256] = samples[i + 512] + common + surround;
+ }
+}
+
+static void mix22toS (sample_t * samples, sample_t bias)
+{
+ int i;
+ sample_t surround;
+
+ for (i = 0; i < 256; i++) {
+ surround = samples[i + 512] + samples[i + 768];
+ samples[i] += BIAS (-surround);
+ samples[i + 256] += BIAS (surround);
+ }
+}
+
+static void mix32to2 (sample_t * samples, sample_t bias)
+{
+ int i;
+ sample_t common;
+
+ for (i = 0; i < 256; i++) {
+ common = BIAS (samples[i + 256]);
+ samples[i] += common + samples[i + 768];
+ samples[i + 256] = common + samples[i + 512] + samples[i + 1024];
+ }
+}
+
+static void mix32toS (sample_t * samples, sample_t bias)
+{
+ int i;
+ sample_t common, surround;
+
+ for (i = 0; i < 256; i++) {
+ common = BIAS (samples[i + 256]);
+ surround = samples[i + 768] + samples[i + 1024];
+ samples[i] += common - surround;
+ samples[i + 256] = samples[i + 512] + common + surround;
+ }
+}
+
+static void move2to1 (sample_t * src, sample_t * dest, sample_t bias)
+{
+ int i;
+
+ for (i = 0; i < 256; i++)
+ dest[i] = BIAS (src[i] + src[i + 256]);
+}
+
+static void zero (sample_t * samples)
+{
+ int i;
+
+ for (i = 0; i < 256; i++)
+ samples[i] = 0;
+}
+
+void a52_downmix (sample_t * samples, int acmod, int output, sample_t bias,
+ level_t clev, level_t slev)
+{
+ switch (CONVERT (acmod, output & A52_CHANNEL_MASK)) {
+
+ case CONVERT (A52_CHANNEL, A52_CHANNEL2):
+ memcpy (samples, samples + 256, 256 * sizeof (sample_t));
+ break;
+
+ case CONVERT (A52_CHANNEL, A52_MONO):
+ case CONVERT (A52_STEREO, A52_MONO):
+ mix_2to1:
+ mix2to1 (samples, samples + 256, bias);
+ break;
+
+ case CONVERT (A52_2F1R, A52_MONO):
+ if (slev == 0)
+ goto mix_2to1;
+ case CONVERT (A52_3F, A52_MONO):
+ mix_3to1:
+ mix3to1 (samples, bias);
+ break;
+
+ case CONVERT (A52_3F1R, A52_MONO):
+ if (slev == 0)
+ goto mix_3to1;
+ case CONVERT (A52_2F2R, A52_MONO):
+ if (slev == 0)
+ goto mix_2to1;
+ mix4to1 (samples, bias);
+ break;
+
+ case CONVERT (A52_3F2R, A52_MONO):
+ if (slev == 0)
+ goto mix_3to1;
+ mix5to1 (samples, bias);
+ break;
+
+ case CONVERT (A52_MONO, A52_DOLBY):
+ memcpy (samples + 256, samples, 256 * sizeof (sample_t));
+ break;
+
+ case CONVERT (A52_3F, A52_STEREO):
+ case CONVERT (A52_3F, A52_DOLBY):
+ mix_3to2:
+ mix3to2 (samples, bias);
+ break;
+
+ case CONVERT (A52_2F1R, A52_STEREO):
+ if (slev == 0)
+ break;
+ mix21to2 (samples, samples + 256, bias);
+ break;
+
+ case CONVERT (A52_2F1R, A52_DOLBY):
+ mix21toS (samples, bias);
+ break;
+
+ case CONVERT (A52_3F1R, A52_STEREO):
+ if (slev == 0)
+ goto mix_3to2;
+ mix31to2 (samples, bias);
+ break;
+
+ case CONVERT (A52_3F1R, A52_DOLBY):
+ mix31toS (samples, bias);
+ break;
+
+ case CONVERT (A52_2F2R, A52_STEREO):
+ if (slev == 0)
+ break;
+ mix2to1 (samples, samples + 512, bias);
+ mix2to1 (samples + 256, samples + 768, bias);
+ break;
+
+ case CONVERT (A52_2F2R, A52_DOLBY):
+ mix22toS (samples, bias);
+ break;
+
+ case CONVERT (A52_3F2R, A52_STEREO):
+ if (slev == 0)
+ goto mix_3to2;
+ mix32to2 (samples, bias);
+ break;
+
+ case CONVERT (A52_3F2R, A52_DOLBY):
+ mix32toS (samples, bias);
+ break;
+
+ case CONVERT (A52_3F1R, A52_3F):
+ if (slev == 0)
+ break;
+ mix21to2 (samples, samples + 512, bias);
+ break;
+
+ case CONVERT (A52_3F2R, A52_3F):
+ if (slev == 0)
+ break;
+ mix2to1 (samples, samples + 768, bias);
+ mix2to1 (samples + 512, samples + 1024, bias);
+ break;
+
+ case CONVERT (A52_3F1R, A52_2F1R):
+ mix3to2 (samples, bias);
+ memcpy (samples + 512, samples + 768, 256 * sizeof (sample_t));
+ break;
+
+ case CONVERT (A52_2F2R, A52_2F1R):
+ mix2to1 (samples + 512, samples + 768, bias);
+ break;
+
+ case CONVERT (A52_3F2R, A52_2F1R):
+ mix3to2 (samples, bias);
+ move2to1 (samples + 768, samples + 512, bias);
+ break;
+
+ case CONVERT (A52_3F2R, A52_3F1R):
+ mix2to1 (samples + 768, samples + 1024, bias);
+ break;
+
+ case CONVERT (A52_2F1R, A52_2F2R):
+ memcpy (samples + 768, samples + 512, 256 * sizeof (sample_t));
+ break;
+
+ case CONVERT (A52_3F1R, A52_2F2R):
+ mix3to2 (samples, bias);
+ memcpy (samples + 512, samples + 768, 256 * sizeof (sample_t));
+ break;
+
+ case CONVERT (A52_3F2R, A52_2F2R):
+ mix3to2 (samples, bias);
+ memcpy (samples + 512, samples + 768, 256 * sizeof (sample_t));
+ memcpy (samples + 768, samples + 1024, 256 * sizeof (sample_t));
+ break;
+
+ case CONVERT (A52_3F1R, A52_3F2R):
+ memcpy (samples + 1024, samples + 768, 256 * sizeof (sample_t));
+ break;
+ }
+}
+
+void a52_upmix (sample_t * samples, int acmod, int output)
+{
+ switch (CONVERT (acmod, output & A52_CHANNEL_MASK)) {
+
+ case CONVERT (A52_CHANNEL, A52_CHANNEL2):
+ memcpy (samples + 256, samples, 256 * sizeof (sample_t));
+ break;
+
+ case CONVERT (A52_3F2R, A52_MONO):
+ zero (samples + 1024);
+ case CONVERT (A52_3F1R, A52_MONO):
+ case CONVERT (A52_2F2R, A52_MONO):
+ zero (samples + 768);
+ case CONVERT (A52_3F, A52_MONO):
+ case CONVERT (A52_2F1R, A52_MONO):
+ zero (samples + 512);
+ case CONVERT (A52_CHANNEL, A52_MONO):
+ case CONVERT (A52_STEREO, A52_MONO):
+ zero (samples + 256);
+ break;
+
+ case CONVERT (A52_3F2R, A52_STEREO):
+ case CONVERT (A52_3F2R, A52_DOLBY):
+ zero (samples + 1024);
+ case CONVERT (A52_3F1R, A52_STEREO):
+ case CONVERT (A52_3F1R, A52_DOLBY):
+ zero (samples + 768);
+ case CONVERT (A52_3F, A52_STEREO):
+ case CONVERT (A52_3F, A52_DOLBY):
+ mix_3to2:
+ memcpy (samples + 512, samples + 256, 256 * sizeof (sample_t));
+ zero (samples + 256);
+ break;
+
+ case CONVERT (A52_2F2R, A52_STEREO):
+ case CONVERT (A52_2F2R, A52_DOLBY):
+ zero (samples + 768);
+ case CONVERT (A52_2F1R, A52_STEREO):
+ case CONVERT (A52_2F1R, A52_DOLBY):
+ zero (samples + 512);
+ break;
+
+ case CONVERT (A52_3F2R, A52_3F):
+ zero (samples + 1024);
+ case CONVERT (A52_3F1R, A52_3F):
+ case CONVERT (A52_2F2R, A52_2F1R):
+ zero (samples + 768);
+ break;
+
+ case CONVERT (A52_3F2R, A52_3F1R):
+ zero (samples + 1024);
+ break;
+
+ case CONVERT (A52_3F2R, A52_2F1R):
+ zero (samples + 1024);
+ case CONVERT (A52_3F1R, A52_2F1R):
+ mix_31to21:
+ memcpy (samples + 768, samples + 512, 256 * sizeof (sample_t));
+ goto mix_3to2;
+
+ case CONVERT (A52_3F2R, A52_2F2R):
+ memcpy (samples + 1024, samples + 768, 256 * sizeof (sample_t));
+ goto mix_31to21;
+ }
+}
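
The tables above make a52_downmix_init() a negotiation: the caller proposes an output mode in flags, the function returns the mode it can actually produce for the given input acmod, and with A52_ADJUST_LEVEL it rescales the caller's level so the summed channels stay within the original range. A fragment illustrating that contract (editorial, not part of the commit; assumes it is compiled inside liba52 with a52.h and a52_internal.h included):

/* Editorial fragment: caller-side view of a52_downmix_init() above. */
static int request_stereo_downmix (level_t * level)
{
    int flags = A52_STEREO | A52_ADJUST_LEVEL;

    /* 3/2 source with -3dB centre and surround mix levels */
    int output = a52_downmix_init (A52_3F2R, flags, level,
                                   LEVEL (LEVEL_3DB), LEVEL (LEVEL_3DB));

    /* output is A52_STEREO here, and *level has been divided by
     * 1 + clev + slev (the A52_3F2R -> A52_STEREO case above) */
    return output;
}
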
diff --git a/contrib/ffmpeg/libavcodec/liba52/imdct.c b/contrib/ffmpeg/libavcodec/liba52/imdct.c
new file mode 100644
index 000000000..21a2a6565
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/imdct.c
@@ -0,0 +1,411 @@
+/*
+ * imdct.c
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * The ifft algorithms in this file have been largely inspired by Dan
+ * Bernstein's work, djbfft, available at http://cr.yp.to/djbfft.html
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "a52.h"
+#include "a52_internal.h"
+#include "mm_accel.h"
+
+typedef struct complex_s {
+ sample_t real;
+ sample_t imag;
+} complex_t;
+
+static uint8_t fftorder[] = {
+ 0,128, 64,192, 32,160,224, 96, 16,144, 80,208,240,112, 48,176,
+ 8,136, 72,200, 40,168,232,104,248,120, 56,184, 24,152,216, 88,
+ 4,132, 68,196, 36,164,228,100, 20,148, 84,212,244,116, 52,180,
+ 252,124, 60,188, 28,156,220, 92, 12,140, 76,204,236,108, 44,172,
+ 2,130, 66,194, 34,162,226, 98, 18,146, 82,210,242,114, 50,178,
+ 10,138, 74,202, 42,170,234,106,250,122, 58,186, 26,154,218, 90,
+ 254,126, 62,190, 30,158,222, 94, 14,142, 78,206,238,110, 46,174,
+ 6,134, 70,198, 38,166,230,102,246,118, 54,182, 22,150,214, 86
+};
+
+/* Root values for IFFT */
+static sample_t roots16[3];
+static sample_t roots32[7];
+static sample_t roots64[15];
+static sample_t roots128[31];
+
+/* Twiddle factors for IMDCT */
+static complex_t pre1[128];
+static complex_t post1[64];
+static complex_t pre2[64];
+static complex_t post2[32];
+
+static sample_t a52_imdct_window[256];
+
+static void (* ifft128) (complex_t * buf);
+static void (* ifft64) (complex_t * buf);
+
+static inline void ifft2 (complex_t * buf)
+{
+ sample_t r, i;
+
+ r = buf[0].real;
+ i = buf[0].imag;
+ buf[0].real += buf[1].real;
+ buf[0].imag += buf[1].imag;
+ buf[1].real = r - buf[1].real;
+ buf[1].imag = i - buf[1].imag;
+}
+
+static inline void ifft4 (complex_t * buf)
+{
+ sample_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+
+ tmp1 = buf[0].real + buf[1].real;
+ tmp2 = buf[3].real + buf[2].real;
+ tmp3 = buf[0].imag + buf[1].imag;
+ tmp4 = buf[2].imag + buf[3].imag;
+ tmp5 = buf[0].real - buf[1].real;
+ tmp6 = buf[0].imag - buf[1].imag;
+ tmp7 = buf[2].imag - buf[3].imag;
+ tmp8 = buf[3].real - buf[2].real;
+
+ buf[0].real = tmp1 + tmp2;
+ buf[0].imag = tmp3 + tmp4;
+ buf[2].real = tmp1 - tmp2;
+ buf[2].imag = tmp3 - tmp4;
+ buf[1].real = tmp5 + tmp7;
+ buf[1].imag = tmp6 + tmp8;
+ buf[3].real = tmp5 - tmp7;
+ buf[3].imag = tmp6 - tmp8;
+}
+
+/* basic radix-2 ifft butterfly */
+
+#define BUTTERFLY_0(t0,t1,W0,W1,d0,d1) do { \
+ t0 = MUL (W1, d1) + MUL (W0, d0); \
+ t1 = MUL (W0, d1) - MUL (W1, d0); \
+} while (0)
+
+/* radix-2 ifft butterfly with bias */
+
+#define BUTTERFLY_B(t0,t1,W0,W1,d0,d1) do { \
+ t0 = BIAS (MUL (d1, W1) + MUL (d0, W0)); \
+ t1 = BIAS (MUL (d1, W0) - MUL (d0, W1)); \
+} while (0)
+
+/* the basic split-radix ifft butterfly */
+
+#define BUTTERFLY(a0,a1,a2,a3,wr,wi) do { \
+ BUTTERFLY_0 (tmp5, tmp6, wr, wi, a2.real, a2.imag); \
+ BUTTERFLY_0 (tmp8, tmp7, wr, wi, a3.imag, a3.real); \
+ tmp1 = tmp5 + tmp7; \
+ tmp2 = tmp6 + tmp8; \
+ tmp3 = tmp6 - tmp8; \
+ tmp4 = tmp7 - tmp5; \
+ a2.real = a0.real - tmp1; \
+ a2.imag = a0.imag - tmp2; \
+ a3.real = a1.real - tmp3; \
+ a3.imag = a1.imag - tmp4; \
+ a0.real += tmp1; \
+ a0.imag += tmp2; \
+ a1.real += tmp3; \
+ a1.imag += tmp4; \
+} while (0)
+
+/* split-radix ifft butterfly, specialized for wr=1 wi=0 */
+
+#define BUTTERFLY_ZERO(a0,a1,a2,a3) do { \
+ tmp1 = a2.real + a3.real; \
+ tmp2 = a2.imag + a3.imag; \
+ tmp3 = a2.imag - a3.imag; \
+ tmp4 = a3.real - a2.real; \
+ a2.real = a0.real - tmp1; \
+ a2.imag = a0.imag - tmp2; \
+ a3.real = a1.real - tmp3; \
+ a3.imag = a1.imag - tmp4; \
+ a0.real += tmp1; \
+ a0.imag += tmp2; \
+ a1.real += tmp3; \
+ a1.imag += tmp4; \
+} while (0)
+
+/* split-radix ifft butterfly, specialized for wr=wi */
+
+#define BUTTERFLY_HALF(a0,a1,a2,a3,w) do { \
+ tmp5 = MUL (a2.real + a2.imag, w); \
+ tmp6 = MUL (a2.imag - a2.real, w); \
+ tmp7 = MUL (a3.real - a3.imag, w); \
+ tmp8 = MUL (a3.imag + a3.real, w); \
+ tmp1 = tmp5 + tmp7; \
+ tmp2 = tmp6 + tmp8; \
+ tmp3 = tmp6 - tmp8; \
+ tmp4 = tmp7 - tmp5; \
+ a2.real = a0.real - tmp1; \
+ a2.imag = a0.imag - tmp2; \
+ a3.real = a1.real - tmp3; \
+ a3.imag = a1.imag - tmp4; \
+ a0.real += tmp1; \
+ a0.imag += tmp2; \
+ a1.real += tmp3; \
+ a1.imag += tmp4; \
+} while (0)
+
+static inline void ifft8 (complex_t * buf)
+{
+ sample_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+
+ ifft4 (buf);
+ ifft2 (buf + 4);
+ ifft2 (buf + 6);
+ BUTTERFLY_ZERO (buf[0], buf[2], buf[4], buf[6]);
+ BUTTERFLY_HALF (buf[1], buf[3], buf[5], buf[7], roots16[1]);
+}
+
+static void ifft_pass (complex_t * buf, sample_t * weight, int n)
+{
+ complex_t * buf1;
+ complex_t * buf2;
+ complex_t * buf3;
+ sample_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ int i;
+
+ buf++;
+ buf1 = buf + n;
+ buf2 = buf + 2 * n;
+ buf3 = buf + 3 * n;
+
+ BUTTERFLY_ZERO (buf[-1], buf1[-1], buf2[-1], buf3[-1]);
+
+ i = n - 1;
+
+ do {
+ BUTTERFLY (buf[0], buf1[0], buf2[0], buf3[0],
+ weight[0], weight[2*i-n]);
+ buf++;
+ buf1++;
+ buf2++;
+ buf3++;
+ weight++;
+ } while (--i);
+}
+
+static void ifft16 (complex_t * buf)
+{
+ ifft8 (buf);
+ ifft4 (buf + 8);
+ ifft4 (buf + 12);
+ ifft_pass (buf, roots16, 4);
+}
+
+static void ifft32 (complex_t * buf)
+{
+ ifft16 (buf);
+ ifft8 (buf + 16);
+ ifft8 (buf + 24);
+ ifft_pass (buf, roots32, 8);
+}
+
+static void ifft64_c (complex_t * buf)
+{
+ ifft32 (buf);
+ ifft16 (buf + 32);
+ ifft16 (buf + 48);
+ ifft_pass (buf, roots64, 16);
+}
+
+static void ifft128_c (complex_t * buf)
+{
+ ifft32 (buf);
+ ifft16 (buf + 32);
+ ifft16 (buf + 48);
+ ifft_pass (buf, roots64, 16);
+
+ ifft32 (buf + 64);
+ ifft32 (buf + 96);
+ ifft_pass (buf, roots128, 32);
+}
+
+void a52_imdct_512 (sample_t * data, sample_t * delay, sample_t bias)
+{
+ int i, k;
+ sample_t t_r, t_i, a_r, a_i, b_r, b_i, w_1, w_2;
+ const sample_t * window = a52_imdct_window;
+ complex_t buf[128];
+
+ for (i = 0; i < 128; i++) {
+ k = fftorder[i];
+ t_r = pre1[i].real;
+ t_i = pre1[i].imag;
+ BUTTERFLY_0 (buf[i].real, buf[i].imag, t_r, t_i, data[k], data[255-k]);
+ }
+
+ ifft128 (buf);
+
+ /* Post IFFT complex multiply plus IFFT complex conjugate */
+ /* Window and convert to real valued signal */
+ for (i = 0; i < 64; i++) {
+ /* y[n] = z[n] * (xcos1[n] + j * xsin1[n]) ; */
+ t_r = post1[i].real;
+ t_i = post1[i].imag;
+ BUTTERFLY_0 (a_r, a_i, t_i, t_r, buf[i].imag, buf[i].real);
+ BUTTERFLY_0 (b_r, b_i, t_r, t_i, buf[127-i].imag, buf[127-i].real);
+
+ w_1 = window[2*i];
+ w_2 = window[255-2*i];
+ BUTTERFLY_B (data[255-2*i], data[2*i], w_2, w_1, a_r, delay[2*i]);
+ delay[2*i] = a_i;
+
+ w_1 = window[2*i+1];
+ w_2 = window[254-2*i];
+ BUTTERFLY_B (data[2*i+1], data[254-2*i], w_1, w_2, b_r, delay[2*i+1]);
+ delay[2*i+1] = b_i;
+ }
+}
+
+void a52_imdct_256 (sample_t * data, sample_t * delay, sample_t bias)
+{
+ int i, k;
+ sample_t t_r, t_i, a_r, a_i, b_r, b_i, c_r, c_i, d_r, d_i, w_1, w_2;
+ const sample_t * window = a52_imdct_window;
+ complex_t buf1[64], buf2[64];
+
+ /* Pre IFFT complex multiply plus IFFT cmplx conjugate */
+ for (i = 0; i < 64; i++) {
+ k = fftorder[i];
+ t_r = pre2[i].real;
+ t_i = pre2[i].imag;
+ BUTTERFLY_0 (buf1[i].real, buf1[i].imag, t_r, t_i, data[k], data[254-k]);
+ BUTTERFLY_0 (buf2[i].real, buf2[i].imag, t_r, t_i, data[k+1], data[255-k]);
+ }
+
+ ifft64 (buf1);
+ ifft64 (buf2);
+
+ /* Post IFFT complex multiply */
+ /* Window and convert to real valued signal */
+ for (i = 0; i < 32; i++) {
+ /* y1[n] = z1[n] * (xcos2[n] + j * xsin2[n]) ; */
+ t_r = post2[i].real;
+ t_i = post2[i].imag;
+ BUTTERFLY_0 (a_r, a_i, t_i, t_r, buf1[i].imag, buf1[i].real);
+ BUTTERFLY_0 (b_r, b_i, t_r, t_i, buf1[63-i].imag, buf1[63-i].real);
+ BUTTERFLY_0 (c_r, c_i, t_i, t_r, buf2[i].imag, buf2[i].real);
+ BUTTERFLY_0 (d_r, d_i, t_r, t_i, buf2[63-i].imag, buf2[63-i].real);
+
+ w_1 = window[2*i];
+ w_2 = window[255-2*i];
+ BUTTERFLY_B (data[255-2*i], data[2*i], w_2, w_1, a_r, delay[2*i]);
+ delay[2*i] = c_i;
+
+ w_1 = window[128+2*i];
+ w_2 = window[127-2*i];
+ BUTTERFLY_B (data[128+2*i], data[127-2*i], w_1, w_2, a_i, delay[127-2*i]);
+ delay[127-2*i] = c_r;
+
+ w_1 = window[2*i+1];
+ w_2 = window[254-2*i];
+ BUTTERFLY_B (data[254-2*i], data[2*i+1], w_2, w_1, b_i, delay[2*i+1]);
+ delay[2*i+1] = d_r;
+
+ w_1 = window[129+2*i];
+ w_2 = window[126-2*i];
+ BUTTERFLY_B (data[129+2*i], data[126-2*i], w_1, w_2, b_r, delay[126-2*i]);
+ delay[126-2*i] = d_i;
+ }
+}
+
+static double besselI0 (double x)
+{
+ double bessel = 1;
+ int i = 100;
+
+ do
+ bessel = bessel * x / (i * i) + 1;
+ while (--i);
+ return bessel;
+}
+
+void a52_imdct_init (uint32_t mm_accel)
+{
+ int i, k;
+ double sum;
+ double local_imdct_window[256];
+
+ /* compute imdct window - kaiser-bessel derived window, alpha = 5.0 */
+ sum = 0;
+ for (i = 0; i < 256; i++) {
+ sum += besselI0 (i * (256 - i) * (5 * M_PI / 256) * (5 * M_PI / 256));
+ local_imdct_window[i] = sum;
+ }
+ sum++;
+ for (i = 0; i < 256; i++)
+ a52_imdct_window[i] = SAMPLE (sqrt (local_imdct_window[i] / sum));
+
+ for (i = 0; i < 3; i++)
+ roots16[i] = SAMPLE (cos ((M_PI / 8) * (i + 1)));
+
+ for (i = 0; i < 7; i++)
+ roots32[i] = SAMPLE (cos ((M_PI / 16) * (i + 1)));
+
+ for (i = 0; i < 15; i++)
+ roots64[i] = SAMPLE (cos ((M_PI / 32) * (i + 1)));
+
+ for (i = 0; i < 31; i++)
+ roots128[i] = SAMPLE (cos ((M_PI / 64) * (i + 1)));
+
+ for (i = 0; i < 64; i++) {
+ k = fftorder[i] / 2 + 64;
+ pre1[i].real = SAMPLE (cos ((M_PI / 256) * (k - 0.25)));
+ pre1[i].imag = SAMPLE (sin ((M_PI / 256) * (k - 0.25)));
+ }
+
+ for (i = 64; i < 128; i++) {
+ k = fftorder[i] / 2 + 64;
+ pre1[i].real = SAMPLE (-cos ((M_PI / 256) * (k - 0.25)));
+ pre1[i].imag = SAMPLE (-sin ((M_PI / 256) * (k - 0.25)));
+ }
+
+ for (i = 0; i < 64; i++) {
+ post1[i].real = SAMPLE (cos ((M_PI / 256) * (i + 0.5)));
+ post1[i].imag = SAMPLE (sin ((M_PI / 256) * (i + 0.5)));
+ }
+
+ for (i = 0; i < 64; i++) {
+ k = fftorder[i] / 4;
+ pre2[i].real = SAMPLE (cos ((M_PI / 128) * (k - 0.25)));
+ pre2[i].imag = SAMPLE (sin ((M_PI / 128) * (k - 0.25)));
+ }
+
+ for (i = 0; i < 32; i++) {
+ post2[i].real = SAMPLE (cos ((M_PI / 128) * (i + 0.5)));
+ post2[i].imag = SAMPLE (sin ((M_PI / 128) * (i + 0.5)));
+ }
+
+#ifdef LIBA52_DJBFFT
+ if (mm_accel & MM_ACCEL_DJBFFT) {
+ ifft128 = (void (*) (complex_t *)) fftc4_un128;
+ ifft64 = (void (*) (complex_t *)) fftc4_un64;
+ } else
+#endif
+ {
+ ifft128 = ifft128_c;
+ ifft64 = ifft64_c;
+ }
+}
diff --git a/contrib/ffmpeg/libavcodec/liba52/mm_accel.h b/contrib/ffmpeg/libavcodec/liba52/mm_accel.h
new file mode 100644
index 000000000..9a475f5a2
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/mm_accel.h
@@ -0,0 +1,42 @@
+/*
+ * mm_accel.h
+ * Copyright (C) 2000-2002 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef MM_ACCEL_H
+#define MM_ACCEL_H
+
+/* generic accelerations */
+#define MM_ACCEL_DJBFFT 0x00000001
+
+/* x86 accelerations */
+#define MM_ACCEL_X86_MMX 0x80000000
+#define MM_ACCEL_X86_3DNOW 0x40000000
+#define MM_ACCEL_X86_MMXEXT 0x20000000
+#define MM_ACCEL_X86_SSE 0x10000000
+#define MM_ACCEL_X86_3DNOWEXT 0x08000000
+
+/* PPC accelerations */
+#define MM_ACCEL_PPC_ALTIVEC 0x00010000
+
+uint32_t mm_accel (void);
+
+#endif /* MM_ACCEL_H */
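
The acceleration word is a plain bitmask handed to the library at init time; in this copy of the code only MM_ACCEL_DJBFFT is consulted, by a52_imdct_init() above via a52_init() in parse.c below. A short usage fragment (editorial; assumes a52.h for a52_init() and a52_state_t):

/* Editorial fragment: requesting the optional djbfft IFFT backend. */
#include <inttypes.h>
#include "a52.h"
#include "mm_accel.h"

a52_state_t * open_decoder (void)
{
    uint32_t accel = 0;
#ifdef LIBA52_DJBFFT
    accel |= MM_ACCEL_DJBFFT;      /* honoured by a52_imdct_init() above */
#endif
    return a52_init (accel);       /* a52_init() is defined in parse.c below */
}
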
diff --git a/contrib/ffmpeg/libavcodec/liba52/parse.c b/contrib/ffmpeg/libavcodec/liba52/parse.c
new file mode 100644
index 000000000..5a0701564
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/parse.c
@@ -0,0 +1,939 @@
+/*
+ * parse.c
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "a52.h"
+#include "a52_internal.h"
+#include "bitstream.h"
+#include "tables.h"
+
+#if defined(HAVE_MEMALIGN) && !defined(__cplusplus)
+/* some systems have memalign() but no declaration for it */
+void * memalign (size_t align, size_t size);
+#else
+/* assume malloc alignment is sufficient */
+#define memalign(align,size) malloc (size)
+#endif
+
+typedef struct {
+ quantizer_t q1[2];
+ quantizer_t q2[2];
+ quantizer_t q4;
+ int q1_ptr;
+ int q2_ptr;
+ int q4_ptr;
+} quantizer_set_t;
+
+static uint8_t halfrate[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3};
+
+a52_state_t * a52_init (uint32_t mm_accel)
+{
+ a52_state_t * state;
+ int i;
+
+ state = (a52_state_t *) malloc (sizeof (a52_state_t));
+ if (state == NULL)
+ return NULL;
+
+ state->samples = (sample_t *) memalign (16, 256 * 12 * sizeof (sample_t));
+ if (state->samples == NULL) {
+ free (state);
+ return NULL;
+ }
+
+ for (i = 0; i < 256 * 12; i++)
+ state->samples[i] = 0;
+
+ state->downmixed = 1;
+
+ state->lfsr_state = 1;
+
+ a52_imdct_init (mm_accel);
+
+ return state;
+}
+
+sample_t * a52_samples (a52_state_t * state)
+{
+ return state->samples;
+}
+
+int a52_syncinfo (uint8_t * buf, int * flags,
+ int * sample_rate, int * bit_rate)
+{
+ static int rate[] = { 32, 40, 48, 56, 64, 80, 96, 112,
+ 128, 160, 192, 224, 256, 320, 384, 448,
+ 512, 576, 640};
+ static uint8_t lfeon[8] = {0x10, 0x10, 0x04, 0x04, 0x04, 0x01, 0x04, 0x01};
+ int frmsizecod;
+ int bitrate;
+ int half;
+ int acmod;
+
+ if ((buf[0] != 0x0b) || (buf[1] != 0x77)) /* syncword */
+ return 0;
+
+ if (buf[5] >= 0x60) /* bsid >= 12 */
+ return 0;
+ half = halfrate[buf[5] >> 3];
+
+ /* acmod, dsurmod and lfeon */
+ acmod = buf[6] >> 5;
+ *flags = ((((buf[6] & 0xf8) == 0x50) ? A52_DOLBY : acmod) |
+ ((buf[6] & lfeon[acmod]) ? A52_LFE : 0));
+
+ frmsizecod = buf[4] & 63;
+ if (frmsizecod >= 38)
+ return 0;
+ bitrate = rate [frmsizecod >> 1];
+ *bit_rate = (bitrate * 1000) >> half;
+
+ switch (buf[4] & 0xc0) {
+ case 0:
+ *sample_rate = 48000 >> half;
+ return 4 * bitrate;
+ case 0x40:
+ *sample_rate = 44100 >> half;
+ return 2 * (320 * bitrate / 147 + (frmsizecod & 1));
+ case 0x80:
+ *sample_rate = 32000 >> half;
+ return 6 * bitrate;
+ default:
+ return 0;
+ }
+}
+
+int a52_frame (a52_state_t * state, uint8_t * buf, int * flags,
+ level_t * level, sample_t bias)
+{
+ static level_t clev[4] = { LEVEL (LEVEL_3DB), LEVEL (LEVEL_45DB),
+ LEVEL (LEVEL_6DB), LEVEL (LEVEL_45DB) };
+ static level_t slev[4] = { LEVEL (LEVEL_3DB), LEVEL (LEVEL_6DB),
+ 0, LEVEL (LEVEL_6DB) };
+ int chaninfo;
+ int acmod;
+
+ state->fscod = buf[4] >> 6;
+ state->halfrate = halfrate[buf[5] >> 3];
+ state->acmod = acmod = buf[6] >> 5;
+
+ a52_bitstream_set_ptr (state, buf + 6);
+ bitstream_get (state, 3); /* skip acmod we already parsed */
+
+ if ((acmod == 2) && (bitstream_get (state, 2) == 2)) /* dsurmod */
+ acmod = A52_DOLBY;
+
+ state->clev = state->slev = 0;
+
+ if ((acmod & 1) && (acmod != 1))
+ state->clev = clev[bitstream_get (state, 2)]; /* cmixlev */
+
+ if (acmod & 4)
+ state->slev = slev[bitstream_get (state, 2)]; /* surmixlev */
+
+ state->lfeon = bitstream_get (state, 1);
+
+ state->output = a52_downmix_init (acmod, *flags, level,
+ state->clev, state->slev);
+ if (state->output < 0)
+ return 1;
+ if (state->lfeon && (*flags & A52_LFE))
+ state->output |= A52_LFE;
+ *flags = state->output;
+ /* the 2* compensates for differences in imdct */
+ state->dynrng = state->level = MUL_C (*level, 2);
+ state->bias = bias;
+ state->dynrnge = 1;
+ state->dynrngcall = NULL;
+ state->cplba.deltbae = DELTA_BIT_NONE;
+ state->ba[0].deltbae = state->ba[1].deltbae = state->ba[2].deltbae =
+ state->ba[3].deltbae = state->ba[4].deltbae = DELTA_BIT_NONE;
+
+ chaninfo = !acmod;
+ do {
+ bitstream_get (state, 5); /* dialnorm */
+ if (bitstream_get (state, 1)) /* compre */
+ bitstream_get (state, 8); /* compr */
+ if (bitstream_get (state, 1)) /* langcode */
+ bitstream_get (state, 8); /* langcod */
+ if (bitstream_get (state, 1)) /* audprodie */
+ bitstream_get (state, 7); /* mixlevel + roomtyp */
+ } while (chaninfo--);
+
+ bitstream_get (state, 2); /* copyrightb + origbs */
+
+ if (bitstream_get (state, 1)) /* timecod1e */
+ bitstream_get (state, 14); /* timecod1 */
+ if (bitstream_get (state, 1)) /* timecod2e */
+ bitstream_get (state, 14); /* timecod2 */
+
+ if (bitstream_get (state, 1)) { /* addbsie */
+ int addbsil;
+
+ addbsil = bitstream_get (state, 6);
+ do {
+ bitstream_get (state, 8); /* addbsi */
+ } while (addbsil--);
+ }
+
+ return 0;
+}
+
+void a52_dynrng (a52_state_t * state,
+ level_t (* call) (level_t, void *), void * data)
+{
+ state->dynrnge = 0;
+ if (call) {
+ state->dynrnge = 1;
+ state->dynrngcall = call;
+ state->dynrngdata = data;
+ }
+}
+
+static int parse_exponents (a52_state_t * state, int expstr, int ngrps,
+ uint8_t exponent, uint8_t * dest)
+{
+ int exps;
+
+ while (ngrps--) {
+ exps = bitstream_get (state, 7);
+
+ exponent += exp_1[exps];
+ if (exponent > 24)
+ return 1;
+
+ switch (expstr) {
+ case EXP_D45:
+ *(dest++) = exponent;
+ *(dest++) = exponent;
+ case EXP_D25:
+ *(dest++) = exponent;
+ case EXP_D15:
+ *(dest++) = exponent;
+ }
+
+ exponent += exp_2[exps];
+ if (exponent > 24)
+ return 1;
+
+ switch (expstr) {
+ case EXP_D45:
+ *(dest++) = exponent;
+ *(dest++) = exponent;
+ case EXP_D25:
+ *(dest++) = exponent;
+ case EXP_D15:
+ *(dest++) = exponent;
+ }
+
+ exponent += exp_3[exps];
+ if (exponent > 24)
+ return 1;
+
+ switch (expstr) {
+ case EXP_D45:
+ *(dest++) = exponent;
+ *(dest++) = exponent;
+ case EXP_D25:
+ *(dest++) = exponent;
+ case EXP_D15:
+ *(dest++) = exponent;
+ }
+ }
+
+ return 0;
+}
+
+static int parse_deltba (a52_state_t * state, int8_t * deltba)
+{
+ int deltnseg, deltlen, delta, j;
+
+ memset (deltba, 0, 50);
+
+ deltnseg = bitstream_get (state, 3);
+ j = 0;
+ do {
+ j += bitstream_get (state, 5);
+ deltlen = bitstream_get (state, 4);
+ delta = bitstream_get (state, 3);
+ delta -= (delta >= 4) ? 3 : 4;
+ if (!deltlen)
+ continue;
+ if (j + deltlen >= 50)
+ return 1;
+ while (deltlen--)
+ deltba[j++] = delta;
+ } while (deltnseg--);
+
+ return 0;
+}
+
+static inline int zero_snr_offsets (int nfchans, a52_state_t * state)
+{
+ int i;
+
+ if ((state->csnroffst) ||
+ (state->chincpl && state->cplba.bai >> 3) || /* cplinu, fsnroffst */
+ (state->lfeon && state->lfeba.bai >> 3)) /* fsnroffst */
+ return 0;
+ for (i = 0; i < nfchans; i++)
+ if (state->ba[i].bai >> 3) /* fsnroffst */
+ return 0;
+ return 1;
+}
+
+static inline int16_t dither_gen (a52_state_t * state)
+{
+ int16_t nstate;
+
+ nstate = dither_lut[state->lfsr_state >> 8] ^ (state->lfsr_state << 8);
+
+ state->lfsr_state = (uint16_t) nstate;
+
+ return (3 * nstate) >> 2;
+}
+
+#ifndef LIBA52_FIXED
+#define COEFF(c,t,l,s,e) (c) = (t) * (s)[e]
+#else
+#define COEFF(c,_t,_l,s,e) do { \
+ quantizer_t t = (_t); \
+ level_t l = (_l); \
+ int shift = e - 5; \
+ sample_t tmp = t * (l >> 16) + ((t * (l & 0xffff)) >> 16); \
+ if (shift >= 0) \
+ (c) = tmp >> shift; \
+ else \
+ (c) = tmp << -shift; \
+} while (0)
+#endif
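
Both COEFF variants compute the same thing: quantized value times channel level, scaled by the AC-3 exponent. The float build folds the level into the precomputed factor[] array in coeff_get() below; the fixed-point build splits the 32-bit level into 16-bit halves so the product stays within 32-bit arithmetic and then applies the exponent as a shift. A reference version using a 64-bit intermediate (which the macro deliberately avoids) would look roughly like:

    /* Illustrative sketch (not part of the patch): what the LIBA52_FIXED
     * branch of COEFF approximates, written with a 64-bit intermediate. */
    static int32_t coeff_ref (int32_t t, int32_t l, int e)
    {
        int64_t tmp = ((int64_t) t * l) >> 16;   /* quantizer times level */
        int shift = e - 5;                       /* exponent scaling */
        return (int32_t) (shift >= 0 ? tmp >> shift : tmp << -shift);
    }
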
+
+static void coeff_get (a52_state_t * state, sample_t * coeff,
+ expbap_t * expbap, quantizer_set_t * quant,
+ level_t level, int dither, int end)
+{
+ int i;
+ uint8_t * exp;
+ int8_t * bap;
+
+#ifndef LIBA52_FIXED
+ sample_t factor[25];
+
+ for (i = 0; i <= 24; i++)
+ factor[i] = scale_factor[i] * level;
+#endif
+
+ exp = expbap->exp;
+ bap = expbap->bap;
+
+ for (i = 0; i < end; i++) {
+ int bapi;
+
+ bapi = bap[i];
+ switch (bapi) {
+ case 0:
+ if (dither) {
+ COEFF (coeff[i], dither_gen (state), level, factor, exp[i]);
+ continue;
+ } else {
+ coeff[i] = 0;
+ continue;
+ }
+
+ case -1:
+ if (quant->q1_ptr >= 0) {
+ COEFF (coeff[i], quant->q1[quant->q1_ptr--], level,
+ factor, exp[i]);
+ continue;
+ } else {
+ int code;
+
+ code = bitstream_get (state, 5);
+
+ quant->q1_ptr = 1;
+ quant->q1[0] = q_1_2[code];
+ quant->q1[1] = q_1_1[code];
+ COEFF (coeff[i], q_1_0[code], level, factor, exp[i]);
+ continue;
+ }
+
+ case -2:
+ if (quant->q2_ptr >= 0) {
+ COEFF (coeff[i], quant->q2[quant->q2_ptr--], level,
+ factor, exp[i]);
+ continue;
+ } else {
+ int code;
+
+ code = bitstream_get (state, 7);
+
+ quant->q2_ptr = 1;
+ quant->q2[0] = q_2_2[code];
+ quant->q2[1] = q_2_1[code];
+ COEFF (coeff[i], q_2_0[code], level, factor, exp[i]);
+ continue;
+ }
+
+ case 3:
+ COEFF (coeff[i], q_3[bitstream_get (state, 3)], level,
+ factor, exp[i]);
+ continue;
+
+ case -3:
+ if (quant->q4_ptr == 0) {
+ quant->q4_ptr = -1;
+ COEFF (coeff[i], quant->q4, level, factor, exp[i]);
+ continue;
+ } else {
+ int code;
+
+ code = bitstream_get (state, 7);
+
+ quant->q4_ptr = 0;
+ quant->q4 = q_4_1[code];
+ COEFF (coeff[i], q_4_0[code], level, factor, exp[i]);
+ continue;
+ }
+
+ case 4:
+ COEFF (coeff[i], q_5[bitstream_get (state, 4)], level,
+ factor, exp[i]);
+ continue;
+
+ default:
+ COEFF (coeff[i], bitstream_get_2 (state, bapi) << (16 - bapi),
+ level, factor, exp[i]);
+ }
+ }
+}
+
+static void coeff_get_coupling (a52_state_t * state, int nfchans,
+ level_t * coeff, sample_t (* samples)[256],
+ quantizer_set_t * quant, uint8_t dithflag[5])
+{
+ int cplbndstrc, bnd, i, i_end, ch;
+ uint8_t * exp;
+ int8_t * bap;
+ level_t cplco[5];
+
+ exp = state->cpl_expbap.exp;
+ bap = state->cpl_expbap.bap;
+ bnd = 0;
+ cplbndstrc = state->cplbndstrc;
+ i = state->cplstrtmant;
+ while (i < state->cplendmant) {
+ i_end = i + 12;
+ while (cplbndstrc & 1) {
+ cplbndstrc >>= 1;
+ i_end += 12;
+ }
+ cplbndstrc >>= 1;
+ for (ch = 0; ch < nfchans; ch++)
+ cplco[ch] = MUL_L (state->cplco[ch][bnd], coeff[ch]);
+ bnd++;
+
+ while (i < i_end) {
+ quantizer_t cplcoeff;
+ int bapi;
+
+ bapi = bap[i];
+ switch (bapi) {
+ case 0:
+ for (ch = 0; ch < nfchans; ch++)
+ if ((state->chincpl >> ch) & 1) {
+ if (dithflag[ch])
+#ifndef LIBA52_FIXED
+ samples[ch][i] = (scale_factor[exp[i]] *
+ cplco[ch] * dither_gen (state));
+#else
+ COEFF (samples[ch][i], dither_gen (state),
+ cplco[ch], scale_factor, exp[i]);
+#endif
+ else
+ samples[ch][i] = 0;
+ }
+ i++;
+ continue;
+
+ case -1:
+ if (quant->q1_ptr >= 0) {
+ cplcoeff = quant->q1[quant->q1_ptr--];
+ break;
+ } else {
+ int code;
+
+ code = bitstream_get (state, 5);
+
+ quant->q1_ptr = 1;
+ quant->q1[0] = q_1_2[code];
+ quant->q1[1] = q_1_1[code];
+ cplcoeff = q_1_0[code];
+ break;
+ }
+
+ case -2:
+ if (quant->q2_ptr >= 0) {
+ cplcoeff = quant->q2[quant->q2_ptr--];
+ break;
+ } else {
+ int code;
+
+ code = bitstream_get (state, 7);
+
+ quant->q2_ptr = 1;
+ quant->q2[0] = q_2_2[code];
+ quant->q2[1] = q_2_1[code];
+ cplcoeff = q_2_0[code];
+ break;
+ }
+
+ case 3:
+ cplcoeff = q_3[bitstream_get (state, 3)];
+ break;
+
+ case -3:
+ if (quant->q4_ptr == 0) {
+ quant->q4_ptr = -1;
+ cplcoeff = quant->q4;
+ break;
+ } else {
+ int code;
+
+ code = bitstream_get (state, 7);
+
+ quant->q4_ptr = 0;
+ quant->q4 = q_4_1[code];
+ cplcoeff = q_4_0[code];
+ break;
+ }
+
+ case 4:
+ cplcoeff = q_5[bitstream_get (state, 4)];
+ break;
+
+ default:
+ cplcoeff = bitstream_get_2 (state, bapi) << (16 - bapi);
+ }
+#ifndef LIBA52_FIXED
+ cplcoeff *= scale_factor[exp[i]];
+#endif
+ for (ch = 0; ch < nfchans; ch++)
+ if ((state->chincpl >> ch) & 1)
+#ifndef LIBA52_FIXED
+ samples[ch][i] = cplcoeff * cplco[ch];
+#else
+ COEFF (samples[ch][i], cplcoeff, cplco[ch],
+ scale_factor, exp[i]);
+#endif
+ i++;
+ }
+ }
+}
+
+int a52_block (a52_state_t * state)
+{
+ static const uint8_t nfchans_tbl[] = {2, 1, 2, 3, 3, 4, 4, 5, 1, 1, 2};
+ static int rematrix_band[4] = {25, 37, 61, 253};
+ int i, nfchans, chaninfo;
+ uint8_t cplexpstr, chexpstr[5], lfeexpstr, do_bit_alloc, done_cpl;
+ uint8_t blksw[5], dithflag[5];
+ level_t coeff[5];
+ int chanbias;
+ quantizer_set_t quant;
+ sample_t * samples;
+
+ nfchans = nfchans_tbl[state->acmod];
+
+ for (i = 0; i < nfchans; i++)
+ blksw[i] = bitstream_get (state, 1);
+
+ for (i = 0; i < nfchans; i++)
+ dithflag[i] = bitstream_get (state, 1);
+
+ chaninfo = !state->acmod;
+ do {
+ if (bitstream_get (state, 1)) { /* dynrnge */
+ int dynrng;
+
+ dynrng = bitstream_get_2 (state, 8);
+ if (state->dynrnge) {
+ level_t range;
+
+#if !defined(LIBA52_FIXED)
+ range = ((((dynrng & 0x1f) | 0x20) << 13) *
+ scale_factor[3 - (dynrng >> 5)]);
+#else
+ range = ((dynrng & 0x1f) | 0x20) << (21 + (dynrng >> 5));
+#endif
+ if (state->dynrngcall)
+ range = state->dynrngcall (range, state->dynrngdata);
+ state->dynrng = MUL_L (state->level, range);
+ }
+ }
+ } while (chaninfo--);
+
+ if (bitstream_get (state, 1)) { /* cplstre */
+ state->chincpl = 0;
+ if (bitstream_get (state, 1)) { /* cplinu */
+ static uint8_t bndtab[16] = {31, 35, 37, 39, 41, 42, 43, 44,
+ 45, 45, 46, 46, 47, 47, 48, 48};
+ int cplbegf;
+ int cplendf;
+ int ncplsubnd;
+
+ for (i = 0; i < nfchans; i++)
+ state->chincpl |= bitstream_get (state, 1) << i;
+ switch (state->acmod) {
+ case 0: case 1:
+ return 1;
+ case 2:
+ state->phsflginu = bitstream_get (state, 1);
+ }
+ cplbegf = bitstream_get (state, 4);
+ cplendf = bitstream_get (state, 4);
+
+ if (cplendf + 3 - cplbegf < 0)
+ return 1;
+ state->ncplbnd = ncplsubnd = cplendf + 3 - cplbegf;
+ state->cplstrtbnd = bndtab[cplbegf];
+ state->cplstrtmant = cplbegf * 12 + 37;
+ state->cplendmant = cplendf * 12 + 73;
+
+ state->cplbndstrc = 0;
+ for (i = 0; i < ncplsubnd - 1; i++)
+ if (bitstream_get (state, 1)) {
+ state->cplbndstrc |= 1 << i;
+ state->ncplbnd--;
+ }
+ }
+ }
+
+ if (state->chincpl) { /* cplinu */
+ int j, cplcoe;
+
+ cplcoe = 0;
+ for (i = 0; i < nfchans; i++)
+ if ((state->chincpl) >> i & 1)
+ if (bitstream_get (state, 1)) { /* cplcoe */
+ int mstrcplco, cplcoexp, cplcomant;
+
+ cplcoe = 1;
+ mstrcplco = 3 * bitstream_get (state, 2);
+ for (j = 0; j < state->ncplbnd; j++) {
+ cplcoexp = bitstream_get (state, 4);
+ cplcomant = bitstream_get (state, 4);
+ if (cplcoexp == 15)
+ cplcomant <<= 14;
+ else
+ cplcomant = (cplcomant | 0x10) << 13;
+#ifndef LIBA52_FIXED
+ state->cplco[i][j] =
+ cplcomant * scale_factor[cplcoexp + mstrcplco];
+#else
+ state->cplco[i][j] = (cplcomant << 11) >> (cplcoexp + mstrcplco);
+#endif
+
+ }
+ }
+ if ((state->acmod == 2) && state->phsflginu && cplcoe)
+ for (j = 0; j < state->ncplbnd; j++)
+ if (bitstream_get (state, 1)) /* phsflg */
+ state->cplco[1][j] = -state->cplco[1][j];
+ }
+
+ if ((state->acmod == 2) && (bitstream_get (state, 1))) { /* rematstr */
+ int end;
+
+ state->rematflg = 0;
+ end = (state->chincpl) ? state->cplstrtmant : 253; /* cplinu */
+ i = 0;
+ do
+ state->rematflg |= bitstream_get (state, 1) << i;
+ while (rematrix_band[i++] < end);
+ }
+
+ cplexpstr = EXP_REUSE;
+ lfeexpstr = EXP_REUSE;
+ if (state->chincpl) /* cplinu */
+ cplexpstr = bitstream_get (state, 2);
+ for (i = 0; i < nfchans; i++)
+ chexpstr[i] = bitstream_get (state, 2);
+ if (state->lfeon)
+ lfeexpstr = bitstream_get (state, 1);
+
+ for (i = 0; i < nfchans; i++)
+ if (chexpstr[i] != EXP_REUSE) {
+ if ((state->chincpl >> i) & 1)
+ state->endmant[i] = state->cplstrtmant;
+ else {
+ int chbwcod;
+
+ chbwcod = bitstream_get (state, 6);
+ if (chbwcod > 60)
+ return 1;
+ state->endmant[i] = chbwcod * 3 + 73;
+ }
+ }
+
+ do_bit_alloc = 0;
+
+ if (cplexpstr != EXP_REUSE) {
+ int cplabsexp, ncplgrps;
+
+ do_bit_alloc = 64;
+ ncplgrps = ((state->cplendmant - state->cplstrtmant) /
+ (3 << (cplexpstr - 1)));
+ cplabsexp = bitstream_get (state, 4) << 1;
+ if (parse_exponents (state, cplexpstr, ncplgrps, cplabsexp,
+ state->cpl_expbap.exp + state->cplstrtmant))
+ return 1;
+ }
+ for (i = 0; i < nfchans; i++)
+ if (chexpstr[i] != EXP_REUSE) {
+ int grp_size, nchgrps;
+
+ do_bit_alloc |= 1 << i;
+ grp_size = 3 << (chexpstr[i] - 1);
+ nchgrps = (state->endmant[i] + grp_size - 4) / grp_size;
+ state->fbw_expbap[i].exp[0] = bitstream_get (state, 4);
+ if (parse_exponents (state, chexpstr[i], nchgrps,
+ state->fbw_expbap[i].exp[0],
+ state->fbw_expbap[i].exp + 1))
+ return 1;
+ bitstream_get (state, 2); /* gainrng */
+ }
+ if (lfeexpstr != EXP_REUSE) {
+ do_bit_alloc |= 32;
+ state->lfe_expbap.exp[0] = bitstream_get (state, 4);
+ if (parse_exponents (state, lfeexpstr, 2, state->lfe_expbap.exp[0],
+ state->lfe_expbap.exp + 1))
+ return 1;
+ }
+
+ if (bitstream_get (state, 1)) { /* baie */
+ do_bit_alloc = 127;
+ state->bai = bitstream_get (state, 11);
+ }
+ if (bitstream_get (state, 1)) { /* snroffste */
+ do_bit_alloc = 127;
+ state->csnroffst = bitstream_get (state, 6);
+ if (state->chincpl) /* cplinu */
+ state->cplba.bai = bitstream_get (state, 7);
+ for (i = 0; i < nfchans; i++)
+ state->ba[i].bai = bitstream_get (state, 7);
+ if (state->lfeon)
+ state->lfeba.bai = bitstream_get (state, 7);
+ }
+ if ((state->chincpl) && (bitstream_get (state, 1))) { /* cplleake */
+ do_bit_alloc |= 64;
+ state->cplfleak = 9 - bitstream_get (state, 3);
+ state->cplsleak = 9 - bitstream_get (state, 3);
+ }
+
+ if (bitstream_get (state, 1)) { /* deltbaie */
+ do_bit_alloc = 127;
+ if (state->chincpl) /* cplinu */
+ state->cplba.deltbae = bitstream_get (state, 2);
+ for (i = 0; i < nfchans; i++)
+ state->ba[i].deltbae = bitstream_get (state, 2);
+ if (state->chincpl && /* cplinu */
+ (state->cplba.deltbae == DELTA_BIT_NEW) &&
+ parse_deltba (state, state->cplba.deltba))
+ return 1;
+ for (i = 0; i < nfchans; i++)
+ if ((state->ba[i].deltbae == DELTA_BIT_NEW) &&
+ parse_deltba (state, state->ba[i].deltba))
+ return 1;
+ }
+
+ if (do_bit_alloc) {
+ if (zero_snr_offsets (nfchans, state)) {
+ memset (state->cpl_expbap.bap, 0, sizeof (state->cpl_expbap.bap));
+ for (i = 0; i < nfchans; i++)
+ memset (state->fbw_expbap[i].bap, 0,
+ sizeof (state->fbw_expbap[i].bap));
+ memset (state->lfe_expbap.bap, 0, sizeof (state->lfe_expbap.bap));
+ } else {
+ if (state->chincpl && (do_bit_alloc & 64)) /* cplinu */
+ a52_bit_allocate (state, &state->cplba, state->cplstrtbnd,
+ state->cplstrtmant, state->cplendmant,
+ state->cplfleak << 8, state->cplsleak << 8,
+ &state->cpl_expbap);
+ for (i = 0; i < nfchans; i++)
+ if (do_bit_alloc & (1 << i))
+ a52_bit_allocate (state, state->ba + i, 0, 0,
+ state->endmant[i], 0, 0,
+ state->fbw_expbap +i);
+ if (state->lfeon && (do_bit_alloc & 32)) {
+ state->lfeba.deltbae = DELTA_BIT_NONE;
+ a52_bit_allocate (state, &state->lfeba, 0, 0, 7, 0, 0,
+ &state->lfe_expbap);
+ }
+ }
+ }
+
+ if (bitstream_get (state, 1)) { /* skiple */
+ i = bitstream_get (state, 9); /* skipl */
+ while (i--)
+ bitstream_get (state, 8);
+ }
+
+ samples = state->samples;
+ if (state->output & A52_LFE)
+ samples += 256; /* shift for LFE channel */
+
+ chanbias = a52_downmix_coeff (coeff, state->acmod, state->output,
+ state->dynrng, state->clev, state->slev);
+
+ quant.q1_ptr = quant.q2_ptr = quant.q4_ptr = -1;
+ done_cpl = 0;
+
+ for (i = 0; i < nfchans; i++) {
+ int j;
+
+ coeff_get (state, samples + 256 * i, state->fbw_expbap +i, &quant,
+ coeff[i], dithflag[i], state->endmant[i]);
+
+ if ((state->chincpl >> i) & 1) {
+ if (!done_cpl) {
+ done_cpl = 1;
+ coeff_get_coupling (state, nfchans, coeff,
+ (sample_t (*)[256])samples, &quant,
+ dithflag);
+ }
+ j = state->cplendmant;
+ } else
+ j = state->endmant[i];
+ do
+ (samples + 256 * i)[j] = 0;
+ while (++j < 256);
+ }
+
+ if (state->acmod == 2) {
+ int j, end, band, rematflg;
+
+ end = ((state->endmant[0] < state->endmant[1]) ?
+ state->endmant[0] : state->endmant[1]);
+
+ i = 0;
+ j = 13;
+ rematflg = state->rematflg;
+ do {
+ if (! (rematflg & 1)) {
+ rematflg >>= 1;
+ j = rematrix_band[i++];
+ continue;
+ }
+ rematflg >>= 1;
+ band = rematrix_band[i++];
+ if (band > end)
+ band = end;
+ do {
+ sample_t tmp0, tmp1;
+
+ tmp0 = samples[j];
+ tmp1 = (samples+256)[j];
+ samples[j] = tmp0 + tmp1;
+ (samples+256)[j] = tmp0 - tmp1;
+ } while (++j < band);
+ } while (j < end);
+ }
+
+ if (state->lfeon) {
+ if (state->output & A52_LFE) {
+ coeff_get (state, samples - 256, &state->lfe_expbap, &quant,
+ state->dynrng, 0, 7);
+ for (i = 7; i < 256; i++)
+ (samples-256)[i] = 0;
+ a52_imdct_512 (samples - 256, samples + 1536 - 256, state->bias);
+ } else {
+ /* just skip the LFE coefficients */
+ coeff_get (state, samples + 1280, &state->lfe_expbap, &quant,
+ 0, 0, 7);
+ }
+ }
+
+ i = 0;
+ if (nfchans_tbl[state->output & A52_CHANNEL_MASK] < nfchans)
+ for (i = 1; i < nfchans; i++)
+ if (blksw[i] != blksw[0])
+ break;
+
+ if (i < nfchans) {
+ if (state->downmixed) {
+ state->downmixed = 0;
+ a52_upmix (samples + 1536, state->acmod, state->output);
+ }
+
+ for (i = 0; i < nfchans; i++) {
+ sample_t bias;
+
+ bias = 0;
+ if (!(chanbias & (1 << i)))
+ bias = state->bias;
+
+ if (coeff[i]) {
+ if (blksw[i])
+ a52_imdct_256 (samples + 256 * i, samples + 1536 + 256 * i,
+ bias);
+ else
+ a52_imdct_512 (samples + 256 * i, samples + 1536 + 256 * i,
+ bias);
+ } else {
+ int j;
+
+ for (j = 0; j < 256; j++)
+ (samples + 256 * i)[j] = bias;
+ }
+ }
+
+ a52_downmix (samples, state->acmod, state->output, state->bias,
+ state->clev, state->slev);
+ } else {
+ nfchans = nfchans_tbl[state->output & A52_CHANNEL_MASK];
+
+ a52_downmix (samples, state->acmod, state->output, 0,
+ state->clev, state->slev);
+
+ if (!state->downmixed) {
+ state->downmixed = 1;
+ a52_downmix (samples + 1536, state->acmod, state->output, 0,
+ state->clev, state->slev);
+ }
+
+ if (blksw[0])
+ for (i = 0; i < nfchans; i++)
+ a52_imdct_256 (samples + 256 * i, samples + 1536 + 256 * i,
+ state->bias);
+ else
+ for (i = 0; i < nfchans; i++)
+ a52_imdct_512 (samples + 256 * i, samples + 1536 + 256 * i,
+ state->bias);
+ }
+
+ return 0;
+}
+
+void a52_free (a52_state_t * state)
+{
+ free (state->samples);
+ free (state);
+}
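
The functions in this file form the core of liba52's public decode loop: a52_frame() parses the frame header and BSI, a52_dynrng() optionally hooks dynamic range handling, a52_block() decodes each of the six 256-sample blocks, and a52_free() releases the state. A rough consumer sketch, assuming the remaining a52.h entry points (a52_init(), a52_syncinfo(), a52_samples()) that live elsewhere in this import, and using the float-build conventions of level 1.0 and bias 384:

    /* Illustrative sketch (not part of the patch): decoding one complete
     * A/52 frame with the functions added in this file.  Frame sizing via
     * a52_syncinfo() and handling of the decoded samples are omitted. */
    #include "a52.h"

    static int decode_frame (a52_state_t * state, uint8_t * frame)
    {
        int flags = A52_STEREO;        /* request a stereo downmix */
        level_t level = 1;             /* float-build value; fixed builds differ */
        sample_t bias = 384;
        int blk;

        if (a52_frame (state, frame, &flags, &level, bias))
            return -1;
        for (blk = 0; blk < 6; blk++) {
            if (a52_block (state))
                return -1;
            /* a52_samples (state) now points at 256 samples per output channel */
        }
        return 0;
    }
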
diff --git a/contrib/ffmpeg/libavcodec/liba52/resample.c b/contrib/ffmpeg/libavcodec/liba52/resample.c
new file mode 100644
index 000000000..3f06aba1f
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/resample.c
@@ -0,0 +1,63 @@
+/*
+ * copyright (C) 2001 Arpad Gereoffy
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+// a52_resample_init() finds the converter that matches the requested output
+// (A52 type flags -> desired number of interleaved channels) and installs it
+// in the a52_resample function pointer.
+
+// a52_resample() then performs the conversion for one decoded block
+// (see the usage sketch after this file).
+
+#include "a52.h"
+#include "mm_accel.h"
+#include "config.h"
+#include "../../libpostproc/mangle.h"
+
+int (* a52_resample) (float * _f, int16_t * s16)=NULL;
+
+#include "resample_c.c"
+
+#ifdef ARCH_X86_32
+#include "resample_mmx.c"
+#endif
+
+void* a52_resample_init(uint32_t mm_accel,int flags,int chans){
+ void* tmp;
+
+#ifdef ARCH_X86_32
+ if(mm_accel&MM_ACCEL_X86_MMX){
+ tmp=a52_resample_MMX(flags,chans);
+ if(tmp){
+ if(a52_resample==NULL) av_log(NULL, AV_LOG_INFO, "Using MMX optimized resampler\n");
+ a52_resample=tmp;
+ return tmp;
+ }
+ }
+#endif
+
+ tmp=a52_resample_C(flags,chans);
+ if(tmp){
+ if(a52_resample==NULL) av_log(NULL, AV_LOG_INFO, "No accelerated resampler found\n");
+ a52_resample=tmp;
+ return tmp;
+ }
+
+ av_log(NULL, AV_LOG_ERROR, "Unimplemented resampler for mode 0x%X -> %d channels conversion - Contact MPlayer developers!\n", flags, chans);
+ return NULL;
+}
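
Tying this back to the comment at the top of the file: a caller asks a52_resample_init() once for a converter matching the decoder's output flags and the desired interleaved channel count, then pushes every decoded block through the installed a52_resample pointer, which returns the number of int16 samples written (channels * 256 for the converters below). A sketch under those assumptions; a52_samples() comes from a52.h and is not part of this hunk:

    /* Illustrative sketch (not part of the patch): driving the resampler for
     * one decoded block.  pcm must hold at least chans * 256 int16 samples. */
    static int resample_block (a52_state_t * state, uint32_t accel,
                               int flags, int chans, int16_t * pcm)
    {
        if (!a52_resample_init (accel, flags, chans))
            return -1;                               /* no converter for this layout */
        return a52_resample ((float *) a52_samples (state), pcm);
    }
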
diff --git a/contrib/ffmpeg/libavcodec/liba52/resample_c.c b/contrib/ffmpeg/libavcodec/liba52/resample_c.c
new file mode 100644
index 000000000..905146a7d
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/resample_c.c
@@ -0,0 +1,203 @@
+/*
+ * this code is based on a52dec/libao/audio_out_oss.c
+ * copyright (C) 2001 Arpad Gereoffy
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+static inline int16_t convert (int32_t i)
+{
+ if (i > 0x43c07fff)
+ return 32767;
+ else if (i < 0x43bf8000)
+ return -32768;
+ else
+ return i - 0x43c00000;
+}
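
convert() only works because the decoder was run with bias = 384.0: every output sample then lies in [383.0, 385.0), a range in which the IEEE-754 single-precision exponent is constant, one mantissa step is exactly 1/32768, and the bit pattern of 384.0f is 0x43C00000. Reinterpreting the float's bits as an int32 and subtracting 0x43C00000 therefore yields the 16-bit PCM value directly, and the two comparisons clip at +32767/-32768. A small self-contained check of that property:

    /* Illustrative sketch (not part of the patch): for s in [-32768, 32767],
     * the bit pattern of (384.0f + s / 32768.0f) is exactly 0x43C00000 + s,
     * which is the identity convert() relies on. */
    #include <stdint.h>
    #include <string.h>

    static int bias_trick_holds (int s)
    {
        float f = 384.0f + (float) s / 32768.0f;
        int32_t bits;
        memcpy (&bits, &f, sizeof (bits));
        return bits == 0x43C00000 + s;
    }
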
+
+static int a52_resample_MONO_to_5_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[5*i] = s16[5*i+1] = s16[5*i+2] = s16[5*i+3] = 0;
+ s16[5*i+4] = convert (f[i]);
+ }
+ return 5*256;
+}
+
+static int a52_resample_MONO_to_1_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[i] = convert (f[i]);
+ }
+ return 1*256;
+}
+
+static int a52_resample_STEREO_to_2_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[2*i] = convert (f[i]);
+ s16[2*i+1] = convert (f[i+256]);
+ }
+ return 2*256;
+}
+
+static int a52_resample_3F_to_5_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[5*i] = convert (f[i]);
+ s16[5*i+1] = convert (f[i+512]);
+ s16[5*i+2] = s16[5*i+3] = 0;
+ s16[5*i+4] = convert (f[i+256]);
+ }
+ return 5*256;
+}
+
+static int a52_resample_2F_2R_to_4_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[4*i] = convert (f[i]);
+ s16[4*i+1] = convert (f[i+256]);
+ s16[4*i+2] = convert (f[i+512]);
+ s16[4*i+3] = convert (f[i+768]);
+ }
+ return 4*256;
+}
+
+static int a52_resample_3F_2R_to_5_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[5*i] = convert (f[i]);
+ s16[5*i+1] = convert (f[i+512]);
+ s16[5*i+2] = convert (f[i+768]);
+ s16[5*i+3] = convert (f[i+1024]);
+ s16[5*i+4] = convert (f[i+256]);
+ }
+ return 5*256;
+}
+
+static int a52_resample_MONO_LFE_to_6_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[6*i] = s16[6*i+1] = s16[6*i+2] = s16[6*i+3] = 0;
+ s16[6*i+4] = convert (f[i+256]);
+ s16[6*i+5] = convert (f[i]);
+ }
+ return 6*256;
+}
+
+static int a52_resample_STEREO_LFE_to_6_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[6*i] = convert (f[i+256]);
+ s16[6*i+1] = convert (f[i+512]);
+ s16[6*i+2] = s16[6*i+3] = s16[6*i+4] = 0;
+ s16[6*i+5] = convert (f[i]);
+ }
+ return 6*256;
+}
+
+static int a52_resample_3F_LFE_to_6_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[6*i] = convert (f[i+256]);
+ s16[6*i+1] = convert (f[i+768]);
+ s16[6*i+2] = s16[6*i+3] = 0;
+ s16[6*i+4] = convert (f[i+512]);
+ s16[6*i+5] = convert (f[i]);
+ }
+ return 6*256;
+}
+
+static int a52_resample_2F_2R_LFE_to_6_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[6*i] = convert (f[i+256]);
+ s16[6*i+1] = convert (f[i+512]);
+ s16[6*i+2] = convert (f[i+768]);
+ s16[6*i+3] = convert (f[i+1024]);
+ s16[6*i+4] = 0;
+ s16[6*i+5] = convert (f[i]);
+ }
+ return 6*256;
+}
+
+static int a52_resample_3F_2R_LFE_to_6_C(float * _f, int16_t * s16){
+ int i;
+ int32_t * f = (int32_t *) _f;
+ for (i = 0; i < 256; i++) {
+ s16[6*i] = convert (f[i+256]);
+ s16[6*i+1] = convert (f[i+768]);
+ s16[6*i+2] = convert (f[i+1024]);
+ s16[6*i+3] = convert (f[i+1280]);
+ s16[6*i+4] = convert (f[i+512]);
+ s16[6*i+5] = convert (f[i]);
+ }
+ return 6*256;
+}
+
+
+static void* a52_resample_C(int flags, int ch){
+ switch (flags) {
+ case A52_MONO:
+ if(ch==5) return a52_resample_MONO_to_5_C;
+ if(ch==1) return a52_resample_MONO_to_1_C;
+ break;
+ case A52_CHANNEL:
+ case A52_STEREO:
+ case A52_DOLBY:
+ if(ch==2) return a52_resample_STEREO_to_2_C;
+ break;
+ case A52_3F:
+ if(ch==5) return a52_resample_3F_to_5_C;
+ break;
+ case A52_2F2R:
+ if(ch==4) return a52_resample_2F_2R_to_4_C;
+ break;
+ case A52_3F2R:
+ if(ch==5) return a52_resample_3F_2R_to_5_C;
+ break;
+ case A52_MONO | A52_LFE:
+ if(ch==6) return a52_resample_MONO_LFE_to_6_C;
+ break;
+ case A52_CHANNEL | A52_LFE:
+ case A52_STEREO | A52_LFE:
+ case A52_DOLBY | A52_LFE:
+ if(ch==6) return a52_resample_STEREO_LFE_to_6_C;
+ break;
+ case A52_3F | A52_LFE:
+ if(ch==6) return a52_resample_3F_LFE_to_6_C;
+ break;
+ case A52_2F2R | A52_LFE:
+ if(ch==6) return a52_resample_2F_2R_LFE_to_6_C;
+ break;
+ case A52_3F2R | A52_LFE:
+ if(ch==6) return a52_resample_3F_2R_LFE_to_6_C;
+ break;
+ }
+ return NULL;
+}
diff --git a/contrib/ffmpeg/libavcodec/liba52/resample_mmx.c b/contrib/ffmpeg/libavcodec/liba52/resample_mmx.c
new file mode 100644
index 000000000..173c804d9
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/resample_mmx.c
@@ -0,0 +1,537 @@
+/*
+ * resample_mmx.c
+ * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* optimization TODO / NOTES
+ movntq is slightly faster (0.5% with the current test.c benchmark),
+ but that is only test.c, so it still needs to be tested on real data,
+ and it would mean separate (C / MMX2 / MMX / 3DNOW) versions
+*/
+
+static uint64_t __attribute__((aligned(8))) attribute_used magicF2W= 0x43c0000043c00000LL;
+static uint64_t __attribute__((aligned(8))) attribute_used wm1010= 0xFFFF0000FFFF0000LL;
+static uint64_t __attribute__((aligned(8))) attribute_used wm0101= 0x0000FFFF0000FFFFLL;
+static uint64_t __attribute__((aligned(8))) attribute_used wm1100= 0xFFFFFFFF00000000LL;
+
+static int a52_resample_MONO_to_5_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+ asm volatile(
+ "movl $-512, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+ "movq "MANGLE(wm1100)", %%mm3 \n\t"
+ "movq "MANGLE(wm0101)", %%mm4 \n\t"
+ "movq "MANGLE(wm1010)", %%mm5 \n\t"
+ "pxor %%mm6, %%mm6 \n\t"
+ "1: \n\t"
+ "movq (%1, %%esi, 2), %%mm0 \n\t"
+ "movq 8(%1, %%esi, 2), %%mm1 \n\t"
+ "leal (%%esi, %%esi, 4), %%edi \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "pand %%mm4, %%mm0 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "movq %%mm6, (%0, %%edi) \n\t" // 0 0 0 0
+ "movd %%mm0, 8(%0, %%edi) \n\t" // A 0
+ "pand %%mm3, %%mm0 \n\t"
+ "movd %%mm6, 12(%0, %%edi) \n\t" // 0 0
+ "movd %%mm1, 16(%0, %%edi) \n\t" // 0 B
+ "pand %%mm3, %%mm1 \n\t"
+ "movd %%mm6, 20(%0, %%edi) \n\t" // 0 0
+ "movq %%mm0, 24(%0, %%edi) \n\t" // 0 0 C 0
+ "movq %%mm1, 32(%0, %%edi) \n\t" // 0 0 0 B
+ "addl $8, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+1280), "r" (f+256)
+ :"%esi", "%edi", "memory"
+ );
+ return 5*256;
+}
+
+static int a52_resample_STEREO_to_2_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+/* benchmark scores are 0.3% better with SSE but we would need to set bias=0 and premultiply it
+#ifdef HAVE_SSE
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "1: \n\t"
+ "cvtps2pi (%1, %%esi), %%mm0 \n\t"
+ "cvtps2pi 1024(%1, %%esi), %%mm2\n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklwd %%mm2, %%mm0 \n\t"
+ "punpckhwd %%mm2, %%mm1 \n\t"
+ "movq %%mm0, (%0, %%esi) \n\t"
+ "movq %%mm1, 8(%0, %%esi) \n\t"
+ "addl $16, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+512), "r" (f+256)
+ :"%esi", "memory"
+ );*/
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+ "1: \n\t"
+ "movq (%1, %%esi), %%mm0 \n\t"
+ "movq 8(%1, %%esi), %%mm1 \n\t"
+ "movq 1024(%1, %%esi), %%mm2 \n\t"
+ "movq 1032(%1, %%esi), %%mm3 \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm7, %%mm2 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklwd %%mm2, %%mm0 \n\t"
+ "punpckhwd %%mm2, %%mm1 \n\t"
+ "movq %%mm0, (%0, %%esi) \n\t"
+ "movq %%mm1, 8(%0, %%esi) \n\t"
+ "addl $16, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+512), "r" (f+256)
+ :"%esi", "memory"
+ );
+ return 2*256;
+}
+
+static int a52_resample_3F_to_5_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+ "pxor %%mm6, %%mm6 \n\t"
+ "movq %%mm7, %%mm5 \n\t"
+ "punpckldq %%mm6, %%mm5 \n\t"
+ "1: \n\t"
+ "movd (%1, %%esi), %%mm0 \n\t"
+ "punpckldq 2048(%1, %%esi), %%mm0\n\t"
+ "movd 1024(%1, %%esi), %%mm1 \n\t"
+ "punpckldq 4(%1, %%esi), %%mm1 \n\t"
+ "movd 2052(%1, %%esi), %%mm2 \n\t"
+ "movq %%mm7, %%mm3 \n\t"
+ "punpckldq 1028(%1, %%esi), %%mm3\n\t"
+ "movd 8(%1, %%esi), %%mm4 \n\t"
+ "punpckldq 2056(%1, %%esi), %%mm4\n\t"
+ "leal (%%esi, %%esi, 4), %%edi \n\t"
+ "sarl $1, %%edi \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm5, %%mm2 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "psubd %%mm7, %%mm4 \n\t"
+ "packssdw %%mm6, %%mm0 \n\t"
+ "packssdw %%mm2, %%mm1 \n\t"
+ "packssdw %%mm4, %%mm3 \n\t"
+ "movq %%mm0, (%0, %%edi) \n\t"
+ "movq %%mm1, 8(%0, %%edi) \n\t"
+ "movq %%mm3, 16(%0, %%edi) \n\t"
+
+ "movd 1032(%1, %%esi), %%mm1 \n\t"
+ "punpckldq 12(%1, %%esi), %%mm1\n\t"
+ "movd 2060(%1, %%esi), %%mm2 \n\t"
+ "movq %%mm7, %%mm3 \n\t"
+ "punpckldq 1036(%1, %%esi), %%mm3\n\t"
+ "pxor %%mm0, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm5, %%mm2 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "movq %%mm0, 24(%0, %%edi) \n\t"
+ "movq %%mm2, 32(%0, %%edi) \n\t"
+
+ "addl $16, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+1280), "r" (f+256)
+ :"%esi", "%edi", "memory"
+ );
+ return 5*256;
+}
+
+static int a52_resample_2F_2R_to_4_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+ "1: \n\t"
+ "movq (%1, %%esi), %%mm0 \n\t"
+ "movq 8(%1, %%esi), %%mm1 \n\t"
+ "movq 1024(%1, %%esi), %%mm2 \n\t"
+ "movq 1032(%1, %%esi), %%mm3 \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm7, %%mm2 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "movq 2048(%1, %%esi), %%mm3 \n\t"
+ "movq 2056(%1, %%esi), %%mm4 \n\t"
+ "movq 3072(%1, %%esi), %%mm5 \n\t"
+ "movq 3080(%1, %%esi), %%mm6 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "psubd %%mm7, %%mm4 \n\t"
+ "psubd %%mm7, %%mm5 \n\t"
+ "psubd %%mm7, %%mm6 \n\t"
+ "packssdw %%mm4, %%mm3 \n\t"
+ "packssdw %%mm6, %%mm5 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "punpcklwd %%mm2, %%mm0 \n\t"
+ "punpckhwd %%mm2, %%mm1 \n\t"
+ "punpcklwd %%mm5, %%mm3 \n\t"
+ "punpckhwd %%mm5, %%mm4 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm5 \n\t"
+ "punpckldq %%mm3, %%mm0 \n\t"
+ "punpckhdq %%mm3, %%mm2 \n\t"
+ "punpckldq %%mm4, %%mm1 \n\t"
+ "punpckhdq %%mm4, %%mm5 \n\t"
+ "movq %%mm0, (%0, %%esi,2) \n\t"
+ "movq %%mm2, 8(%0, %%esi,2) \n\t"
+ "movq %%mm1, 16(%0, %%esi,2) \n\t"
+ "movq %%mm5, 24(%0, %%esi,2) \n\t"
+ "addl $16, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+1024), "r" (f+256)
+ :"%esi", "memory"
+ );
+ return 4*256;
+}
+
+static int a52_resample_3F_2R_to_5_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+ "1: \n\t"
+ "movd (%1, %%esi), %%mm0 \n\t"
+ "punpckldq 2048(%1, %%esi), %%mm0\n\t"
+ "movd 3072(%1, %%esi), %%mm1 \n\t"
+ "punpckldq 4096(%1, %%esi), %%mm1\n\t"
+ "movd 1024(%1, %%esi), %%mm2 \n\t"
+ "punpckldq 4(%1, %%esi), %%mm2 \n\t"
+ "movd 2052(%1, %%esi), %%mm3 \n\t"
+ "punpckldq 3076(%1, %%esi), %%mm3\n\t"
+ "movd 4100(%1, %%esi), %%mm4 \n\t"
+ "punpckldq 1028(%1, %%esi), %%mm4\n\t"
+ "movd 8(%1, %%esi), %%mm5 \n\t"
+ "punpckldq 2056(%1, %%esi), %%mm5\n\t"
+ "leal (%%esi, %%esi, 4), %%edi \n\t"
+ "sarl $1, %%edi \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm7, %%mm2 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "psubd %%mm7, %%mm4 \n\t"
+ "psubd %%mm7, %%mm5 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "packssdw %%mm5, %%mm4 \n\t"
+ "movq %%mm0, (%0, %%edi) \n\t"
+ "movq %%mm2, 8(%0, %%edi) \n\t"
+ "movq %%mm4, 16(%0, %%edi) \n\t"
+
+ "movd 3080(%1, %%esi), %%mm0 \n\t"
+ "punpckldq 4104(%1, %%esi), %%mm0\n\t"
+ "movd 1032(%1, %%esi), %%mm1 \n\t"
+ "punpckldq 12(%1, %%esi), %%mm1\n\t"
+ "movd 2060(%1, %%esi), %%mm2 \n\t"
+ "punpckldq 3084(%1, %%esi), %%mm2\n\t"
+ "movd 4108(%1, %%esi), %%mm3 \n\t"
+ "punpckldq 1036(%1, %%esi), %%mm3\n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm7, %%mm2 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "movq %%mm0, 24(%0, %%edi) \n\t"
+ "movq %%mm2, 32(%0, %%edi) \n\t"
+
+ "addl $16, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+1280), "r" (f+256)
+ :"%esi", "%edi", "memory"
+ );
+ return 5*256;
+}
+
+static int a52_resample_MONO_LFE_to_6_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+ "pxor %%mm6, %%mm6 \n\t"
+ "1: \n\t"
+ "movq 1024(%1, %%esi), %%mm0 \n\t"
+ "movq 1032(%1, %%esi), %%mm1 \n\t"
+ "movq (%1, %%esi), %%mm2 \n\t"
+ "movq 8(%1, %%esi), %%mm3 \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm7, %%mm2 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklwd %%mm2, %%mm0 \n\t"
+ "punpckhwd %%mm2, %%mm1 \n\t"
+ "leal (%%esi, %%esi, 2), %%edi \n\t"
+ "movq %%mm6, (%0, %%edi) \n\t"
+ "movd %%mm0, 8(%0, %%edi) \n\t"
+ "punpckhdq %%mm0, %%mm0 \n\t"
+ "movq %%mm6, 12(%0, %%edi) \n\t"
+ "movd %%mm0, 20(%0, %%edi) \n\t"
+ "movq %%mm6, 24(%0, %%edi) \n\t"
+ "movd %%mm1, 32(%0, %%edi) \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movq %%mm6, 36(%0, %%edi) \n\t"
+ "movd %%mm1, 44(%0, %%edi) \n\t"
+ "addl $16, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+1536), "r" (f+256)
+ :"%esi", "%edi", "memory"
+ );
+ return 6*256;
+}
+
+static int a52_resample_STEREO_LFE_to_6_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+ "pxor %%mm6, %%mm6 \n\t"
+ "1: \n\t"
+ "movq 1024(%1, %%esi), %%mm0 \n\t"
+ "movq 2048(%1, %%esi), %%mm1 \n\t"
+ "movq (%1, %%esi), %%mm5 \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm7, %%mm5 \n\t"
+ "leal (%%esi, %%esi, 2), %%edi \n\t"
+
+ "pxor %%mm4, %%mm4 \n\t"
+ "packssdw %%mm5, %%mm0 \n\t" // FfAa
+ "packssdw %%mm4, %%mm1 \n\t" // 00Bb
+ "punpckhwd %%mm0, %%mm4 \n\t" // F0f0
+ "punpcklwd %%mm1, %%mm0 \n\t" // BAba
+ "movq %%mm0, %%mm1 \n\t" // BAba
+ "punpckldq %%mm4, %%mm3 \n\t" // f0XX
+ "punpckldq %%mm6, %%mm0 \n\t" // 00ba
+ "punpckhdq %%mm1, %%mm3 \n\t" // BAf0
+
+ "movq %%mm0, (%0, %%edi) \n\t" // 00ba
+ "punpckhdq %%mm4, %%mm0 \n\t" // F000
+ "movq %%mm3, 8(%0, %%edi) \n\t" // BAf0
+ "movq %%mm0, 16(%0, %%edi) \n\t" // F000
+ "addl $8, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+1536), "r" (f+256)
+ :"%esi", "%edi", "memory"
+ );
+ return 6*256;
+}
+
+static int a52_resample_3F_LFE_to_6_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+ "pxor %%mm6, %%mm6 \n\t"
+ "1: \n\t"
+ "movq 1024(%1, %%esi), %%mm0 \n\t"
+ "movq 3072(%1, %%esi), %%mm1 \n\t"
+ "movq 2048(%1, %%esi), %%mm4 \n\t"
+ "movq (%1, %%esi), %%mm5 \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm7, %%mm4 \n\t"
+ "psubd %%mm7, %%mm5 \n\t"
+ "leal (%%esi, %%esi, 2), %%edi \n\t"
+
+ "packssdw %%mm4, %%mm0 \n\t" // EeAa
+ "packssdw %%mm5, %%mm1 \n\t" // FfBb
+ "movq %%mm0, %%mm2 \n\t" // EeAa
+ "punpcklwd %%mm1, %%mm0 \n\t" // BAba
+ "punpckhwd %%mm1, %%mm2 \n\t" // FEfe
+ "movq %%mm0, %%mm1 \n\t" // BAba
+ "punpckldq %%mm6, %%mm0 \n\t" // 00ba
+ "punpckhdq %%mm1, %%mm1 \n\t" // BABA
+
+ "movq %%mm0, (%0, %%edi) \n\t"
+ "punpckhdq %%mm2, %%mm0 \n\t" // FE00
+ "punpckldq %%mm1, %%mm2 \n\t" // BAfe
+ "movq %%mm2, 8(%0, %%edi) \n\t"
+ "movq %%mm0, 16(%0, %%edi) \n\t"
+ "addl $8, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+1536), "r" (f+256)
+ :"%esi", "%edi", "memory"
+ );
+ return 6*256;
+}
+
+static int a52_resample_2F_2R_LFE_to_6_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+// "pxor %%mm6, %%mm6 \n\t"
+ "1: \n\t"
+ "movq 1024(%1, %%esi), %%mm0 \n\t"
+ "movq 2048(%1, %%esi), %%mm1 \n\t"
+ "movq 3072(%1, %%esi), %%mm2 \n\t"
+ "movq 4096(%1, %%esi), %%mm3 \n\t"
+ "movq (%1, %%esi), %%mm5 \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm7, %%mm2 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "psubd %%mm7, %%mm5 \n\t"
+ "leal (%%esi, %%esi, 2), %%edi \n\t"
+
+ "packssdw %%mm2, %%mm0 \n\t" // CcAa
+ "packssdw %%mm3, %%mm1 \n\t" // DdBb
+ "packssdw %%mm5, %%mm5 \n\t" // FfFf
+ "movq %%mm0, %%mm2 \n\t" // CcAa
+ "punpcklwd %%mm1, %%mm0 \n\t" // BAba
+ "punpckhwd %%mm1, %%mm2 \n\t" // DCdc
+ "pxor %%mm4, %%mm4 \n\t" // 0000
+ "punpcklwd %%mm5, %%mm4 \n\t" // F0f0
+ "movq %%mm0, %%mm1 \n\t" // BAba
+ "movq %%mm4, %%mm3 \n\t" // F0f0
+ "punpckldq %%mm2, %%mm0 \n\t" // dcba
+ "punpckhdq %%mm1, %%mm1 \n\t" // BABA
+ "punpckldq %%mm1, %%mm4 \n\t" // BAf0
+ "punpckhdq %%mm3, %%mm2 \n\t" // F0DC
+
+ "movq %%mm0, (%0, %%edi) \n\t"
+ "movq %%mm4, 8(%0, %%edi) \n\t"
+ "movq %%mm2, 16(%0, %%edi) \n\t"
+ "addl $8, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+1536), "r" (f+256)
+ :"%esi", "%edi", "memory"
+ );
+ return 6*256;
+}
+
+static int a52_resample_3F_2R_LFE_to_6_MMX(float * _f, int16_t * s16){
+ int32_t * f = (int32_t *) _f;
+ asm volatile(
+ "movl $-1024, %%esi \n\t"
+ "movq "MANGLE(magicF2W)", %%mm7 \n\t"
+// "pxor %%mm6, %%mm6 \n\t"
+ "1: \n\t"
+ "movq 1024(%1, %%esi), %%mm0 \n\t"
+ "movq 3072(%1, %%esi), %%mm1 \n\t"
+ "movq 4096(%1, %%esi), %%mm2 \n\t"
+ "movq 5120(%1, %%esi), %%mm3 \n\t"
+ "movq 2048(%1, %%esi), %%mm4 \n\t"
+ "movq (%1, %%esi), %%mm5 \n\t"
+ "psubd %%mm7, %%mm0 \n\t"
+ "psubd %%mm7, %%mm1 \n\t"
+ "psubd %%mm7, %%mm2 \n\t"
+ "psubd %%mm7, %%mm3 \n\t"
+ "psubd %%mm7, %%mm4 \n\t"
+ "psubd %%mm7, %%mm5 \n\t"
+ "leal (%%esi, %%esi, 2), %%edi \n\t"
+
+ "packssdw %%mm2, %%mm0 \n\t" // CcAa
+ "packssdw %%mm3, %%mm1 \n\t" // DdBb
+ "packssdw %%mm4, %%mm4 \n\t" // EeEe
+ "packssdw %%mm5, %%mm5 \n\t" // FfFf
+ "movq %%mm0, %%mm2 \n\t" // CcAa
+ "punpcklwd %%mm1, %%mm0 \n\t" // BAba
+ "punpckhwd %%mm1, %%mm2 \n\t" // DCdc
+ "punpcklwd %%mm5, %%mm4 \n\t" // FEfe
+ "movq %%mm0, %%mm1 \n\t" // BAba
+ "movq %%mm4, %%mm3 \n\t" // FEfe
+ "punpckldq %%mm2, %%mm0 \n\t" // dcba
+ "punpckhdq %%mm1, %%mm1 \n\t" // BABA
+ "punpckldq %%mm1, %%mm4 \n\t" // BAfe
+ "punpckhdq %%mm3, %%mm2 \n\t" // FEDC
+
+ "movq %%mm0, (%0, %%edi) \n\t"
+ "movq %%mm4, 8(%0, %%edi) \n\t"
+ "movq %%mm2, 16(%0, %%edi) \n\t"
+ "addl $8, %%esi \n\t"
+ " jnz 1b \n\t"
+ "emms \n\t"
+ :: "r" (s16+1536), "r" (f+256)
+ :"%esi", "%edi", "memory"
+ );
+ return 6*256;
+}
+
+
+static void* a52_resample_MMX(int flags, int ch){
+ switch (flags) {
+ case A52_MONO:
+ if(ch==5) return a52_resample_MONO_to_5_MMX;
+ break;
+ case A52_CHANNEL:
+ case A52_STEREO:
+ case A52_DOLBY:
+ if(ch==2) return a52_resample_STEREO_to_2_MMX;
+ break;
+ case A52_3F:
+ if(ch==5) return a52_resample_3F_to_5_MMX;
+ break;
+ case A52_2F2R:
+ if(ch==4) return a52_resample_2F_2R_to_4_MMX;
+ break;
+ case A52_3F2R:
+ if(ch==5) return a52_resample_3F_2R_to_5_MMX;
+ break;
+ case A52_MONO | A52_LFE:
+ if(ch==6) return a52_resample_MONO_LFE_to_6_MMX;
+ break;
+ case A52_CHANNEL | A52_LFE:
+ case A52_STEREO | A52_LFE:
+ case A52_DOLBY | A52_LFE:
+ if(ch==6) return a52_resample_STEREO_LFE_to_6_MMX;
+ break;
+ case A52_3F | A52_LFE:
+ if(ch==6) return a52_resample_3F_LFE_to_6_MMX;
+ break;
+ case A52_2F2R | A52_LFE:
+ if(ch==6) return a52_resample_2F_2R_LFE_to_6_MMX;
+ break;
+ case A52_3F2R | A52_LFE:
+ if(ch==6) return a52_resample_3F_2R_LFE_to_6_MMX;
+ break;
+ }
+ return NULL;
+}
+
+
diff --git a/contrib/ffmpeg/libavcodec/liba52/tables.h b/contrib/ffmpeg/libavcodec/liba52/tables.h
new file mode 100644
index 000000000..7f921c9d0
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/liba52/tables.h
@@ -0,0 +1,246 @@
+/*
+ * tables.h
+ * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of a52dec, a free ATSC A-52 stream decoder.
+ * See http://liba52.sourceforge.net/ for updates.
+ *
+ * a52dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * a52dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+static const int8_t exp_1[128] = {
+ -2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,-2,
+ -1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 25,25,25
+};
+static const int8_t exp_2[128] = {
+ -2,-2,-2,-2,-2,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ -2,-2,-2,-2,-2,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ -2,-2,-2,-2,-2,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ -2,-2,-2,-2,-2,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ -2,-2,-2,-2,-2,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ 25,25,25
+};
+static const int8_t exp_3[128] = {
+ -2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,
+ -2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,
+ -2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,
+ -2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,
+ -2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,
+ 25,25,25
+};
+
+#define Q(x) ROUND (32768.0 * x)
+
+#define Q0 Q (-2/3)
+#define Q1 Q (0)
+#define Q2 Q (2/3)
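
A note on the fraction-style arguments used throughout these quantizer tables: Q (-2/3) looks as if it contains the integer division -2/3 == 0, but the macro multiplies by the double constant 32768.0 first, and since * and / associate left to right the whole expression is evaluated in floating point. Worked out:

    /* Illustrative expansion (not part of the patch):
     *   Q (-2/3)  ->  ROUND (32768.0 * -2/3)
     *             ==  ROUND ((32768.0 * -2) / 3)
     *             ==  ROUND (-21845.333...)  ==  -21845                    */
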
+
+static const quantizer_t q_1_0[32] = {
+ Q0, Q0, Q0, Q0, Q0, Q0, Q0, Q0, Q0,
+ Q1, Q1, Q1, Q1, Q1, Q1, Q1, Q1, Q1,
+ Q2, Q2, Q2, Q2, Q2, Q2, Q2, Q2, Q2,
+ 0, 0, 0, 0, 0
+};
+
+static const quantizer_t q_1_1[32] = {
+ Q0, Q0, Q0, Q1, Q1, Q1, Q2, Q2, Q2,
+ Q0, Q0, Q0, Q1, Q1, Q1, Q2, Q2, Q2,
+ Q0, Q0, Q0, Q1, Q1, Q1, Q2, Q2, Q2,
+ 0, 0, 0, 0, 0
+};
+
+static const quantizer_t q_1_2[32] = {
+ Q0, Q1, Q2, Q0, Q1, Q2, Q0, Q1, Q2,
+ Q0, Q1, Q2, Q0, Q1, Q2, Q0, Q1, Q2,
+ Q0, Q1, Q2, Q0, Q1, Q2, Q0, Q1, Q2,
+ 0, 0, 0, 0, 0
+};
+
+#undef Q0
+#undef Q1
+#undef Q2
+
+#define Q0 Q (-4/5)
+#define Q1 Q (-2/5)
+#define Q2 Q (0)
+#define Q3 Q (2/5)
+#define Q4 Q (4/5)
+
+static const quantizer_t q_2_0[128] = {
+ Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,Q0,
+ Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,Q1,
+ Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,Q2,
+ Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,Q3,
+ Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,Q4,
+ 0,0,0
+};
+
+static const quantizer_t q_2_1[128] = {
+ Q0,Q0,Q0,Q0,Q0,Q1,Q1,Q1,Q1,Q1,Q2,Q2,Q2,Q2,Q2,Q3,Q3,Q3,Q3,Q3,Q4,Q4,Q4,Q4,Q4,
+ Q0,Q0,Q0,Q0,Q0,Q1,Q1,Q1,Q1,Q1,Q2,Q2,Q2,Q2,Q2,Q3,Q3,Q3,Q3,Q3,Q4,Q4,Q4,Q4,Q4,
+ Q0,Q0,Q0,Q0,Q0,Q1,Q1,Q1,Q1,Q1,Q2,Q2,Q2,Q2,Q2,Q3,Q3,Q3,Q3,Q3,Q4,Q4,Q4,Q4,Q4,
+ Q0,Q0,Q0,Q0,Q0,Q1,Q1,Q1,Q1,Q1,Q2,Q2,Q2,Q2,Q2,Q3,Q3,Q3,Q3,Q3,Q4,Q4,Q4,Q4,Q4,
+ Q0,Q0,Q0,Q0,Q0,Q1,Q1,Q1,Q1,Q1,Q2,Q2,Q2,Q2,Q2,Q3,Q3,Q3,Q3,Q3,Q4,Q4,Q4,Q4,Q4,
+ 0,0,0
+};
+
+static const quantizer_t q_2_2[128] = {
+ Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,
+ Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,
+ Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,
+ Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,
+ Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,Q0,Q1,Q2,Q3,Q4,
+ 0,0,0
+};
+
+#undef Q0
+#undef Q1
+#undef Q2
+#undef Q3
+#undef Q4
+
+static const quantizer_t q_3[8] = {
+ Q (-6/7), Q (-4/7), Q (-2/7), Q (0), Q (2/7), Q (4/7), Q (6/7), 0
+};
+
+#define Q0 Q (-10/11)
+#define Q1 Q (-8/11)
+#define Q2 Q (-6/11)
+#define Q3 Q (-4/11)
+#define Q4 Q (-2/11)
+#define Q5 Q (0)
+#define Q6 Q (2/11)
+#define Q7 Q (4/11)
+#define Q8 Q (6/11)
+#define Q9 Q (8/11)
+#define QA Q (10/11)
+
+static const quantizer_t q_4_0[128] = {
+ Q0, Q0, Q0, Q0, Q0, Q0, Q0, Q0, Q0, Q0, Q0,
+ Q1, Q1, Q1, Q1, Q1, Q1, Q1, Q1, Q1, Q1, Q1,
+ Q2, Q2, Q2, Q2, Q2, Q2, Q2, Q2, Q2, Q2, Q2,
+ Q3, Q3, Q3, Q3, Q3, Q3, Q3, Q3, Q3, Q3, Q3,
+ Q4, Q4, Q4, Q4, Q4, Q4, Q4, Q4, Q4, Q4, Q4,
+ Q5, Q5, Q5, Q5, Q5, Q5, Q5, Q5, Q5, Q5, Q5,
+ Q6, Q6, Q6, Q6, Q6, Q6, Q6, Q6, Q6, Q6, Q6,
+ Q7, Q7, Q7, Q7, Q7, Q7, Q7, Q7, Q7, Q7, Q7,
+ Q8, Q8, Q8, Q8, Q8, Q8, Q8, Q8, Q8, Q8, Q8,
+ Q9, Q9, Q9, Q9, Q9, Q9, Q9, Q9, Q9, Q9, Q9,
+ QA, QA, QA, QA, QA, QA, QA, QA, QA, QA, QA,
+ 0, 0, 0, 0, 0, 0, 0
+};
+
+static const quantizer_t q_4_1[128] = {
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, QA,
+ 0, 0, 0, 0, 0, 0, 0
+};
+
+#undef Q0
+#undef Q1
+#undef Q2
+#undef Q3
+#undef Q4
+#undef Q5
+#undef Q6
+#undef Q7
+#undef Q8
+#undef Q9
+#undef QA
+
+static const quantizer_t q_5[16] = {
+ Q (-14/15), Q (-12/15), Q (-10/15), Q (-8/15), Q (-6/15),
+ Q (-4/15), Q (-2/15), Q (0), Q (2/15), Q (4/15),
+ Q (6/15), Q (8/15), Q (10/15), Q (12/15), Q (14/15), 0
+};
+
+#ifndef LIBA52_FIXED
+static const sample_t scale_factor[25] = {
+ 0.000030517578125,
+ 0.0000152587890625,
+ 0.00000762939453125,
+ 0.000003814697265625,
+ 0.0000019073486328125,
+ 0.00000095367431640625,
+ 0.000000476837158203125,
+ 0.0000002384185791015625,
+ 0.00000011920928955078125,
+ 0.000000059604644775390625,
+ 0.0000000298023223876953125,
+ 0.00000001490116119384765625,
+ 0.000000007450580596923828125,
+ 0.0000000037252902984619140625,
+ 0.00000000186264514923095703125,
+ 0.000000000931322574615478515625,
+ 0.0000000004656612873077392578125,
+ 0.00000000023283064365386962890625,
+ 0.000000000116415321826934814453125,
+ 0.0000000000582076609134674072265625,
+ 0.00000000002910383045673370361328125,
+ 0.000000000014551915228366851806640625,
+ 0.0000000000072759576141834259033203125,
+ 0.00000000000363797880709171295166015625,
+ 0.000000000001818989403545856475830078125
+};
+#endif
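
The 25 literal constants above are exact powers of two: scale_factor[i] == 2^-(i+15), which removes the 32768 scaling baked into the Q() quantizer tables and applies the AC-3 exponent i. A small generator that reproduces the same values (illustrative only):

    /* Illustrative sketch (not part of the patch): regenerate scale_factor[]
     * as 2^-(i+15). */
    #include <stdio.h>

    int main (void)
    {
        int i;
        for (i = 0; i < 25; i++)
            printf ("    %.45f,\n", 1.0 / (double) (1ULL << (i + 15)));
        return 0;
    }
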
+
+static const uint16_t dither_lut[256] = {
+ 0x0000, 0xa011, 0xe033, 0x4022, 0x6077, 0xc066, 0x8044, 0x2055,
+ 0xc0ee, 0x60ff, 0x20dd, 0x80cc, 0xa099, 0x0088, 0x40aa, 0xe0bb,
+ 0x21cd, 0x81dc, 0xc1fe, 0x61ef, 0x41ba, 0xe1ab, 0xa189, 0x0198,
+ 0xe123, 0x4132, 0x0110, 0xa101, 0x8154, 0x2145, 0x6167, 0xc176,
+ 0x439a, 0xe38b, 0xa3a9, 0x03b8, 0x23ed, 0x83fc, 0xc3de, 0x63cf,
+ 0x8374, 0x2365, 0x6347, 0xc356, 0xe303, 0x4312, 0x0330, 0xa321,
+ 0x6257, 0xc246, 0x8264, 0x2275, 0x0220, 0xa231, 0xe213, 0x4202,
+ 0xa2b9, 0x02a8, 0x428a, 0xe29b, 0xc2ce, 0x62df, 0x22fd, 0x82ec,
+ 0x8734, 0x2725, 0x6707, 0xc716, 0xe743, 0x4752, 0x0770, 0xa761,
+ 0x47da, 0xe7cb, 0xa7e9, 0x07f8, 0x27ad, 0x87bc, 0xc79e, 0x678f,
+ 0xa6f9, 0x06e8, 0x46ca, 0xe6db, 0xc68e, 0x669f, 0x26bd, 0x86ac,
+ 0x6617, 0xc606, 0x8624, 0x2635, 0x0660, 0xa671, 0xe653, 0x4642,
+ 0xc4ae, 0x64bf, 0x249d, 0x848c, 0xa4d9, 0x04c8, 0x44ea, 0xe4fb,
+ 0x0440, 0xa451, 0xe473, 0x4462, 0x6437, 0xc426, 0x8404, 0x2415,
+ 0xe563, 0x4572, 0x0550, 0xa541, 0x8514, 0x2505, 0x6527, 0xc536,
+ 0x258d, 0x859c, 0xc5be, 0x65af, 0x45fa, 0xe5eb, 0xa5c9, 0x05d8,
+ 0xae79, 0x0e68, 0x4e4a, 0xee5b, 0xce0e, 0x6e1f, 0x2e3d, 0x8e2c,
+ 0x6e97, 0xce86, 0x8ea4, 0x2eb5, 0x0ee0, 0xaef1, 0xeed3, 0x4ec2,
+ 0x8fb4, 0x2fa5, 0x6f87, 0xcf96, 0xefc3, 0x4fd2, 0x0ff0, 0xafe1,
+ 0x4f5a, 0xef4b, 0xaf69, 0x0f78, 0x2f2d, 0x8f3c, 0xcf1e, 0x6f0f,
+ 0xede3, 0x4df2, 0x0dd0, 0xadc1, 0x8d94, 0x2d85, 0x6da7, 0xcdb6,
+ 0x2d0d, 0x8d1c, 0xcd3e, 0x6d2f, 0x4d7a, 0xed6b, 0xad49, 0x0d58,
+ 0xcc2e, 0x6c3f, 0x2c1d, 0x8c0c, 0xac59, 0x0c48, 0x4c6a, 0xec7b,
+ 0x0cc0, 0xacd1, 0xecf3, 0x4ce2, 0x6cb7, 0xcca6, 0x8c84, 0x2c95,
+ 0x294d, 0x895c, 0xc97e, 0x696f, 0x493a, 0xe92b, 0xa909, 0x0918,
+ 0xe9a3, 0x49b2, 0x0990, 0xa981, 0x89d4, 0x29c5, 0x69e7, 0xc9f6,
+ 0x0880, 0xa891, 0xe8b3, 0x48a2, 0x68f7, 0xc8e6, 0x88c4, 0x28d5,
+ 0xc86e, 0x687f, 0x285d, 0x884c, 0xa819, 0x0808, 0x482a, 0xe83b,
+ 0x6ad7, 0xcac6, 0x8ae4, 0x2af5, 0x0aa0, 0xaab1, 0xea93, 0x4a82,
+ 0xaa39, 0x0a28, 0x4a0a, 0xea1b, 0xca4e, 0x6a5f, 0x2a7d, 0x8a6c,
+ 0x4b1a, 0xeb0b, 0xab29, 0x0b38, 0x2b6d, 0x8b7c, 0xcb5e, 0x6b4f,
+ 0x8bf4, 0x2be5, 0x6bc7, 0xcbd6, 0xeb83, 0x4b92, 0x0bb0, 0xaba1
+};
diff --git a/contrib/ffmpeg/libavcodec/libgsm.c b/contrib/ffmpeg/libavcodec/libgsm.c
new file mode 100644
index 000000000..7cf59be7a
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/libgsm.c
@@ -0,0 +1,97 @@
+/*
+ * Interface to libgsm for gsm encoding/decoding
+ * Copyright (c) 2005 Alban Bedel <albeu@free.fr>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file libgsm.c
+ * Interface to libgsm for gsm encoding/decoding
+ */
+
+#include "avcodec.h"
+#include <gsm.h>
+
+// gsm.h is missing some essential constants
+#define GSM_BLOCK_SIZE 33
+#define GSM_FRAME_SIZE 160
+
+static int libgsm_init(AVCodecContext *avctx) {
+ if (avctx->channels > 1 || avctx->sample_rate != 8000)
+ return -1;
+
+ avctx->frame_size = GSM_FRAME_SIZE;
+ avctx->block_align = GSM_BLOCK_SIZE;
+
+ avctx->priv_data = gsm_create();
+
+ avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame->key_frame= 1;
+
+ return 0;
+}
+
+static int libgsm_close(AVCodecContext *avctx) {
+ gsm_destroy(avctx->priv_data);
+ avctx->priv_data = NULL;
+ return 0;
+}
+
+static int libgsm_encode_frame(AVCodecContext *avctx,
+ unsigned char *frame, int buf_size, void *data) {
+ // we need a full block
+ if(buf_size < GSM_BLOCK_SIZE) return 0;
+
+ gsm_encode(avctx->priv_data,data,frame);
+
+ return GSM_BLOCK_SIZE;
+}
+
+
+AVCodec libgsm_encoder = {
+ "gsm",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_GSM,
+ 0,
+ libgsm_init,
+ libgsm_encode_frame,
+ libgsm_close,
+};
+
+static int libgsm_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size) {
+
+ if(buf_size < GSM_BLOCK_SIZE) return 0;
+
+ if(gsm_decode(avctx->priv_data,buf,data)) return -1;
+
+ *data_size = GSM_FRAME_SIZE*2;
+ return GSM_BLOCK_SIZE;
+}
+
+AVCodec libgsm_decoder = {
+ "gsm",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_GSM,
+ 0,
+ libgsm_init,
+ NULL,
+ libgsm_close,
+ libgsm_decode_frame,
+};
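
This wrapper simply plugs gsm_create()/gsm_encode()/gsm_decode() into the AVCodec interface of this FFmpeg snapshot. A hedged sketch of driving the encoder with the libavcodec API of that era; the surrounding calls (avcodec_alloc_context(), avcodec_encode_audio() and friends) are the old pre-AVPacket API and may differ in other versions:

    /* Illustrative sketch (not part of the patch): encode one 160-sample
     * frame with the GSM wrapper via the old libavcodec audio API. */
    #include "avcodec.h"

    static int encode_one_frame (const short * pcm160, uint8_t * out, int out_size)
    {
        AVCodec *codec;
        AVCodecContext *ctx;
        int n;

        avcodec_register_all ();
        codec = avcodec_find_encoder (CODEC_ID_GSM);
        if (!codec)
            return -1;
        ctx = avcodec_alloc_context ();
        ctx->channels = 1;
        ctx->sample_rate = 8000;            /* the only layout libgsm_init accepts */
        if (avcodec_open (ctx, codec) < 0)
            return -1;
        n = avcodec_encode_audio (ctx, out, out_size, pcm160);  /* 33-byte block */
        avcodec_close (ctx);
        av_free (ctx);
        return n;
    }
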
diff --git a/src/libffmpeg/libavcodec/loco.c b/contrib/ffmpeg/libavcodec/loco.c
index 37f141821..2ec850ed0 100644
--- a/src/libffmpeg/libavcodec/loco.c
+++ b/contrib/ffmpeg/libavcodec/loco.c
@@ -2,18 +2,20 @@
* LOCO codec
* Copyright (c) 2005 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/lzo.c b/contrib/ffmpeg/libavcodec/lzo.c
index d9b42f848..015c80d0d 100644
--- a/src/libffmpeg/libavcodec/lzo.c
+++ b/contrib/ffmpeg/libavcodec/lzo.c
@@ -2,18 +2,20 @@
* LZO 1x decompression
* Copyright (c) 2006 Reimar Doeffinger
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "common.h"
@@ -164,9 +166,9 @@ int lzo1x_decode(void *out, int *outlen, void *in, int *inlen) {
int x;
LZOContext c;
c.in = in;
- c.in_end = in + *inlen;
+ c.in_end = (uint8_t *)in + *inlen;
c.out = c.out_start = out;
- c.out_end = out + * outlen;
+ c.out_end = (uint8_t *)out + * outlen;
c.error = 0;
x = get_byte(&c);
if (x > 17) {
diff --git a/contrib/ffmpeg/libavcodec/lzo.h b/contrib/ffmpeg/libavcodec/lzo.h
new file mode 100644
index 000000000..4d00dd721
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/lzo.h
@@ -0,0 +1,35 @@
+/*
+ * LZO 1x decompression
+ * copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef LZO_H
+#define LZO_H
+
+#define LZO_INPUT_DEPLETED 1
+#define LZO_OUTPUT_FULL 2
+#define LZO_INVALID_BACKPTR 4
+#define LZO_ERROR 8
+
+#define LZO_INPUT_PADDING 4
+#define LZO_OUTPUT_PADDING 12
+
+int lzo1x_decode(void *out, int *outlen, void *in, int *inlen);
+
+#endif
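
The header states the calling contract: lzo1x_decode() reads from in, writes to out, and returns a bitmask of the LZO_* flags above (0 on success), while the *_PADDING macros say how much readable/writable slack must exist past each buffer. A usage sketch under those assumptions; the remaining-bytes semantics of the length parameters is inferred from lzo.c and should be treated as an assumption:

    /* Illustrative sketch (not part of the patch): decompress one LZO1X block.
     * dst should have LZO_OUTPUT_PADDING writable bytes and src
     * LZO_INPUT_PADDING readable bytes of slack past their ends. */
    #include "lzo.h"

    static int decompress (void *dst, int dstlen, void *src, int srclen)
    {
        int outlen = dstlen;
        int inlen  = srclen;
        int err = lzo1x_decode (dst, &outlen, src, &inlen);

        if (err)
            return -1;
        return dstlen - outlen;   /* bytes produced, assuming outlen counts what is left */
    }
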
diff --git a/contrib/ffmpeg/libavcodec/lzw.c b/contrib/ffmpeg/libavcodec/lzw.c
new file mode 100644
index 000000000..b5bb33a21
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/lzw.c
@@ -0,0 +1,262 @@
+/*
+ * LZW decoder
+ * Copyright (c) 2003 Fabrice Bellard.
+ * Copyright (c) 2006 Konstantin Shishkov.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file lzw.c
+ * @brief LZW decoding routines
+ * @author Fabrice Bellard
+ * Modified for use in TIFF by Konstantin Shishkov
+ */
+
+#include "avcodec.h"
+#include "lzw.h"
+
+#define LZW_MAXBITS 12
+#define LZW_SIZTABLE (1<<LZW_MAXBITS)
+
+static const uint16_t mask[17] =
+{
+ 0x0000, 0x0001, 0x0003, 0x0007,
+ 0x000F, 0x001F, 0x003F, 0x007F,
+ 0x00FF, 0x01FF, 0x03FF, 0x07FF,
+ 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF
+};
+
+struct LZWState {
+ int eob_reached;
+ uint8_t *pbuf, *ebuf;
+ int bbits;
+ unsigned int bbuf;
+
+ int mode; ///< Decoder mode
+ int cursize; ///< The current code size
+ int curmask;
+ int codesize;
+ int clear_code;
+ int end_code;
+ int newcodes; ///< First available code
+ int top_slot; ///< Highest code for current size
+ int top_slot2; ///< Highest possible code for current size (<=top_slot)
+ int slot; ///< Last read code
+ int fc, oc;
+ uint8_t *sp;
+ uint8_t stack[LZW_SIZTABLE];
+ uint8_t suffix[LZW_SIZTABLE];
+ uint16_t prefix[LZW_SIZTABLE];
+ int bs; ///< current buffer size for GIF
+};
+
+/* get one code from stream */
+static int lzw_get_code(struct LZWState * s)
+{
+ int c, sizbuf;
+
+ if(s->mode == FF_LZW_GIF) {
+ while (s->bbits < s->cursize) {
+ if (!s->bs) {
+ sizbuf = *s->pbuf++;
+ s->bs = sizbuf;
+ if(!sizbuf) {
+ s->eob_reached = 1;
+ }
+ }
+ s->bbuf |= (*s->pbuf++) << s->bbits;
+ s->bbits += 8;
+ s->bs--;
+ }
+ c = s->bbuf & s->curmask;
+ s->bbuf >>= s->cursize;
+ } else { // TIFF
+ while (s->bbits < s->cursize) {
+ if (s->pbuf >= s->ebuf) {
+ s->eob_reached = 1;
+ }
+ s->bbuf = (s->bbuf << 8) | (*s->pbuf++);
+ s->bbits += 8;
+ }
+ c = (s->bbuf >> (s->bbits - s->cursize)) & s->curmask;
+ }
+ s->bbits -= s->cursize;
+ return c;
+}
+
+uint8_t* ff_lzw_cur_ptr(LZWState *p)
+{
+ return ((struct LZWState*)p)->pbuf;
+}
+
+void ff_lzw_decode_tail(LZWState *p)
+{
+ struct LZWState *s = (struct LZWState *)p;
+ while(!s->eob_reached)
+ lzw_get_code(s);
+}
+
+void ff_lzw_decode_open(LZWState **p)
+{
+ *p = av_mallocz(sizeof(struct LZWState));
+}
+
+void ff_lzw_decode_close(LZWState **p)
+{
+ av_freep(p);
+}
+
+/**
+ * Initialize LZW decoder
+ * @param s LZW context
+ * @param csize initial code size in bits
+ * @param buf input data
+ * @param buf_size input data size
+ * @param mode decoder working mode - either GIF or TIFF
+ */
+int ff_lzw_decode_init(LZWState *p, int csize, uint8_t *buf, int buf_size, int mode)
+{
+ struct LZWState *s = (struct LZWState *)p;
+
+ if(csize < 1 || csize > LZW_MAXBITS)
+ return -1;
+ /* read buffer */
+ s->eob_reached = 0;
+ s->pbuf = buf;
+ s->ebuf = s->pbuf + buf_size;
+ s->bbuf = 0;
+ s->bbits = 0;
+ s->bs = 0;
+
+ /* decoder */
+ s->codesize = csize;
+ s->cursize = s->codesize + 1;
+ s->curmask = mask[s->cursize];
+ s->top_slot = 1 << s->cursize;
+ s->clear_code = 1 << s->codesize;
+ s->end_code = s->clear_code + 1;
+ s->slot = s->newcodes = s->clear_code + 2;
+ s->oc = s->fc = 0;
+ s->sp = s->stack;
+
+ s->mode = mode;
+ switch(s->mode){
+ case FF_LZW_GIF:
+ s->top_slot2 = s->top_slot;
+ break;
+ case FF_LZW_TIFF:
+ s->top_slot2 = s->top_slot - 1;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Decode given number of bytes
+ * NOTE: the algorithm here is inspired by the LZW GIF decoder
+ * written by Steven A. Bennett in 1987.
+ *
+ * @param s LZW context
+ * @param buf output buffer
+ * @param len number of bytes to decode
+ * @return number of bytes decoded
+ */
+int ff_lzw_decode(LZWState *p, uint8_t *buf, int len){
+ int l, c, code, oc, fc;
+ uint8_t *sp;
+ struct LZWState *s = (struct LZWState *)p;
+
+ if (s->end_code < 0)
+ return 0;
+
+ l = len;
+ sp = s->sp;
+ oc = s->oc;
+ fc = s->fc;
+
+ while (sp > s->stack) {
+ *buf++ = *(--sp);
+ if ((--l) == 0)
+ goto the_end;
+ }
+
+ for (;;) {
+ c = lzw_get_code(s);
+ if (c == s->end_code) {
+ s->end_code = -1;
+ break;
+ } else if (c == s->clear_code) {
+ s->cursize = s->codesize + 1;
+ s->curmask = mask[s->cursize];
+ s->slot = s->newcodes;
+ s->top_slot = 1 << s->cursize;
+ s->top_slot2 = s->top_slot;
+ if(s->mode == FF_LZW_TIFF)
+ s->top_slot2--;
+ while ((c = lzw_get_code(s)) == s->clear_code);
+ if (c == s->end_code) {
+ s->end_code = -1;
+ break;
+ }
+ /* test error */
+ if (c >= s->slot)
+ c = 0;
+ fc = oc = c;
+ *buf++ = c;
+ if ((--l) == 0)
+ break;
+ } else {
+ code = c;
+ if (code >= s->slot) {
+ *sp++ = fc;
+ code = oc;
+ }
+ while (code >= s->newcodes) {
+ *sp++ = s->suffix[code];
+ code = s->prefix[code];
+ }
+ *sp++ = code;
+ if (s->slot < s->top_slot) {
+ s->suffix[s->slot] = fc = code;
+ s->prefix[s->slot++] = oc;
+ oc = c;
+ }
+ if (s->slot >= s->top_slot2) {
+ if (s->cursize < LZW_MAXBITS) {
+ s->top_slot <<= 1;
+ s->top_slot2 = s->top_slot;
+ if(s->mode == FF_LZW_TIFF)
+ s->top_slot2--;
+ s->curmask = mask[++s->cursize];
+ }
+ }
+ while (sp > s->stack) {
+ *buf++ = *(--sp);
+ if ((--l) == 0)
+ goto the_end;
+ }
+ }
+ }
+ the_end:
+ s->sp = sp;
+ s->oc = oc;
+ s->fc = fc;
+ return len - l;
+}
diff --git a/contrib/ffmpeg/libavcodec/lzw.h b/contrib/ffmpeg/libavcodec/lzw.h
new file mode 100644
index 000000000..60f115caf
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/lzw.h
@@ -0,0 +1,49 @@
+/*
+ * LZW decoder
+ * Copyright (c) 2003 Fabrice Bellard.
+ * Copyright (c) 2006 Konstantin Shishkov.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file lzw.h
+ * @brief LZW decoding routines
+ * @author Fabrice Bellard
+ * Modified for use in TIFF by Konstantin Shishkov
+ */
+
+#ifndef LZW_H
+#define LZW_H
+
+enum FF_LZW_MODES{
+ FF_LZW_GIF,
+ FF_LZW_TIFF
+};
+
+/* clients should treat LZWState as opaque */
+typedef void LZWState;
+
+/* first two functions de/allocate memory for LZWState */
+void ff_lzw_decode_open(LZWState **p);
+void ff_lzw_decode_close(LZWState **p);
+int ff_lzw_decode_init(LZWState *s, int csize, uint8_t *buf, int buf_size, int mode);
+int ff_lzw_decode(LZWState *s, uint8_t *buf, int len);
+uint8_t* ff_lzw_cur_ptr(LZWState *lzw);
+void ff_lzw_decode_tail(LZWState *lzw);
+
+#endif
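
A minimal GIF-style caller sketch for the API above (names and sizes are illustrative, not taken from any particular decoder):

    static int decode_indexed_image(uint8_t *dst, int npixels,
                                    uint8_t *src, int src_size, int codesize)
    {
        LZWState *lzw;
        int n;

        ff_lzw_decode_open(&lzw);
        if (ff_lzw_decode_init(lzw, codesize, src, src_size, FF_LZW_GIF) < 0) {
            ff_lzw_decode_close(&lzw);
            return -1;
        }
        n = ff_lzw_decode(lzw, dst, npixels); /* returns number of bytes decoded */
        ff_lzw_decode_tail(lzw);              /* drain the remaining input sub-blocks */
        ff_lzw_decode_close(&lzw);
        return n;
    }
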
diff --git a/src/libffmpeg/libavcodec/mace.c b/contrib/ffmpeg/libavcodec/mace.c
index a104fb04e..95839379a 100644
--- a/src/libffmpeg/libavcodec/mace.c
+++ b/contrib/ffmpeg/libavcodec/mace.c
@@ -2,18 +2,20 @@
* MACE decoder
* Copyright (c) 2002 Laszlo Torok <torokl@alpha.dfmk.hu>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -410,18 +412,18 @@ static int mace_decode_frame(AVCodecContext *avctx,
#ifdef DEBUG
puts("mace_decode_frame[3]()");
#endif
- Exp1to3(c, buf, samples, buf_size / 2, avctx->channels, 1);
+ Exp1to3(c, buf, samples, buf_size / 2 / avctx->channels, avctx->channels, 1);
if (avctx->channels == 2)
- Exp1to3(c, buf, samples+1, buf_size / 2, 2, 2);
+ Exp1to3(c, buf, samples+1, buf_size / 2 / 2, 2, 2);
*data_size = 2 * 3 * buf_size;
break;
case CODEC_ID_MACE6:
#ifdef DEBUG
puts("mace_decode_frame[6]()");
#endif
- Exp1to6(c, buf, samples, buf_size, avctx->channels, 1);
+ Exp1to6(c, buf, samples, buf_size / avctx->channels, avctx->channels, 1);
if (avctx->channels == 2)
- Exp1to6(c, buf, samples+1, buf_size, 2, 2);
+ Exp1to6(c, buf, samples+1, buf_size / 2, 2, 2);
*data_size = 2 * 6 * buf_size;
break;
default:
diff --git a/contrib/ffmpeg/libavcodec/mathops.h b/contrib/ffmpeg/libavcodec/mathops.h
new file mode 100644
index 000000000..9ae34d71b
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/mathops.h
@@ -0,0 +1,69 @@
+/*
+ * simple math operations
+ * Copyright (c) 2001, 2002 Fabrice Bellard.
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef MATHOPS_H
+#define MATHOPS_H
+
+#ifdef ARCH_X86_32
+
+#include "i386/mathops.h"
+
+#elif defined(ARCH_ARMV4L)
+
+#include "armv4l/mathops.h"
+
+#elif defined(ARCH_PPC)
+
+#include "ppc/mathops.h"
+
+#endif
+
+/* generic implementation */
+
+#ifndef MULL
+# define MULL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS)
+#endif
+
+#ifndef MULH
+//gcc 3.4 creates an incredibly bloated mess out of this
+//# define MULH(a,b) (((int64_t)(a) * (int64_t)(b))>>32)
+
+static always_inline int MULH(int a, int b){
+ return ((int64_t)(a) * (int64_t)(b))>>32;
+}
+#endif
+
+#ifndef MUL64
+# define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
+#endif
+
+/* signed 16x16 -> 32 multiply add accumulate */
+#ifndef MAC16
+# define MAC16(rt, ra, rb) rt += (ra) * (rb)
+#endif
+
+/* signed 16x16 -> 32 multiply */
+#ifndef MUL16
+# define MUL16(ra, rb) ((ra) * (rb))
+#endif
+
+#endif //MATHOPS_H
+
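
The generic fallbacks above leave FRAC_BITS to the including file; a hypothetical Q15 fixed-point use of MULL and MULH inside a libavcodec source file (which already provides always_inline) might look like:

    #define FRAC_BITS 15          /* illustrative Q15 format */
    #include "mathops.h"

    static int q15_mul(int a, int b)
    {
        return MULL(a, b);        /* (a*b) >> FRAC_BITS via a 64-bit intermediate */
    }

    static int high_word(int a, int b)
    {
        return MULH(a, b);        /* top 32 bits of the 64-bit product */
    }
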
diff --git a/src/libffmpeg/libavcodec/mdct.c b/contrib/ffmpeg/libavcodec/mdct.c
index 5c3e7b3b1..de3275289 100644
--- a/src/libffmpeg/libavcodec/mdct.c
+++ b/contrib/ffmpeg/libavcodec/mdct.c
@@ -2,18 +2,20 @@
* MDCT/IMDCT transforms
* Copyright (c) 2002 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dsputil.h"
diff --git a/src/libffmpeg/libavcodec/mdec.c b/contrib/ffmpeg/libavcodec/mdec.c
index 79caa24c1..ee43b2777 100644
--- a/src/libffmpeg/libavcodec/mdec.c
+++ b/contrib/ffmpeg/libavcodec/mdec.c
@@ -2,18 +2,20 @@
* PSX MDEC codec
* Copyright (c) 2003 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* based upon code from Sebastian Jedruszkiewicz <elf@frogger.rules.pl>
diff --git a/src/libffmpeg/libavcodec/mjpeg.c b/contrib/ffmpeg/libavcodec/mjpeg.c
index dffd98946..3d8383e7b 100644
--- a/src/libffmpeg/libavcodec/mjpeg.c
+++ b/contrib/ffmpeg/libavcodec/mjpeg.c
@@ -4,18 +4,20 @@
* Copyright (c) 2003 Alex Beregszaszi
* Copyright (c) 2003-2004 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Support for external huffman table, various fixes (AVID workaround),
@@ -34,6 +36,7 @@
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
+#include "bytestream.h"
/* use two quantizer tables (one for luminance and one for chrominance) */
/* not yet working */
@@ -613,7 +616,7 @@ static void encode_block(MpegEncContext *s, DCTELEM *block, int n)
uint16_t *huff_code_ac;
/* DC coef */
- component = (n <= 3 ? 0 : n - 4 + 1);
+ component = (n <= 3 ? 0 : (n&1) + 1);
dc = block[0]; /* overflow is impossible */
val = dc - s->last_dc[component];
if (n < 4) {
@@ -666,9 +669,16 @@ void mjpeg_encode_mb(MpegEncContext *s,
DCTELEM block[6][64])
{
int i;
- for(i=0;i<6;i++) {
+ for(i=0;i<5;i++) {
encode_block(s, block[i], i);
}
+ if (s->chroma_format == CHROMA_420) {
+ encode_block(s, block[5], 5);
+ } else {
+ encode_block(s, block[6], 6);
+ encode_block(s, block[5], 5);
+ encode_block(s, block[7], 7);
+ }
}
static int encode_picture_lossless(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
@@ -845,6 +855,7 @@ typedef struct MJpegDecodeContext {
int bottom_field; /* true if bottom field */
int lossless;
int ls;
+ int progressive;
int rgb;
int rct; /* standard rct */
int pegasus_rct; /* pegasus reversible colorspace transform */
@@ -876,6 +887,7 @@ typedef struct MJpegDecodeContext {
DECLARE_ALIGNED_8(DCTELEM, block[64]);
ScanTable scantable;
void (*idct_put)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
+ void (*idct_add)(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
int restart_interval;
int restart_count;
@@ -932,6 +944,7 @@ static int mjpeg_decode_init(AVCodecContext *avctx)
s->scantable= s2.intra_scantable;
s->idct_put= s2.dsp.idct_put;
+ s->idct_add= s2.dsp.idct_add;
s->mpeg_enc_ctx_allocated = 0;
s->buffer_size = 0;
@@ -1103,7 +1116,7 @@ static int mjpeg_decode_dht(MJpegDecodeContext *s)
static int mjpeg_decode_sof(MJpegDecodeContext *s)
{
- int len, nb_components, i, width, height;
+ int len, nb_components, i, width, height, pix_fmt_id;
/* XXX: verify len field validity */
len = get_bits(&s->gb, 16);
@@ -1116,10 +1129,6 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s)
av_log(s->avctx, AV_LOG_ERROR, "only 8 bits/component accepted\n");
return -1;
}
- if (s->bits > 8 && s->ls){
- av_log(s->avctx, AV_LOG_ERROR, "only <= 8 bits/component accepted for JPEG-LS\n");
- return -1;
- }
height = get_bits(&s->gb, 16);
width = get_bits(&s->gb, 16);
@@ -1132,6 +1141,10 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s)
if (nb_components <= 0 ||
nb_components > MAX_COMPONENTS)
return -1;
+ if (s->ls && !(s->bits <= 8 || nb_components == 1)){
+ av_log(s->avctx, AV_LOG_ERROR, "only <= 8 bits/component or 16-bit gray accepted for JPEG-LS\n");
+ return -1;
+ }
s->nb_components = nb_components;
s->h_max = 1;
s->v_max = 1;
@@ -1188,8 +1201,13 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s)
return 0;
/* XXX: not complete test ! */
- switch((s->h_count[0] << 4) | s->v_count[0]) {
- case 0x11:
+ pix_fmt_id = (s->h_count[0] << 20) | (s->v_count[0] << 16) |
+ (s->h_count[1] << 12) | (s->v_count[1] << 8) |
+ (s->h_count[2] << 4) | s->v_count[2];
+ dprintf("pix fmt id %x\n", pix_fmt_id);
+ switch(pix_fmt_id){
+ case 0x222222:
+ case 0x111111:
if(s->rgb){
s->avctx->pix_fmt = PIX_FMT_RGBA32;
}else if(s->nb_components==3)
@@ -1197,19 +1215,22 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s)
else
s->avctx->pix_fmt = PIX_FMT_GRAY8;
break;
- case 0x21:
+ case 0x211111:
+ case 0x221212:
s->avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV422P : PIX_FMT_YUVJ422P;
break;
default:
- case 0x22:
+ case 0x221111:
s->avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV420P : PIX_FMT_YUVJ420P;
break;
}
if(s->ls){
if(s->nb_components > 1)
s->avctx->pix_fmt = PIX_FMT_RGB24;
- else
+ else if(s->bits <= 8)
s->avctx->pix_fmt = PIX_FMT_GRAY8;
+ else
+ s->avctx->pix_fmt = PIX_FMT_GRAY16;
}
if(s->picture.data[0])
@@ -1234,6 +1255,12 @@ static int mjpeg_decode_sof(MJpegDecodeContext *s)
dprintf("decode_sof0: error, len(%d) mismatch\n", len);
}
+ /* start from a totally blank picture, as progressive JPEG only adds detail to it */
+ if(s->progressive){
+ memset(s->picture.data[0], 0, s->picture.linesize[0] * s->height);
+ memset(s->picture.data[1], 0, s->picture.linesize[1] * s->height >> (s->v_max - s->v_count[1]));
+ memset(s->picture.data[2], 0, s->picture.linesize[2] * s->height >> (s->v_max - s->v_count[2]));
+ }
return 0;
}
@@ -1311,6 +1338,83 @@ static int decode_block(MJpegDecodeContext *s, DCTELEM *block,
return 0;
}
+/* decode block and dequantize - progressive JPEG version */
+static int decode_block_progressive(MJpegDecodeContext *s, DCTELEM *block,
+ int component, int dc_index, int ac_index, int16_t *quant_matrix,
+ int ss, int se, int Ah, int Al, int *EOBRUN)
+{
+ int code, i, j, level, val, run;
+
+ /* DC coef */
+ if(!ss){
+ val = mjpeg_decode_dc(s, dc_index);
+ if (val == 0xffff) {
+ dprintf("error dc\n");
+ return -1;
+ }
+ val = (val * quant_matrix[0] << Al) + s->last_dc[component];
+ }else
+ val = 0;
+ s->last_dc[component] = val;
+ block[0] = val;
+ if(!se) return 0;
+ /* AC coefs */
+ if(*EOBRUN){
+ (*EOBRUN)--;
+ return 0;
+ }
+ {OPEN_READER(re, &s->gb)
+ for(i=ss;;i++) {
+ UPDATE_CACHE(re, &s->gb);
+ GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2)
+ /* Progressive JPEG uses AC coefficients starting from zero, while this decoder sets an offset of 16 by default */
+ code -= 16;
+ if(code & 0xF) {
+ i += ((unsigned) code) >> 4;
+ code &= 0xf;
+ if(code > MIN_CACHE_BITS - 16){
+ UPDATE_CACHE(re, &s->gb)
+ }
+ {
+ int cache=GET_CACHE(re,&s->gb);
+ int sign=(~cache)>>31;
+ level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
+ }
+
+ LAST_SKIP_BITS(re, &s->gb, code)
+
+ if (i >= se) {
+ if(i == se){
+ j = s->scantable.permutated[se];
+ block[j] = level * quant_matrix[j] << Al;
+ break;
+ }
+ dprintf("error count: %d\n", i);
+ return -1;
+ }
+ j = s->scantable.permutated[i];
+ block[j] = level * quant_matrix[j] << Al;
+ }else{
+ run = ((unsigned) code) >> 4;
+ if(run == 0xF){// ZRL - skip 15 coefficients
+ i += 15;
+ }else{
+ val = run;
+ run = (1 << run);
+ UPDATE_CACHE(re, &s->gb);
+ run += (GET_CACHE(re, &s->gb) >> (32 - val)) & (run - 1);
+ if(val)
+ LAST_SKIP_BITS(re, &s->gb, val);
+ *EOBRUN = run - 1;
+ break;
+ }
+ }
+ }
+ CLOSE_READER(re, &s->gb)}
+
+ return 0;
+}
+
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int predictor, int point_transform){
int i, mb_x, mb_y;
uint16_t buffer[32768][4];
@@ -1462,10 +1566,11 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point
return 0;
}
-static int mjpeg_decode_scan(MJpegDecodeContext *s){
+static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int ss, int se, int Ah, int Al){
int i, mb_x, mb_y;
- const int nb_components=3;
+ int EOBRUN = 0;
+ if(Ah) return 0; /* TODO decode refinement planes too */
for(mb_y = 0; mb_y < s->mb_height; mb_y++) {
for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
if (s->restart_interval && !s->restart_count)
@@ -1482,12 +1587,18 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s){
y = 0;
for(j=0;j<n;j++) {
memset(s->block, 0, sizeof(s->block));
- if (decode_block(s, s->block, i,
+ if (!s->progressive && decode_block(s, s->block, i,
s->dc_index[i], s->ac_index[i],
s->quant_matrixes[ s->quant_index[c] ]) < 0) {
dprintf("error y=%d x=%d\n", mb_y, mb_x);
return -1;
}
+ if (s->progressive && decode_block_progressive(s, s->block, i,
+ s->dc_index[i], s->ac_index[i],
+ s->quant_matrixes[ s->quant_index[c] ], ss, se, Ah, Al, &EOBRUN) < 0) {
+ dprintf("error y=%d x=%d\n", mb_y, mb_x);
+ return -1;
+ }
// dprintf("mb: %d %d processed\n", mb_y, mb_x);
ptr = s->picture.data[c] +
(((s->linesize[c] * (v * mb_y + y) * 8) +
@@ -1495,7 +1606,10 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s){
if (s->interlaced && s->bottom_field)
ptr += s->linesize[c] >> 1;
//av_log(NULL, AV_LOG_DEBUG, "%d %d %d %d %d %d %d %d \n", mb_x, mb_y, x, y, c, s->bottom_field, (v * mb_y + y) * 8, (h * mb_x + x) * 8);
- s->idct_put(ptr, s->linesize[c], s->block);
+ if(!s->progressive)
+ s->idct_put(ptr, s->linesize[c], s->block);
+ else
+ s->idct_add(ptr, s->linesize[c], s->block);
if (++x == h) {
x = 0;
y++;
@@ -1520,7 +1634,7 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
int len, nb_components, i, h, v, predictor, point_transform;
int vmax, hmax, index, id;
const int block_size= s->lossless ? 1 : 8;
- int ilv;
+ int ilv, prev_shift;
/* XXX: verify len field validity */
len = get_bits(&s->gb, 16);
@@ -1530,12 +1644,6 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
dprintf("decode_sos: invalid len (%d)\n", len);
return -1;
}
- /* XXX: only interleaved scan accepted */
- if ((nb_components != s->nb_components) && !s->ls)
- {
- dprintf("decode_sos: components(%d) mismatch\n", nb_components);
- return -1;
- }
vmax = 0;
hmax = 0;
for(i=0;i<nb_components;i++) {
@@ -1585,7 +1693,7 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
predictor= get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
ilv= get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
- skip_bits(&s->gb, 4); /* Ah */
+ prev_shift = get_bits(&s->gb, 4); /* Ah */
point_transform= get_bits(&s->gb, 4); /* Al */
for(i=0;i<nb_components;i++)
@@ -1596,8 +1704,8 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
} else if(!s->ls) { /* skip this for JPEG-LS */
- h = s->h_max / s->h_scount[s->comp_index[0]];
- v = s->v_max / s->v_scount[s->comp_index[0]];
+ h = s->h_max / s->h_scount[0];
+ v = s->v_max / s->v_scount[0];
s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
s->nb_blocks[0] = 1;
@@ -1631,7 +1739,7 @@ static int mjpeg_decode_sos(MJpegDecodeContext *s)
}
}
}else{
- if(mjpeg_decode_scan(s) < 0)
+ if(mjpeg_decode_scan(s, nb_components, predictor, ilv, prev_shift, point_transform) < 0)
return -1;
}
emms_c();
@@ -1801,7 +1909,7 @@ static int mjpeg_decode_com(MJpegDecodeContext *s)
{
int len = get_bits(&s->gb, 16);
if (len >= 2 && 8*len - 16 + get_bits_count(&s->gb) <= s->gb.size_in_bits) {
- uint8_t *cbuf = av_malloc(len - 1);
+ char *cbuf = av_malloc(len - 1);
if (cbuf) {
int i;
for (i = 0; i < len - 2; i++)
@@ -2020,17 +2128,26 @@ static int mjpeg_decode_frame(AVCodecContext *avctx,
break;
case SOF0:
s->lossless=0;
+ s->progressive=0;
+ if (mjpeg_decode_sof(s) < 0)
+ return -1;
+ break;
+ case SOF2:
+ s->lossless=0;
+ s->progressive=1;
if (mjpeg_decode_sof(s) < 0)
return -1;
break;
case SOF3:
s->lossless=1;
+ s->progressive=0;
if (mjpeg_decode_sof(s) < 0)
return -1;
break;
case SOF48:
s->lossless=1;
s->ls=1;
+ s->progressive=0;
if (mjpeg_decode_sof(s) < 0)
return -1;
break;
@@ -2039,6 +2156,7 @@ static int mjpeg_decode_frame(AVCodecContext *avctx,
return -1;
break;
case EOI:
+ s->cur_scan = 0;
if ((s->buggy_avid && !s->interlaced) || s->restart_interval)
break;
eoi_parser:
@@ -2076,7 +2194,6 @@ eoi_parser:
mjpeg_decode_dri(s);
break;
case SOF1:
- case SOF2:
case SOF5:
case SOF6:
case SOF7:
@@ -2387,6 +2504,61 @@ static int mjpeg_decode_end(AVCodecContext *avctx)
return 0;
}
+static int mjpega_dump_header(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe)
+{
+ uint8_t *poutbufp;
+ int i;
+
+ if (avctx->codec_id != CODEC_ID_MJPEG) {
+ av_log(avctx, AV_LOG_ERROR, "mjpega bitstream filter only applies to mjpeg codec\n");
+ return 0;
+ }
+
+ *poutbuf_size = 0;
+ *poutbuf = av_malloc(buf_size + 44 + FF_INPUT_BUFFER_PADDING_SIZE);
+ poutbufp = *poutbuf;
+ bytestream_put_byte(&poutbufp, 0xff);
+ bytestream_put_byte(&poutbufp, SOI);
+ bytestream_put_byte(&poutbufp, 0xff);
+ bytestream_put_byte(&poutbufp, APP1);
+ bytestream_put_be16(&poutbufp, 42); /* size */
+ bytestream_put_be32(&poutbufp, 0);
+ bytestream_put_buffer(&poutbufp, "mjpg", 4);
+ bytestream_put_be32(&poutbufp, buf_size + 44); /* field size */
+ bytestream_put_be32(&poutbufp, buf_size + 44); /* pad field size */
+ bytestream_put_be32(&poutbufp, 0); /* next ptr */
+
+ for (i = 0; i < buf_size - 1; i++) {
+ if (buf[i] == 0xff) {
+ switch (buf[i + 1]) {
+ case DQT: /* quant off */
+ case DHT: /* huff off */
+ case SOF0: /* image off */
+ bytestream_put_be32(&poutbufp, i + 46);
+ break;
+ case SOS:
+ bytestream_put_be32(&poutbufp, i + 46); /* scan off */
+ bytestream_put_be32(&poutbufp, i + 46 + BE_16(buf + i + 2)); /* data off */
+ bytestream_put_buffer(&poutbufp, buf + 2, buf_size - 2); /* skip already written SOI */
+ *poutbuf_size = poutbufp - *poutbuf;
+ return 1;
+ case APP1:
+ if (i + 8 < buf_size && LE_32(buf + i + 8) == ff_get_fourcc("mjpg")) {
+ av_log(avctx, AV_LOG_ERROR, "bitstream already formatted\n");
+ memcpy(*poutbuf, buf, buf_size);
+ *poutbuf_size = buf_size;
+ return 1;
+ }
+ }
+ }
+ }
+ av_freep(poutbuf);
+ av_log(avctx, AV_LOG_ERROR, "could not find SOS marker in bitstream\n");
+ return 0;
+}
+
AVCodec mjpeg_decoder = {
"mjpeg",
CODEC_TYPE_VIDEO,
@@ -2446,3 +2618,8 @@ AVCodecParser mjpeg_parser = {
ff_parse_close,
};
+AVBitStreamFilter mjpega_dump_header_bsf = {
+ "mjpegadump",
+ 0,
+ mjpega_dump_header,
+};
diff --git a/src/libffmpeg/libavcodec/mlib/dsputil_mlib.c b/contrib/ffmpeg/libavcodec/mlib/dsputil_mlib.c
index c52490592..b78a54e0e 100644
--- a/src/libffmpeg/libavcodec/mlib/dsputil_mlib.c
+++ b/contrib/ffmpeg/libavcodec/mlib/dsputil_mlib.c
@@ -2,26 +2,26 @@
* Sun mediaLib optimized DSP utils
* Copyright (c) 2001 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "../dsputil.h"
#include "../mpegvideo.h"
-#include "../../../xine-utils/xineutils.h"
-
#include <mlib_types.h>
#include <mlib_status.h>
#include <mlib_sys.h>
@@ -384,7 +384,7 @@ static void bswap_buf_mlib(uint32_t *dst, uint32_t *src, int w)
static void ff_idct_put_mlib(uint8_t *dest, int line_size, DCTELEM *data)
{
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
mlib_VideoIDCT8x8_S16_S16 (data, data);
@@ -421,7 +421,6 @@ static void ff_fdct_mlib(DCTELEM *data)
void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx)
{
- if (xine_mm_accel() & MM_ACCEL_MLIB) {
c->get_pixels = get_pixels_mlib;
c->diff_pixels = diff_pixels_mlib;
c->add_pixels_clamped = add_pixels_clamped_mlib;
@@ -448,12 +447,10 @@ void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx)
c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mlib;
c->bswap_buf = bswap_buf_mlib;
- }
}
void MPV_common_init_mlib(MpegEncContext *s)
{
- if (xine_mm_accel() & MM_ACCEL_MLIB) {
if(s->avctx->dct_algo==FF_DCT_AUTO || s->avctx->dct_algo==FF_DCT_MLIB){
s->dsp.fdct = ff_fdct_mlib;
}
@@ -464,5 +461,4 @@ void MPV_common_init_mlib(MpegEncContext *s)
s->dsp.idct = ff_idct_mlib;
s->dsp.idct_permutation_type= FF_NO_IDCT_PERM;
}
- }
}
diff --git a/src/libffmpeg/libavcodec/mmvideo.c b/contrib/ffmpeg/libavcodec/mmvideo.c
index 0cfae83de..07d3f3fc5 100644
--- a/src/libffmpeg/libavcodec/mmvideo.c
+++ b/contrib/ffmpeg/libavcodec/mmvideo.c
@@ -2,18 +2,20 @@
* American Laser Games MM Video Decoder
* Copyright (c) 2006 Peter Ross
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/motion_est.c b/contrib/ffmpeg/libavcodec/motion_est.c
index c587369f3..4dc17b99b 100644
--- a/src/libffmpeg/libavcodec/motion_est.c
+++ b/contrib/ffmpeg/libavcodec/motion_est.c
@@ -4,26 +4,25 @@
* Copyright (c) 2002-2004 Michael Niedermayer
*
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* new Motion Estimation (X1/EPZS) by Michael Niedermayer <michaelni@gmx.at>
*/
-/* motion estimation only needed for encoders */
-#ifdef CONFIG_ENCODERS
-
/**
* @file motion_est.c
* Motion estimation.
@@ -297,14 +296,14 @@ static int pix_dev(uint8_t * pix, int line_size, int mean)
s = 0;
for (i = 0; i < 16; i++) {
for (j = 0; j < 16; j += 8) {
- s += ABS(pix[0]-mean);
- s += ABS(pix[1]-mean);
- s += ABS(pix[2]-mean);
- s += ABS(pix[3]-mean);
- s += ABS(pix[4]-mean);
- s += ABS(pix[5]-mean);
- s += ABS(pix[6]-mean);
- s += ABS(pix[7]-mean);
+ s += FFABS(pix[0]-mean);
+ s += FFABS(pix[1]-mean);
+ s += FFABS(pix[2]-mean);
+ s += FFABS(pix[3]-mean);
+ s += FFABS(pix[4]-mean);
+ s += FFABS(pix[5]-mean);
+ s += FFABS(pix[6]-mean);
+ s += FFABS(pix[7]-mean);
pix += 8;
}
pix += line_size - 16;
@@ -1179,13 +1178,11 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
vard= check_input_motion(s, mb_x, mb_y, 1);
if((vard+128)>>8 < c->avctx->me_threshold){
+ int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
+ int i_score= varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*20;
pic->mc_mb_var[s->mb_stride * mb_y + mb_x] = (vard+128)>>8;
c->mc_mb_var_sum_temp += (vard+128)>>8;
- if (vard <= 64<<8 || vard < varc) { //FIXME
- c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
- }else{
- c->scene_change_score+= s->qscale * s->avctx->scenechange_factor;
- }
+ c->scene_change_score+= ff_sqrt(p_score) - ff_sqrt(i_score);
return;
}
if((vard+128)>>8 < c->avctx->mb_threshold)
@@ -1272,10 +1269,9 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
varc, s->avg_mb_var, sum, vard, mx - xx, my - yy);
#endif
if(mb_type){
- if (vard <= 64<<8 || vard < varc)
- c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
- else
- c->scene_change_score+= s->qscale * s->avctx->scenechange_factor;
+ int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
+ int i_score= varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*20;
+ c->scene_change_score+= ff_sqrt(p_score) - ff_sqrt(i_score);
if(mb_type == CANDIDATE_MB_TYPE_INTER){
c->sub_motion_search(s, &mx, &my, dmin, 0, 0, 0, 16);
@@ -1293,14 +1289,14 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
interlaced_search(s, 0, s->p_field_mv_table, s->p_field_select_table, mx, my, 1);
}
}else if(c->avctx->mb_decision > FF_MB_DECISION_SIMPLE){
- if (vard <= 64<<8 || vard < varc)
- c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
- else
- c->scene_change_score+= s->qscale * s->avctx->scenechange_factor;
+ int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
+ int i_score= varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*20;
+ c->scene_change_score+= ff_sqrt(p_score) - ff_sqrt(i_score);
if (vard*2 + 200*256 > varc)
mb_type|= CANDIDATE_MB_TYPE_INTRA;
- if (varc*2 + 200*256 > vard){
+ if (varc*2 + 200*256 > vard || s->qscale > 24){
+// if (varc*2 + 200*256 + 50*(s->lambda2>>FF_LAMBDA_SHIFT) > vard){
mb_type|= CANDIDATE_MB_TYPE_INTER;
c->sub_motion_search(s, &mx, &my, dmin, 0, 0, 0, 16);
if(s->flags&CODEC_FLAG_MV0)
@@ -1399,10 +1395,10 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
}else
s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= 0;
- if (vard <= 64<<8 || vard < varc) { //FIXME
- c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
- }else{
- c->scene_change_score+= s->qscale * s->avctx->scenechange_factor;
+ {
+ int p_score= FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
+ int i_score= varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*20;
+ c->scene_change_score+= ff_sqrt(p_score) - ff_sqrt(i_score);
}
}
@@ -1673,7 +1669,7 @@ static inline int bidir_refine(MpegEncContext * s, int mb_x, int mb_y)
}
#define CHECK_BIDIR2(a,b,c,d)\
CHECK_BIDIR(a,b,c,d)\
-CHECK_BIDIR(-a,-b,-c,-d)
+CHECK_BIDIR(-(a),-(b),-(c),-(d))
#define CHECK_BIDIRR(a,b,c,d)\
CHECK_BIDIR2(a,b,c,d)\
@@ -2116,5 +2112,3 @@ void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_
}
}
}
-
-#endif /* CONFIG_ENCODERS */
diff --git a/src/libffmpeg/libavcodec/motion_est_template.c b/contrib/ffmpeg/libavcodec/motion_est_template.c
index 16d34bb88..193a8b24e 100644
--- a/src/libffmpeg/libavcodec/motion_est_template.c
+++ b/contrib/ffmpeg/libavcodec/motion_est_template.c
@@ -2,18 +2,20 @@
* Motion estimation
* Copyright (c) 2002-2004 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -231,7 +233,7 @@ static int no_sub_motion_search(MpegEncContext * s,
return dmin;
}
-int inline ff_get_mb_score(MpegEncContext * s, int mx, int my, int src_index,
+inline int ff_get_mb_score(MpegEncContext * s, int mx, int my, int src_index,
int ref_index, int size, int h, int add_rate)
{
// const int check_luma= s->dsp.me_sub_cmp != s->dsp.mb_cmp;
@@ -515,11 +517,11 @@ static int qpel_motion_search(MpegEncContext * s,
#define CHECK_CLIPED_MV(ax,ay)\
{\
- const int x= ax;\
- const int y= ay;\
- const int x2= FFMAX(xmin, FFMIN(x, xmax));\
- const int y2= FFMAX(ymin, FFMIN(y, ymax));\
- CHECK_MV(x2, y2)\
+ const int Lx= ax;\
+ const int Ly= ay;\
+ const int Lx2= FFMAX(xmin, FFMIN(Lx, xmax));\
+ const int Ly2= FFMAX(ymin, FFMIN(Ly, ymax));\
+ CHECK_MV(Lx2, Ly2)\
}
#define CHECK_MV_DIR(x,y,new_dir)\
@@ -637,8 +639,8 @@ static int funny_diamond_search(MpegEncContext * s, int *best, int dmin,
{
int dx, dy, i;
static int stats[8*8];
-dx= ABS(x-best[0]);
-dy= ABS(y-best[1]);
+dx= FFABS(x-best[0]);
+dy= FFABS(y-best[1]);
if(dy>dx){
dx^=dy; dy^=dx; dx^=dy;
}
@@ -656,6 +658,70 @@ if(256*256*256*64 % (stats[0]+1)==0){
return dmin;
}
+static int umh_search(MpegEncContext * s, int *best, int dmin,
+ int src_index, int ref_index, int const penalty_factor,
+ int size, int h, int flags)
+{
+ MotionEstContext * const c= &s->me;
+ me_cmp_func cmpf, chroma_cmpf;
+ LOAD_COMMON
+ LOAD_COMMON2
+ int map_generation= c->map_generation;
+ int x,y,x2,y2, i, j, d;
+ static const int hex[16][2]={{-4,-2}, {-4,-1}, {-4, 0}, {-4, 1}, {-4, 2},
+ { 4,-2}, { 4,-1}, { 4, 0}, { 4, 1}, { 4, 2},
+ {-2, 3}, { 0, 4}, { 2, 3},
+ {-2,-3}, { 0,-4}, { 2,-3},};
+ static const int hex2[6][2]={{-2, 0}, { 2,0}, {-1,-2}, {1,-2}, {-1,2},{1,2}};
+
+ cmpf= s->dsp.me_cmp[size];
+ chroma_cmpf= s->dsp.me_cmp[size+1];
+
+ x= best[0];
+ y= best[1];
+ for(x2=FFMAX(x-15, xmin); x2<=FFMIN(x+15,xmax); x2+=2){
+ CHECK_MV(x2, y);
+ }
+ for(y2=FFMAX(y- 7, ymin); y2<=FFMIN(y+ 7,ymax); y2+=2){
+ CHECK_MV(x, y2);
+ }
+
+ x= best[0];
+ y= best[1];
+ for(y2=FFMAX(y-2, ymin); y2<=FFMIN(y+2,ymax); y2++){
+ for(x2=FFMAX(x-2, xmin); x2<=FFMIN(x+2,xmax); x2++){
+ CHECK_MV(x2, y2);
+ }
+ }
+
+//FIXME prevent the CLIP stuff
+
+ for(j=1; j<=4; j++){
+ for(i=0; i<16; i++){
+ CHECK_CLIPED_MV(x+hex[i][0]*j, y+hex[i][1]*j);
+ }
+ }
+
+ do{
+ x= best[0];
+ y= best[1];
+ for(i=0; i<6; i++){
+ CHECK_CLIPED_MV(x+hex2[i][0], y+hex2[i][1]);
+ }
+ }while(best[0] != x || best[1] != y);
+
+ do{
+ x= best[0];
+ y= best[1];
+ CHECK_CLIPED_MV(x+1, y);
+ CHECK_CLIPED_MV(x, y+1);
+ CHECK_CLIPED_MV(x-1, y);
+ CHECK_CLIPED_MV(x, y-1);
+ }while(best[0] != x || best[1] != y);
+
+ return dmin;
+}
+
#define SAB_CHECK_MV(ax,ay)\
{\
const int key= ((ay)<<ME_MAP_MV_BITS) + (ax) + map_generation;\
@@ -693,7 +759,7 @@ static int sab_diamond_search(MpegEncContext * s, int *best, int dmin,
MotionEstContext * const c= &s->me;
me_cmp_func cmpf, chroma_cmpf;
Minima minima[MAX_SAB_SIZE];
- const int minima_count= ABS(c->dia_size);
+ const int minima_count= FFABS(c->dia_size);
int i, j;
LOAD_COMMON
LOAD_COMMON2
@@ -827,8 +893,8 @@ static int var_diamond_search(MpegEncContext * s, int *best, int dmin,
{
int dx, dy, i;
static int stats[8*8];
-dx= ABS(x-best[0]);
-dy= ABS(y-best[1]);
+dx= FFABS(x-best[0]);
+dy= FFABS(y-best[1]);
stats[dy*8 + dx] ++;
if(256*256*256*64 % (stats[0]+1)==0){
for(i=0; i<64; i++){
@@ -848,7 +914,7 @@ static always_inline int diamond_search(MpegEncContext * s, int *best, int dmin,
int size, int h, int flags){
MotionEstContext * const c= &s->me;
if(c->dia_size==-1)
- return funny_diamond_search(s, best, dmin, src_index, ref_index, penalty_factor, size, h, flags);
+ return umh_search(s, best, dmin, src_index, ref_index, penalty_factor, size, h, flags);
else if(c->dia_size<-1)
return sab_diamond_search(s, best, dmin, src_index, ref_index, penalty_factor, size, h, flags);
else if(c->dia_size<2)
@@ -905,14 +971,16 @@ static always_inline int epzs_motion_search_internal(MpegEncContext * s, int *mx
c->skip=1;
return dmin;
}
- CHECK_MV(P_MEDIAN[0]>>shift, P_MEDIAN[1]>>shift)
- if(dmin>h*h*2){
+ CHECK_MV( P_MEDIAN[0] >>shift , P_MEDIAN[1] >>shift)
+ CHECK_CLIPED_MV((P_MEDIAN[0]>>shift) , (P_MEDIAN[1]>>shift)-1)
+ CHECK_CLIPED_MV((P_MEDIAN[0]>>shift) , (P_MEDIAN[1]>>shift)+1)
+ CHECK_CLIPED_MV((P_MEDIAN[0]>>shift)-1, (P_MEDIAN[1]>>shift) )
+ CHECK_CLIPED_MV((P_MEDIAN[0]>>shift)+1, (P_MEDIAN[1]>>shift) )
CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16)
CHECK_MV(P_LEFT[0] >>shift, P_LEFT[1] >>shift)
CHECK_MV(P_TOP[0] >>shift, P_TOP[1] >>shift)
CHECK_MV(P_TOPRIGHT[0]>>shift, P_TOPRIGHT[1]>>shift)
- }
}
if(dmin>h*h*4){
if(c->pre_pass){
@@ -1013,14 +1081,12 @@ static int epzs_motion_search4(MpegEncContext * s,
}else{
CHECK_MV(P_MV1[0]>>shift, P_MV1[1]>>shift)
//FIXME try some early stop
- if(dmin>64*2){
CHECK_MV(P_MEDIAN[0]>>shift, P_MEDIAN[1]>>shift)
CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift)
CHECK_MV(P_TOP[0]>>shift, P_TOP[1]>>shift)
CHECK_MV(P_TOPRIGHT[0]>>shift, P_TOPRIGHT[1]>>shift)
CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16)
- }
}
if(dmin>64*4){
CHECK_CLIPED_MV((last_mv[ref_mv_xy+1][0]*ref_mv_scale + (1<<15))>>16,
@@ -1075,14 +1141,12 @@ static int epzs_motion_search2(MpegEncContext * s,
}else{
CHECK_MV(P_MV1[0]>>shift, P_MV1[1]>>shift)
//FIXME try some early stop
- if(dmin>64*2){
CHECK_MV(P_MEDIAN[0]>>shift, P_MEDIAN[1]>>shift)
CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift)
CHECK_MV(P_TOP[0]>>shift, P_TOP[1]>>shift)
CHECK_MV(P_TOPRIGHT[0]>>shift, P_TOPRIGHT[1]>>shift)
CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16)
- }
}
if(dmin>64*4){
CHECK_CLIPED_MV((last_mv[ref_mv_xy+1][0]*ref_mv_scale + (1<<15))>>16,
diff --git a/contrib/ffmpeg/libavcodec/motion_test.c b/contrib/ffmpeg/libavcodec/motion_test.c
new file mode 100644
index 000000000..8540b7483
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/motion_test.c
@@ -0,0 +1,159 @@
+/*
+ * (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file motion_test.c
+ * motion test.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "dsputil.h"
+
+#include "i386/mmx.h"
+
+#undef printf
+
+int pix_abs16x16_mmx(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_mmx1(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_c(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_x2_mmx(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_x2_mmx1(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_x2_c(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_y2_mmx(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_y2_mmx1(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_y2_c(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_xy2_mmx(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_xy2_mmx1(uint8_t *blk1, uint8_t *blk2, int lx);
+int pix_abs16x16_xy2_c(uint8_t *blk1, uint8_t *blk2, int lx);
+
+typedef int motion_func(uint8_t *blk1, uint8_t *blk2, int lx);
+
+#define WIDTH 64
+#define HEIGHT 64
+
+uint8_t img1[WIDTH * HEIGHT];
+uint8_t img2[WIDTH * HEIGHT];
+
+void fill_random(uint8_t *tab, int size)
+{
+ int i;
+ for(i=0;i<size;i++) {
+#if 1
+ tab[i] = random() % 256;
+#else
+ tab[i] = i;
+#endif
+ }
+}
+
+void help(void)
+{
+ printf("motion-test [-h]\n"
+ "test motion implementations\n");
+ exit(1);
+}
+
+int64_t gettime(void)
+{
+ struct timeval tv;
+ gettimeofday(&tv,NULL);
+ return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+#define NB_ITS 500
+
+int dummy;
+
+void test_motion(const char *name,
+ motion_func *test_func, motion_func *ref_func)
+{
+ int x, y, d1, d2, it;
+ uint8_t *ptr;
+ int64_t ti;
+ printf("testing '%s'\n", name);
+
+ /* test correctness */
+ for(it=0;it<20;it++) {
+
+ fill_random(img1, WIDTH * HEIGHT);
+ fill_random(img2, WIDTH * HEIGHT);
+
+ for(y=0;y<HEIGHT-17;y++) {
+ for(x=0;x<WIDTH-17;x++) {
+ ptr = img2 + y * WIDTH + x;
+ d1 = test_func(img1, ptr, WIDTH);
+ d2 = ref_func(img1, ptr, WIDTH);
+ if (d1 != d2) {
+ printf("error: mmx=%d c=%d\n", d1, d2);
+ }
+ }
+ }
+ }
+ emms();
+
+ /* speed test */
+ ti = gettime();
+ d1 = 0;
+ for(it=0;it<NB_ITS;it++) {
+ for(y=0;y<HEIGHT-17;y++) {
+ for(x=0;x<WIDTH-17;x++) {
+ ptr = img2 + y * WIDTH + x;
+ d1 += test_func(img1, ptr, WIDTH);
+ }
+ }
+ }
+ emms();
+ dummy = d1; /* avoid optimisation */
+ ti = gettime() - ti;
+
+ printf(" %0.0f kop/s\n",
+ (double)NB_ITS * (WIDTH - 16) * (HEIGHT - 16) /
+ (double)(ti / 1000.0));
+}
+
+
+int main(int argc, char **argv)
+{
+ int c;
+
+ for(;;) {
+ c = getopt(argc, argv, "h");
+ if (c == -1)
+ break;
+ switch(c) {
+ case 'h':
+ help();
+ break;
+ }
+ }
+
+ printf("ffmpeg motion test\n");
+
+ test_motion("mmx", pix_abs16x16_mmx, pix_abs16x16_c);
+ test_motion("mmx_x2", pix_abs16x16_x2_mmx, pix_abs16x16_x2_c);
+ test_motion("mmx_y2", pix_abs16x16_y2_mmx, pix_abs16x16_y2_c);
+ test_motion("mmx_xy2", pix_abs16x16_xy2_mmx, pix_abs16x16_xy2_c);
+ return 0;
+}
diff --git a/contrib/ffmpeg/libavcodec/mp3lameaudio.c b/contrib/ffmpeg/libavcodec/mp3lameaudio.c
new file mode 100644
index 000000000..d13350265
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/mp3lameaudio.c
@@ -0,0 +1,221 @@
+/*
+ * Interface to libmp3lame for mp3 encoding
+ * Copyright (c) 2002 Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mp3lameaudio.c
+ * Interface to libmp3lame for mp3 encoding.
+ */
+
+#include "avcodec.h"
+#include "mpegaudio.h"
+#include <lame/lame.h>
+
+#define BUFFER_SIZE (2*MPA_FRAME_SIZE)
+typedef struct Mp3AudioContext {
+ lame_global_flags *gfp;
+ int stereo;
+ uint8_t buffer[BUFFER_SIZE];
+ int buffer_index;
+} Mp3AudioContext;
+
+static int MP3lame_encode_init(AVCodecContext *avctx)
+{
+ Mp3AudioContext *s = avctx->priv_data;
+
+ if (avctx->channels > 2)
+ return -1;
+
+ s->stereo = avctx->channels > 1 ? 1 : 0;
+
+ if ((s->gfp = lame_init()) == NULL)
+ goto err;
+ lame_set_in_samplerate(s->gfp, avctx->sample_rate);
+ lame_set_out_samplerate(s->gfp, avctx->sample_rate);
+ lame_set_num_channels(s->gfp, avctx->channels);
+ /* lame 3.91 dies on quality != 5 */
+ lame_set_quality(s->gfp, 5);
+ /* lame 3.91 doesn't work in mono */
+ lame_set_mode(s->gfp, JOINT_STEREO);
+ lame_set_brate(s->gfp, avctx->bit_rate/1000);
+ if(avctx->flags & CODEC_FLAG_QSCALE) {
+ lame_set_brate(s->gfp, 0);
+ lame_set_VBR(s->gfp, vbr_default);
+ lame_set_VBR_q(s->gfp, avctx->global_quality / (float)FF_QP2LAMBDA);
+ }
+ lame_set_bWriteVbrTag(s->gfp,0);
+ if (lame_init_params(s->gfp) < 0)
+ goto err_close;
+
+ avctx->frame_size = lame_get_framesize(s->gfp);
+
+ avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame->key_frame= 1;
+
+ return 0;
+
+err_close:
+ lame_close(s->gfp);
+err:
+ return -1;
+}
+
+static const int sSampleRates[3] = {
+ 44100, 48000, 32000
+};
+
+static const int sBitRates[2][3][15] = {
+ { { 0, 32, 64, 96,128,160,192,224,256,288,320,352,384,416,448},
+ { 0, 32, 48, 56, 64, 80, 96,112,128,160,192,224,256,320,384},
+ { 0, 32, 40, 48, 56, 64, 80, 96,112,128,160,192,224,256,320}
+ },
+ { { 0, 32, 48, 56, 64, 80, 96,112,128,144,160,176,192,224,256},
+ { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160},
+ { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160}
+ },
+};
+
+static const int sSamplesPerFrame[2][3] =
+{
+ { 384, 1152, 1152 },
+ { 384, 1152, 576 }
+};
+
+static const int sBitsPerSlot[3] = {
+ 32,
+ 8,
+ 8
+};
+
+static int mp3len(void *data, int *samplesPerFrame, int *sampleRate)
+{
+ uint8_t *dataTmp = (uint8_t *)data;
+ uint32_t header = ( (uint32_t)dataTmp[0] << 24 ) | ( (uint32_t)dataTmp[1] << 16 ) | ( (uint32_t)dataTmp[2] << 8 ) | (uint32_t)dataTmp[3];
+ int layerID = 3 - ((header >> 17) & 0x03);
+ int bitRateID = ((header >> 12) & 0x0f);
+ int sampleRateID = ((header >> 10) & 0x03);
+ int bitsPerSlot = sBitsPerSlot[layerID];
+ int isPadded = ((header >> 9) & 0x01);
+ static int const mode_tab[4]= {2,3,1,0};
+ int mode= mode_tab[(header >> 19) & 0x03];
+ int mpeg_id= mode>0;
+ int temp0, temp1, bitRate;
+
+ if ( (( header >> 21 ) & 0x7ff) != 0x7ff || mode == 3 || layerID==3 || sampleRateID==3) {
+ return -1;
+ }
+
+ if(!samplesPerFrame) samplesPerFrame= &temp0;
+ if(!sampleRate ) sampleRate = &temp1;
+
+// *isMono = ((header >> 6) & 0x03) == 0x03;
+
+ *sampleRate = sSampleRates[sampleRateID]>>mode;
+ bitRate = sBitRates[mpeg_id][layerID][bitRateID] * 1000;
+ *samplesPerFrame = sSamplesPerFrame[mpeg_id][layerID];
+//av_log(NULL, AV_LOG_DEBUG, "sr:%d br:%d spf:%d l:%d m:%d\n", *sampleRate, bitRate, *samplesPerFrame, layerID, mode);
+
+ return *samplesPerFrame * bitRate / (bitsPerSlot * *sampleRate) + isPadded;
+}
+
+static int MP3lame_encode_frame(AVCodecContext *avctx,
+ unsigned char *frame, int buf_size, void *data)
+{
+ Mp3AudioContext *s = avctx->priv_data;
+ int len;
+ int lame_result;
+
+ /* lame 3.91 dies on '1-channel interleaved' data */
+
+ if(data){
+ if (s->stereo) {
+ lame_result = lame_encode_buffer_interleaved(
+ s->gfp,
+ data,
+ avctx->frame_size,
+ s->buffer + s->buffer_index,
+ BUFFER_SIZE - s->buffer_index
+ );
+ } else {
+ lame_result = lame_encode_buffer(
+ s->gfp,
+ data,
+ data,
+ avctx->frame_size,
+ s->buffer + s->buffer_index,
+ BUFFER_SIZE - s->buffer_index
+ );
+ }
+ }else{
+ lame_result= lame_encode_flush(
+ s->gfp,
+ s->buffer + s->buffer_index,
+ BUFFER_SIZE - s->buffer_index
+ );
+ }
+
+ if(lame_result==-1) {
+ /* output buffer too small */
+ av_log(avctx, AV_LOG_ERROR, "lame: output buffer too small (buffer index: %d, free bytes: %d)\n", s->buffer_index, BUFFER_SIZE - s->buffer_index);
+ return 0;
+ }
+
+ s->buffer_index += lame_result;
+
+ if(s->buffer_index<4)
+ return 0;
+
+ len= mp3len(s->buffer, NULL, NULL);
+//av_log(avctx, AV_LOG_DEBUG, "in:%d packet-len:%d index:%d\n", avctx->frame_size, len, s->buffer_index);
+ if(len <= s->buffer_index){
+ memcpy(frame, s->buffer, len);
+ s->buffer_index -= len;
+
+ memmove(s->buffer, s->buffer+len, s->buffer_index);
+ //FIXME fix the audio codec API so we don't need the memcpy()
+/*for(i=0; i<len; i++){
+ av_log(avctx, AV_LOG_DEBUG, "%2X ", frame[i]);
+}*/
+ return len;
+ }else
+ return 0;
+}
+
+static int MP3lame_encode_close(AVCodecContext *avctx)
+{
+ Mp3AudioContext *s = avctx->priv_data;
+
+ av_freep(&avctx->coded_frame);
+
+ lame_close(s->gfp);
+ return 0;
+}
+
+
+AVCodec mp3lame_encoder = {
+ "mp3",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_MP3,
+ sizeof(Mp3AudioContext),
+ MP3lame_encode_init,
+ MP3lame_encode_frame,
+ MP3lame_encode_close,
+ .capabilities= CODEC_CAP_DELAY,
+};
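
As a quick sanity check of the mp3len() frame-length formula above: for an MPEG-1 Layer III frame at 44100 Hz and 128 kbit/s, samplesPerFrame is 1152 and bitsPerSlot is 8, so the frame occupies 1152 * 128000 / (8 * 44100) = 417 bytes, plus one extra byte when the padding bit is set — which matches the packet sizes that MP3lame_encode_frame() peels off its internal buffer.
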
diff --git a/src/libffmpeg/libavcodec/mpeg12.c b/contrib/ffmpeg/libavcodec/mpeg12.c
index c268cf707..c392e17af 100644
--- a/src/libffmpeg/libavcodec/mpeg12.c
+++ b/contrib/ffmpeg/libavcodec/mpeg12.c
@@ -3,18 +3,20 @@
* Copyright (c) 2000,2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -34,13 +36,6 @@
//#include <assert.h>
-/* if xine's MPEG encoder is enabled, enable the encoding features in
- * this particular module */
-#if defined(XINE_MPEG_ENCODER) && !defined(CONFIG_ENCODERS)
-#define CONFIG_ENCODERS
-#endif
-
-
/* Start codes. */
#define SEQ_END_CODE 0x000001b7
#define SEQ_START_CODE 0x000001b3
@@ -90,10 +85,10 @@ extern void XVMC_pack_pblocks(MpegEncContext *s,int cbp);
extern void XVMC_init_block(MpegEncContext *s);//set s->block
#endif
-const enum PixelFormat pixfmt_yuv_420[]= {PIX_FMT_YUV420P,-1};
-const enum PixelFormat pixfmt_yuv_422[]= {PIX_FMT_YUV422P,-1};
-const enum PixelFormat pixfmt_yuv_444[]= {PIX_FMT_YUV444P,-1};
-const enum PixelFormat pixfmt_xvmc_mpg2_420[] = {
+static const enum PixelFormat pixfmt_yuv_420[]= {PIX_FMT_YUV420P,-1};
+static const enum PixelFormat pixfmt_yuv_422[]= {PIX_FMT_YUV422P,-1};
+static const enum PixelFormat pixfmt_yuv_444[]= {PIX_FMT_YUV444P,-1};
+static const enum PixelFormat pixfmt_xvmc_mpg2_420[] = {
PIX_FMT_XVMC_MPEG2_IDCT,
PIX_FMT_XVMC_MPEG2_MC,
-1};
@@ -164,7 +159,7 @@ static void init_uni_ac_vlc(RLTable *rl, uint8_t *uni_ac_vlc_len){
for(run=0; run<64; run++){
int len, bits, code;
- int alevel= ABS(level);
+ int alevel= FFABS(level);
int sign= (level>>31)&1;
if (alevel > rl->max_level[0][run])
@@ -211,7 +206,7 @@ static int find_frame_rate_index(MpegEncContext *s){
int64_t n1= 1001LL*s->avctx->time_base.den;
if(s->avctx->strict_std_compliance > FF_COMPLIANCE_INOFFICIAL && i>=9) break;
- d = ABS(n0 - n1);
+ d = FFABS(n0 - n1);
if(d < dmin){
dmin=d;
s->frame_rate_index= i;
@@ -245,6 +240,11 @@ static int encode_init(AVCodecContext *avctx)
if(avctx->level == FF_LEVEL_UNKNOWN)
avctx->level = s->chroma_format == CHROMA_420 ? 8 : 5;
+ if((avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) && s->frame_rate_index != 4){
+ av_log(avctx, AV_LOG_ERROR, "Drop frame time code only allowed with 1001/30000 fps\n");
+ return -1;
+ }
+
return 0;
}
@@ -284,7 +284,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
else
error-= av_q2d(mpeg2_aspect[i])*s->height/s->width;
- error= ABS(error);
+ error= FFABS(error);
if(error < best_aspect_error){
best_aspect_error= error;
@@ -351,13 +351,20 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
}
put_header(s, GOP_START_CODE);
- put_bits(&s->pb, 1, 0); /* do drop frame */
+ put_bits(&s->pb, 1, !!(s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE)); /* drop frame flag */
/* time code : we must convert from the real frame rate to a
fake mpeg frame rate in case of low frame rate */
fps = (framerate.num + framerate.den/2)/ framerate.den;
- time_code = s->current_picture_ptr->coded_picture_number;
-
- s->gop_picture_number = time_code;
+ time_code = s->current_picture_ptr->coded_picture_number + s->avctx->timecode_frame_start;
+
+ s->gop_picture_number = s->current_picture_ptr->coded_picture_number;
+ if (s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) {
+ /* only works for NTSC 29.97 */
+ int d = time_code / 17982;
+ int m = time_code % 17982;
+ //if (m < 2) m += 2; /* not needed since -2,-1 / 1798 in C returns 0 */
+ time_code += 18 * d + 2 * ((m - 2) / 1798);
+ }
put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24));
put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60));
put_bits(&s->pb, 1, 1);
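
The drop-frame arithmetic added in this hunk maps a running 29.97 fps frame count onto the nominal 30 fps numbers used by SMPTE drop-frame timecode; 17982 is the frame count of a 10-minute block and 1798 that of a minute in which two frame numbers are dropped. A small worked sketch (an illustration, not part of the patch):

/* Re-insert the frame numbers that drop-frame timecode skips:
 * two per minute, except every 10th minute. */
static int to_drop_frame_number(int frames)
{
    int d = frames / 17982;                 /* whole 10-minute blocks    */
    int m = frames % 17982;                 /* position inside the block */
    return frames + 18 * d + 2 * ((m - 2) / 1798);
}
/* Example: frames = 1800 (one minute of video) yields 1802, so the timecode
 * reads 00:01:00;02 because frame numbers ;00 and ;01 were dropped. */
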
@@ -811,7 +818,7 @@ void ff_mpeg1_encode_init(MpegEncContext *s)
int bits, code;
int diff=i;
- adiff = ABS(diff);
+ adiff = FFABS(diff);
if(diff<0) diff--;
index = av_log2(2*adiff);
@@ -2395,10 +2402,12 @@ static void mpeg_decode_picture_coding_extension(MpegEncContext *s)
s->chroma_420_type = get_bits1(&s->gb);
s->progressive_frame = get_bits1(&s->gb);
- if(s->picture_structure == PICT_FRAME)
+ if(s->picture_structure == PICT_FRAME){
s->first_field=0;
- else{
+ s->v_edge_pos= 16*s->mb_height;
+ }else{
s->first_field ^= 1;
+ s->v_edge_pos= 8*s->mb_height;
memset(s->mbskip_table, 0, s->mb_stride*s->mb_height);
}
@@ -2702,7 +2711,8 @@ static int slice_decode_thread(AVCodecContext *c, void *arg){
s->error_count= 3*(s->end_mb_y - s->start_mb_y)*s->mb_width;
for(;;){
- int start_code, ret;
+ uint32_t start_code;
+ int ret;
ret= mpeg_decode_slice((Mpeg1Context*)s, mb_y, &buf, s->gb.buffer_end - buf);
emms_c();
@@ -2996,7 +3006,7 @@ static void mpeg_decode_gop(AVCodecContext *avctx,
* finds the end of the current frame in the bitstream.
* @return the position of the first byte of the next frame, or -1
*/
-int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size)
+static int mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size)
{
int i;
uint32_t state= pc->state;
@@ -3040,7 +3050,8 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
Mpeg1Context *s = avctx->priv_data;
const uint8_t *buf_end;
const uint8_t *buf_ptr;
- int ret, start_code, input_size;
+ uint32_t start_code;
+ int ret, input_size;
AVFrame *picture = data;
MpegEncContext *s2 = &s->mpeg_enc_ctx;
dprintf("fill_buffer\n");
@@ -3057,7 +3068,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
}
if(s2->flags&CODEC_FLAG_TRUNCATED){
- int next= ff_mpeg1_find_frame_end(&s2->parse_context, buf, buf_size);
+ int next= mpeg1_find_frame_end(&s2->parse_context, buf, buf_size);
if( ff_combine_frame(&s2->parse_context, next, &buf, &buf_size) < 0 )
return buf_size;
@@ -3087,7 +3098,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
/* find start next code */
start_code = -1;
buf_ptr = ff_find_start_code(buf_ptr,buf_end, &start_code);
- if (start_code < 0){
+ if (start_code > 0x1ff){
if(s2->pict_type != B_TYPE || avctx->skip_frame <= AVDISCARD_DEFAULT){
if(avctx->thread_count > 1){
int i;
@@ -3110,93 +3121,93 @@ static int mpeg_decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_DEBUG, "%3X at %zd left %d\n", start_code, buf_ptr-buf, input_size);
}
- /* prepare data for next start code */
- switch(start_code) {
- case SEQ_START_CODE:
- mpeg1_decode_sequence(avctx, buf_ptr,
- input_size);
- break;
+ /* prepare data for next start code */
+ switch(start_code) {
+ case SEQ_START_CODE:
+ mpeg1_decode_sequence(avctx, buf_ptr,
+ input_size);
+ break;
- case PICTURE_START_CODE:
- /* we have a complete image : we try to decompress it */
- mpeg1_decode_picture(avctx,
- buf_ptr, input_size);
- break;
- case EXT_START_CODE:
- mpeg_decode_extension(avctx,
- buf_ptr, input_size);
- break;
- case USER_START_CODE:
- mpeg_decode_user_data(avctx,
- buf_ptr, input_size);
- break;
- case GOP_START_CODE:
- s2->first_field=0;
- mpeg_decode_gop(avctx,
- buf_ptr, input_size);
+ case PICTURE_START_CODE:
+ /* we have a complete image : we try to decompress it */
+ mpeg1_decode_picture(avctx,
+ buf_ptr, input_size);
+ break;
+ case EXT_START_CODE:
+ mpeg_decode_extension(avctx,
+ buf_ptr, input_size);
+ break;
+ case USER_START_CODE:
+ mpeg_decode_user_data(avctx,
+ buf_ptr, input_size);
+ break;
+ case GOP_START_CODE:
+ s2->first_field=0;
+ mpeg_decode_gop(avctx,
+ buf_ptr, input_size);
+ break;
+ default:
+ if (start_code >= SLICE_MIN_START_CODE &&
+ start_code <= SLICE_MAX_START_CODE) {
+ int mb_y= start_code - SLICE_MIN_START_CODE;
+
+ if(s2->last_picture_ptr==NULL){
+ /* skip b frames if we don't have reference frames */
+ if(s2->pict_type==B_TYPE) break;
+ /* skip P frames if we don't have a reference frame or a valid header */
+// if(s2->pict_type==P_TYPE && s2->first_field && !s2->first_slice) break;
+ }
+ /* skip b frames if we are in a hurry */
+ if(avctx->hurry_up && s2->pict_type==B_TYPE) break;
+ if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==B_TYPE)
+ ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=I_TYPE)
+ || avctx->skip_frame >= AVDISCARD_ALL)
break;
- default:
- if (start_code >= SLICE_MIN_START_CODE &&
- start_code <= SLICE_MAX_START_CODE) {
- int mb_y= start_code - SLICE_MIN_START_CODE;
-
- if(s2->last_picture_ptr==NULL){
- /* skip b frames if we dont have reference frames */
- if(s2->pict_type==B_TYPE) break;
- /* skip P frames if we dont have reference frame no valid header */
-// if(s2->pict_type==P_TYPE && s2->first_field && !s2->first_slice) break;
- }
- /* skip b frames if we are in a hurry */
- if(avctx->hurry_up && s2->pict_type==B_TYPE) break;
- if( (avctx->skip_frame >= AVDISCARD_NONREF && s2->pict_type==B_TYPE)
- ||(avctx->skip_frame >= AVDISCARD_NONKEY && s2->pict_type!=I_TYPE)
- || avctx->skip_frame >= AVDISCARD_ALL)
- break;
- /* skip everything if we are in a hurry>=5 */
- if(avctx->hurry_up>=5) break;
-
- if (!s->mpeg_enc_ctx_allocated) break;
-
- if(s2->codec_id == CODEC_ID_MPEG2VIDEO){
- if(mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom)
- break;
- }
+ /* skip everything if we are in a hurry>=5 */
+ if(avctx->hurry_up>=5) break;
- if(s2->first_slice){
- s2->first_slice=0;
- if(mpeg_field_start(s2) < 0)
- return -1;
- }
+ if (!s->mpeg_enc_ctx_allocated) break;
- if(avctx->thread_count > 1){
- int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
- if(threshold <= mb_y){
- MpegEncContext *thread_context= s2->thread_context[s->slice_count];
+ if(s2->codec_id == CODEC_ID_MPEG2VIDEO){
+ if(mb_y < avctx->skip_top || mb_y >= s2->mb_height - avctx->skip_bottom)
+ break;
+ }
- thread_context->start_mb_y= mb_y;
- thread_context->end_mb_y = s2->mb_height;
- if(s->slice_count){
- s2->thread_context[s->slice_count-1]->end_mb_y= mb_y;
- ff_update_duplicate_context(thread_context, s2);
- }
- init_get_bits(&thread_context->gb, buf_ptr, input_size*8);
- s->slice_count++;
- }
- buf_ptr += 2; //FIXME add minimum num of bytes per slice
- }else{
- ret = mpeg_decode_slice(s, mb_y, &buf_ptr, input_size);
- emms_c();
-
- if(ret < 0){
- if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0)
- ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
- }else{
- ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, AC_END|DC_END|MV_END);
- }
+ if(s2->first_slice){
+ s2->first_slice=0;
+ if(mpeg_field_start(s2) < 0)
+ return -1;
+ }
+
+ if(avctx->thread_count > 1){
+ int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
+ if(threshold <= mb_y){
+ MpegEncContext *thread_context= s2->thread_context[s->slice_count];
+
+ thread_context->start_mb_y= mb_y;
+ thread_context->end_mb_y = s2->mb_height;
+ if(s->slice_count){
+ s2->thread_context[s->slice_count-1]->end_mb_y= mb_y;
+ ff_update_duplicate_context(thread_context, s2);
}
+ init_get_bits(&thread_context->gb, buf_ptr, input_size*8);
+ s->slice_count++;
+ }
+ buf_ptr += 2; //FIXME add minimum num of bytes per slice
+ }else{
+ ret = mpeg_decode_slice(s, mb_y, &buf_ptr, input_size);
+ emms_c();
+
+ if(ret < 0){
+ if(s2->resync_mb_x>=0 && s2->resync_mb_y>=0)
+ ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x, s2->mb_y, AC_ERROR|DC_ERROR|MV_ERROR);
+ }else{
+ ff_er_add_slice(s2, s2->resync_mb_x, s2->resync_mb_y, s2->mb_x-1, s2->mb_y, AC_END|DC_END|MV_END);
}
- break;
}
+ }
+ break;
+ }
}
}
@@ -3313,6 +3324,169 @@ AVCodec mpeg_xvmc_decoder = {
#endif
+#ifdef CONFIG_MPEGVIDEO_PARSER
+static void mpegvideo_extract_headers(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size)
+{
+ ParseContext1 *pc = s->priv_data;
+ const uint8_t *buf_end;
+ uint32_t start_code;
+ int frame_rate_index, ext_type, bytes_left;
+ int frame_rate_ext_n, frame_rate_ext_d;
+ int picture_structure, top_field_first, repeat_first_field, progressive_frame;
+ int horiz_size_ext, vert_size_ext, bit_rate_ext;
+//FIXME replace the crap with get_bits()
+ s->repeat_pict = 0;
+ buf_end = buf + buf_size;
+ while (buf < buf_end) {
+ start_code= -1;
+ buf= ff_find_start_code(buf, buf_end, &start_code);
+ bytes_left = buf_end - buf;
+ switch(start_code) {
+ case PICTURE_START_CODE:
+ if (bytes_left >= 2) {
+ s->pict_type = (buf[1] >> 3) & 7;
+ }
+ break;
+ case SEQ_START_CODE:
+ if (bytes_left >= 7) {
+ pc->width = (buf[0] << 4) | (buf[1] >> 4);
+ pc->height = ((buf[1] & 0x0f) << 8) | buf[2];
+ avcodec_set_dimensions(avctx, pc->width, pc->height);
+ frame_rate_index = buf[3] & 0xf;
+ pc->frame_rate.den = avctx->time_base.den = ff_frame_rate_tab[frame_rate_index].num;
+ pc->frame_rate.num = avctx->time_base.num = ff_frame_rate_tab[frame_rate_index].den;
+ avctx->bit_rate = ((buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6))*400;
+ avctx->codec_id = CODEC_ID_MPEG1VIDEO;
+ avctx->sub_id = 1;
+ }
+ break;
+ case EXT_START_CODE:
+ if (bytes_left >= 1) {
+ ext_type = (buf[0] >> 4);
+ switch(ext_type) {
+ case 0x1: /* sequence extension */
+ if (bytes_left >= 6) {
+ horiz_size_ext = ((buf[1] & 1) << 1) | (buf[2] >> 7);
+ vert_size_ext = (buf[2] >> 5) & 3;
+ bit_rate_ext = ((buf[2] & 0x1F)<<7) | (buf[3]>>1);
+ frame_rate_ext_n = (buf[5] >> 5) & 3;
+ frame_rate_ext_d = (buf[5] & 0x1f);
+ pc->progressive_sequence = buf[1] & (1 << 3);
+ avctx->has_b_frames= !(buf[5] >> 7);
+
+ pc->width |=(horiz_size_ext << 12);
+ pc->height |=( vert_size_ext << 12);
+ avctx->bit_rate += (bit_rate_ext << 18) * 400;
+ avcodec_set_dimensions(avctx, pc->width, pc->height);
+ avctx->time_base.den = pc->frame_rate.den * (frame_rate_ext_n + 1);
+ avctx->time_base.num = pc->frame_rate.num * (frame_rate_ext_d + 1);
+ avctx->codec_id = CODEC_ID_MPEG2VIDEO;
+ avctx->sub_id = 2; /* forces MPEG2 */
+ }
+ break;
+ case 0x8: /* picture coding extension */
+ if (bytes_left >= 5) {
+ picture_structure = buf[2]&3;
+ top_field_first = buf[3] & (1 << 7);
+ repeat_first_field = buf[3] & (1 << 1);
+ progressive_frame = buf[4] & (1 << 7);
+
+ /* check if we must repeat the frame */
+ if (repeat_first_field) {
+ if (pc->progressive_sequence) {
+ if (top_field_first)
+ s->repeat_pict = 4;
+ else
+ s->repeat_pict = 2;
+ } else if (progressive_frame) {
+ s->repeat_pict = 1;
+ }
+ }
+
+ /* the packet only represents half a frame
+ XXX,FIXME maybe find a different solution */
+ if(picture_structure != 3)
+ s->repeat_pict = -1;
+ }
+ break;
+ }
+ }
+ break;
+ case -1:
+ goto the_end;
+ default:
+ /* we stop parsing when we encounter a slice. It ensures
+ that this function takes a negligible amount of time */
+ if (start_code >= SLICE_MIN_START_CODE &&
+ start_code <= SLICE_MAX_START_CODE)
+ goto the_end;
+ break;
+ }
+ }
+ the_end: ;
+}
+
+static int mpegvideo_parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ ParseContext1 *pc1 = s->priv_data;
+ ParseContext *pc= &pc1->pc;
+ int next;
+
+ if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){
+ next= buf_size;
+ }else{
+ next= mpeg1_find_frame_end(pc, buf, buf_size);
+
+ if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+ return buf_size;
+ }
+
+ }
+ /* we have a full frame: we just parse the first few MPEG headers
+ to have the full timing information. The time taken by this
+ function should be negligible for uncorrupted streams */
+ mpegvideo_extract_headers(s, avctx, buf, buf_size);
+#if 0
+ printf("pict_type=%d frame_rate=%0.3f repeat_pict=%d\n",
+ s->pict_type, (double)avctx->time_base.den / avctx->time_base.num, s->repeat_pict);
+#endif
+
+ *poutbuf = (uint8_t *)buf;
+ *poutbuf_size = buf_size;
+ return next;
+}
+
+static int mpegvideo_split(AVCodecContext *avctx,
+ const uint8_t *buf, int buf_size)
+{
+ int i;
+ uint32_t state= -1;
+
+ for(i=0; i<buf_size; i++){
+ state= (state<<8) | buf[i];
+ if(state != 0x1B3 && state != 0x1B5 && state < 0x200 && state >= 0x100)
+ return i-3;
+ }
+ return 0;
+}
+
+AVCodecParser mpegvideo_parser = {
+ { CODEC_ID_MPEG1VIDEO, CODEC_ID_MPEG2VIDEO },
+ sizeof(ParseContext1),
+ NULL,
+ mpegvideo_parse,
+ ff_parse1_close,
+ mpegvideo_split,
+};
+#endif /* CONFIG_MPEGVIDEO_PARSER */
+
/* this is ugly, I know, but the alternative is to make
hundreds of vars global and prefix them with ff_mpeg1_
which is far uglier. */
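
mpegvideo_split() above relies on the standard MPEG start-code scan: every byte is shifted into a 32-bit accumulator, so the accumulator equals 0x000001xx exactly when the last four bytes form a start code. A self-contained sketch of that scan (an illustration, not part of the patch):

#include <stdint.h>

/* Return the offset of the first start code in buf, or -1 if none is found. */
static int find_first_start_code(const uint8_t *buf, int buf_size, uint32_t *code)
{
    uint32_t state = 0xFFFFFFFF;              /* same role as "state = -1" above */
    int i;

    for (i = 0; i < buf_size; i++) {
        state = (state << 8) | buf[i];
        if (state >= 0x100 && state < 0x200) {  /* bytes 00 00 01 xx */
            *code = state;
            return i - 3;                       /* offset of the first 0x00 byte */
        }
    }
    return -1;
}
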
diff --git a/src/libffmpeg/libavcodec/mpeg12data.h b/contrib/ffmpeg/libavcodec/mpeg12data.h
index e9a10ff3a..6c96a495b 100644
--- a/src/libffmpeg/libavcodec/mpeg12data.h
+++ b/contrib/ffmpeg/libavcodec/mpeg12data.h
@@ -1,9 +1,31 @@
+/*
+ * MPEG1 codec / MPEG2 decoder
+ * copyright (c) 2000,2001 Fabrice Bellard
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file mpeg12data.h
* MPEG1/2 tables.
*/
-const int16_t ff_mpeg1_default_intra_matrix[64] = {
+const uint16_t ff_mpeg1_default_intra_matrix[64] = {
8, 16, 19, 22, 26, 27, 29, 34,
16, 16, 22, 24, 27, 29, 34, 37,
19, 22, 26, 27, 29, 34, 34, 38,
@@ -14,7 +36,7 @@ const int16_t ff_mpeg1_default_intra_matrix[64] = {
27, 29, 35, 38, 46, 56, 69, 83
};
-const int16_t ff_mpeg1_default_non_intra_matrix[64] = {
+const uint16_t ff_mpeg1_default_non_intra_matrix[64] = {
16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16,
@@ -32,10 +54,10 @@ static const unsigned char vlc_dc_lum_bits[12] = {
3, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9, 9,
};
-const uint16_t vlc_dc_chroma_code[12] = {
+static const uint16_t vlc_dc_chroma_code[12] = {
0x0, 0x1, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe, 0x3fe, 0x3ff,
};
-const unsigned char vlc_dc_chroma_bits[12] = {
+static const unsigned char vlc_dc_chroma_bits[12] = {
2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10,
};
@@ -367,7 +389,7 @@ const uint8_t ff_mpeg1_dc_scale_table[128]={
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
-const static uint8_t mpeg2_dc_scale_table1[128]={
+static const uint8_t mpeg2_dc_scale_table1[128]={
// 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
diff --git a/src/libffmpeg/libavcodec/mpeg4data.h b/contrib/ffmpeg/libavcodec/mpeg4data.h
index 804d2ded8..e199c6a14 100644
--- a/src/libffmpeg/libavcodec/mpeg4data.h
+++ b/contrib/ffmpeg/libavcodec/mpeg4data.h
@@ -1,3 +1,25 @@
+/*
+ * copyright (c) 2000,2001 Fabrice Bellard
+ * H263+ support
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file mpeg4data.h
* mpeg4 tables.
diff --git a/contrib/ffmpeg/libavcodec/mpegaudio.c b/contrib/ffmpeg/libavcodec/mpegaudio.c
new file mode 100644
index 000000000..2e5e28a18
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/mpegaudio.c
@@ -0,0 +1,801 @@
+/*
+ * The simplest mpeg audio layer 2 encoder
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mpegaudio.c
+ * The simplest mpeg audio layer 2 encoder.
+ */
+
+#include "avcodec.h"
+#include "bitstream.h"
+#include "mpegaudio.h"
+
+/* currently, cannot change these constants (need to modify
+ quantization stage) */
+#define MUL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS)
+#define FIX(a) ((int)((a) * (1 << FRAC_BITS)))
+
+#define SAMPLES_BUF_SIZE 4096
+
+typedef struct MpegAudioContext {
+ PutBitContext pb;
+ int nb_channels;
+ int freq, bit_rate;
+ int lsf; /* 1 if mpeg2 low bitrate selected */
+ int bitrate_index; /* bit rate */
+ int freq_index;
+ int frame_size; /* frame size, in bits, without padding */
+ int64_t nb_samples; /* total number of samples encoded */
+ /* padding computation */
+ int frame_frac, frame_frac_incr, do_padding;
+ short samples_buf[MPA_MAX_CHANNELS][SAMPLES_BUF_SIZE]; /* buffer for filter */
+ int samples_offset[MPA_MAX_CHANNELS]; /* offset in samples_buf */
+ int sb_samples[MPA_MAX_CHANNELS][3][12][SBLIMIT];
+ unsigned char scale_factors[MPA_MAX_CHANNELS][SBLIMIT][3]; /* scale factors */
+ /* code to group 3 scale factors */
+ unsigned char scale_code[MPA_MAX_CHANNELS][SBLIMIT];
+ int sblimit; /* number of used subbands */
+ const unsigned char *alloc_table;
+} MpegAudioContext;
+
+/* define it to use floats in quantization (I don't like floats !) */
+//#define USE_FLOATS
+
+#include "mpegaudiotab.h"
+
+static int MPA_encode_init(AVCodecContext *avctx)
+{
+ MpegAudioContext *s = avctx->priv_data;
+ int freq = avctx->sample_rate;
+ int bitrate = avctx->bit_rate;
+ int channels = avctx->channels;
+ int i, v, table;
+ float a;
+
+ if (channels > 2)
+ return -1;
+ bitrate = bitrate / 1000;
+ s->nb_channels = channels;
+ s->freq = freq;
+ s->bit_rate = bitrate * 1000;
+ avctx->frame_size = MPA_FRAME_SIZE;
+
+ /* encoding freq */
+ s->lsf = 0;
+ for(i=0;i<3;i++) {
+ if (mpa_freq_tab[i] == freq)
+ break;
+ if ((mpa_freq_tab[i] / 2) == freq) {
+ s->lsf = 1;
+ break;
+ }
+ }
+ if (i == 3){
+ av_log(avctx, AV_LOG_ERROR, "Sampling rate %d is not allowed in mp2\n", freq);
+ return -1;
+ }
+ s->freq_index = i;
+
+ /* encoding bitrate & frequency */
+ for(i=0;i<15;i++) {
+ if (mpa_bitrate_tab[s->lsf][1][i] == bitrate)
+ break;
+ }
+ if (i == 15){
+ av_log(avctx, AV_LOG_ERROR, "bitrate %d is not allowed in mp2\n", bitrate);
+ return -1;
+ }
+ s->bitrate_index = i;
+
+ /* compute total header size & pad bit */
+
+ a = (float)(bitrate * 1000 * MPA_FRAME_SIZE) / (freq * 8.0);
+ s->frame_size = ((int)a) * 8;
+
+ /* frame fractional size to compute padding */
+ s->frame_frac = 0;
+ s->frame_frac_incr = (int)((a - floor(a)) * 65536.0);
+
+ /* select the right allocation table */
+ table = l2_select_table(bitrate, s->nb_channels, freq, s->lsf);
+
+ /* number of used subbands */
+ s->sblimit = sblimit_table[table];
+ s->alloc_table = alloc_tables[table];
+
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_DEBUG, "%d kb/s, %d Hz, frame_size=%d bits, table=%d, padincr=%x\n",
+ bitrate, freq, s->frame_size, table, s->frame_frac_incr);
+#endif
+
+ for(i=0;i<s->nb_channels;i++)
+ s->samples_offset[i] = 0;
+
+ for(i=0;i<257;i++) {
+ int v;
+ v = mpa_enwindow[i];
+#if WFRAC_BITS != 16
+ v = (v + (1 << (16 - WFRAC_BITS - 1))) >> (16 - WFRAC_BITS);
+#endif
+ filter_bank[i] = v;
+ if ((i & 63) != 0)
+ v = -v;
+ if (i != 0)
+ filter_bank[512 - i] = v;
+ }
+
+ for(i=0;i<64;i++) {
+ v = (int)(pow(2.0, (3 - i) / 3.0) * (1 << 20));
+ if (v <= 0)
+ v = 1;
+ scale_factor_table[i] = v;
+#ifdef USE_FLOATS
+ scale_factor_inv_table[i] = pow(2.0, -(3 - i) / 3.0) / (float)(1 << 20);
+#else
+#define P 15
+ scale_factor_shift[i] = 21 - P - (i / 3);
+ scale_factor_mult[i] = (1 << P) * pow(2.0, (i % 3) / 3.0);
+#endif
+ }
+ for(i=0;i<128;i++) {
+ v = i - 64;
+ if (v <= -3)
+ v = 0;
+ else if (v < 0)
+ v = 1;
+ else if (v == 0)
+ v = 2;
+ else if (v < 3)
+ v = 3;
+ else
+ v = 4;
+ scale_diff_table[i] = v;
+ }
+
+ for(i=0;i<17;i++) {
+ v = quant_bits[i];
+ if (v < 0)
+ v = -v;
+ else
+ v = v * 3;
+ total_quant_bits[i] = 12 * v;
+ }
+
+ avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame->key_frame= 1;
+
+ return 0;
+}
+
+/* 32 point floating point IDCT without 1/sqrt(2) coef zero scaling */
+static void idct32(int *out, int *tab)
+{
+ int i, j;
+ int *t, *t1, xr;
+ const int *xp = costab32;
+
+ for(j=31;j>=3;j-=2) tab[j] += tab[j - 2];
+
+ t = tab + 30;
+ t1 = tab + 2;
+ do {
+ t[0] += t[-4];
+ t[1] += t[1 - 4];
+ t -= 4;
+ } while (t != t1);
+
+ t = tab + 28;
+ t1 = tab + 4;
+ do {
+ t[0] += t[-8];
+ t[1] += t[1-8];
+ t[2] += t[2-8];
+ t[3] += t[3-8];
+ t -= 8;
+ } while (t != t1);
+
+ t = tab;
+ t1 = tab + 32;
+ do {
+ t[ 3] = -t[ 3];
+ t[ 6] = -t[ 6];
+
+ t[11] = -t[11];
+ t[12] = -t[12];
+ t[13] = -t[13];
+ t[15] = -t[15];
+ t += 16;
+ } while (t != t1);
+
+
+ t = tab;
+ t1 = tab + 8;
+ do {
+ int x1, x2, x3, x4;
+
+ x3 = MUL(t[16], FIX(SQRT2*0.5));
+ x4 = t[0] - x3;
+ x3 = t[0] + x3;
+
+ x2 = MUL(-(t[24] + t[8]), FIX(SQRT2*0.5));
+ x1 = MUL((t[8] - x2), xp[0]);
+ x2 = MUL((t[8] + x2), xp[1]);
+
+ t[ 0] = x3 + x1;
+ t[ 8] = x4 - x2;
+ t[16] = x4 + x2;
+ t[24] = x3 - x1;
+ t++;
+ } while (t != t1);
+
+ xp += 2;
+ t = tab;
+ t1 = tab + 4;
+ do {
+ xr = MUL(t[28],xp[0]);
+ t[28] = (t[0] - xr);
+ t[0] = (t[0] + xr);
+
+ xr = MUL(t[4],xp[1]);
+ t[ 4] = (t[24] - xr);
+ t[24] = (t[24] + xr);
+
+ xr = MUL(t[20],xp[2]);
+ t[20] = (t[8] - xr);
+ t[ 8] = (t[8] + xr);
+
+ xr = MUL(t[12],xp[3]);
+ t[12] = (t[16] - xr);
+ t[16] = (t[16] + xr);
+ t++;
+ } while (t != t1);
+ xp += 4;
+
+ for (i = 0; i < 4; i++) {
+ xr = MUL(tab[30-i*4],xp[0]);
+ tab[30-i*4] = (tab[i*4] - xr);
+ tab[ i*4] = (tab[i*4] + xr);
+
+ xr = MUL(tab[ 2+i*4],xp[1]);
+ tab[ 2+i*4] = (tab[28-i*4] - xr);
+ tab[28-i*4] = (tab[28-i*4] + xr);
+
+ xr = MUL(tab[31-i*4],xp[0]);
+ tab[31-i*4] = (tab[1+i*4] - xr);
+ tab[ 1+i*4] = (tab[1+i*4] + xr);
+
+ xr = MUL(tab[ 3+i*4],xp[1]);
+ tab[ 3+i*4] = (tab[29-i*4] - xr);
+ tab[29-i*4] = (tab[29-i*4] + xr);
+
+ xp += 2;
+ }
+
+ t = tab + 30;
+ t1 = tab + 1;
+ do {
+ xr = MUL(t1[0], *xp);
+ t1[0] = (t[0] - xr);
+ t[0] = (t[0] + xr);
+ t -= 2;
+ t1 += 2;
+ xp++;
+ } while (t >= tab);
+
+ for(i=0;i<32;i++) {
+ out[i] = tab[bitinv32[i]];
+ }
+}
+
+#define WSHIFT (WFRAC_BITS + 15 - FRAC_BITS)
+
+static void filter(MpegAudioContext *s, int ch, short *samples, int incr)
+{
+ short *p, *q;
+ int sum, offset, i, j;
+ int tmp[64];
+ int tmp1[32];
+ int *out;
+
+ // print_pow1(samples, 1152);
+
+ offset = s->samples_offset[ch];
+ out = &s->sb_samples[ch][0][0][0];
+ for(j=0;j<36;j++) {
+ /* 32 samples at once */
+ for(i=0;i<32;i++) {
+ s->samples_buf[ch][offset + (31 - i)] = samples[0];
+ samples += incr;
+ }
+
+ /* filter */
+ p = s->samples_buf[ch] + offset;
+ q = filter_bank;
+ /* maxsum = 23169 */
+ for(i=0;i<64;i++) {
+ sum = p[0*64] * q[0*64];
+ sum += p[1*64] * q[1*64];
+ sum += p[2*64] * q[2*64];
+ sum += p[3*64] * q[3*64];
+ sum += p[4*64] * q[4*64];
+ sum += p[5*64] * q[5*64];
+ sum += p[6*64] * q[6*64];
+ sum += p[7*64] * q[7*64];
+ tmp[i] = sum;
+ p++;
+ q++;
+ }
+ tmp1[0] = tmp[16] >> WSHIFT;
+ for( i=1; i<=16; i++ ) tmp1[i] = (tmp[i+16]+tmp[16-i]) >> WSHIFT;
+ for( i=17; i<=31; i++ ) tmp1[i] = (tmp[i+16]-tmp[80-i]) >> WSHIFT;
+
+ idct32(out, tmp1);
+
+ /* advance of 32 samples */
+ offset -= 32;
+ out += 32;
+ /* handle the wrap around */
+ if (offset < 0) {
+ memmove(s->samples_buf[ch] + SAMPLES_BUF_SIZE - (512 - 32),
+ s->samples_buf[ch], (512 - 32) * 2);
+ offset = SAMPLES_BUF_SIZE - 512;
+ }
+ }
+ s->samples_offset[ch] = offset;
+
+ // print_pow(s->sb_samples, 1152);
+}
+
+static void compute_scale_factors(unsigned char scale_code[SBLIMIT],
+ unsigned char scale_factors[SBLIMIT][3],
+ int sb_samples[3][12][SBLIMIT],
+ int sblimit)
+{
+ int *p, vmax, v, n, i, j, k, code;
+ int index, d1, d2;
+ unsigned char *sf = &scale_factors[0][0];
+
+ for(j=0;j<sblimit;j++) {
+ for(i=0;i<3;i++) {
+ /* find the max absolute value */
+ p = &sb_samples[i][0][j];
+ vmax = abs(*p);
+ for(k=1;k<12;k++) {
+ p += SBLIMIT;
+ v = abs(*p);
+ if (v > vmax)
+ vmax = v;
+ }
+ /* compute the scale factor index using log 2 computations */
+ if (vmax > 0) {
+ n = av_log2(vmax);
+ /* n is the position of the MSB of vmax. now
+ use at most 2 compares to find the index */
+ index = (21 - n) * 3 - 3;
+ if (index >= 0) {
+ while (vmax <= scale_factor_table[index+1])
+ index++;
+ } else {
+ index = 0; /* very unlikely case of overflow */
+ }
+ } else {
+ index = 62; /* value 63 is not allowed */
+ }
+
+#if 0
+ printf("%2d:%d in=%x %x %d\n",
+ j, i, vmax, scale_factor_table[index], index);
+#endif
+ /* store the scale factor */
+ assert(index >=0 && index <= 63);
+ sf[i] = index;
+ }
+
+ /* compute the transmission factor: check whether the scale factors
+ are close enough to each other */
+ d1 = scale_diff_table[sf[0] - sf[1] + 64];
+ d2 = scale_diff_table[sf[1] - sf[2] + 64];
+
+ /* handle the 25 cases */
+ switch(d1 * 5 + d2) {
+ case 0*5+0:
+ case 0*5+4:
+ case 3*5+4:
+ case 4*5+0:
+ case 4*5+4:
+ code = 0;
+ break;
+ case 0*5+1:
+ case 0*5+2:
+ case 4*5+1:
+ case 4*5+2:
+ code = 3;
+ sf[2] = sf[1];
+ break;
+ case 0*5+3:
+ case 4*5+3:
+ code = 3;
+ sf[1] = sf[2];
+ break;
+ case 1*5+0:
+ case 1*5+4:
+ case 2*5+4:
+ code = 1;
+ sf[1] = sf[0];
+ break;
+ case 1*5+1:
+ case 1*5+2:
+ case 2*5+0:
+ case 2*5+1:
+ case 2*5+2:
+ code = 2;
+ sf[1] = sf[2] = sf[0];
+ break;
+ case 2*5+3:
+ case 3*5+3:
+ code = 2;
+ sf[0] = sf[1] = sf[2];
+ break;
+ case 3*5+0:
+ case 3*5+1:
+ case 3*5+2:
+ code = 2;
+ sf[0] = sf[2] = sf[1];
+ break;
+ case 1*5+3:
+ code = 2;
+ if (sf[0] > sf[2])
+ sf[0] = sf[2];
+ sf[1] = sf[2] = sf[0];
+ break;
+ default:
+ assert(0); //can't happen
+ code = 0; /* kill warning */
+ }
+
+#if 0
+ printf("%d: %2d %2d %2d %d %d -> %d\n", j,
+ sf[0], sf[1], sf[2], d1, d2, code);
+#endif
+ scale_code[j] = code;
+ sf += 3;
+ }
+}
+
+/* The most important function : psycho acoustic module. In this
+ encoder there is basically none, so this is the worst you can do,
+ but it is also the simplest. */
+static void psycho_acoustic_model(MpegAudioContext *s, short smr[SBLIMIT])
+{
+ int i;
+
+ for(i=0;i<s->sblimit;i++) {
+ smr[i] = (int)(fixed_smr[i] * 10);
+ }
+}
+
+
+#define SB_NOTALLOCATED 0
+#define SB_ALLOCATED 1
+#define SB_NOMORE 2
+
+/* Try to maximize the smr while using a number of bits no larger than
+ the frame size. I tried to make the code simpler, faster and
+ smaller than other encoders :-) */
+static void compute_bit_allocation(MpegAudioContext *s,
+ short smr1[MPA_MAX_CHANNELS][SBLIMIT],
+ unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT],
+ int *padding)
+{
+ int i, ch, b, max_smr, max_ch, max_sb, current_frame_size, max_frame_size;
+ int incr;
+ short smr[MPA_MAX_CHANNELS][SBLIMIT];
+ unsigned char subband_status[MPA_MAX_CHANNELS][SBLIMIT];
+ const unsigned char *alloc;
+
+ memcpy(smr, smr1, s->nb_channels * sizeof(short) * SBLIMIT);
+ memset(subband_status, SB_NOTALLOCATED, s->nb_channels * SBLIMIT);
+ memset(bit_alloc, 0, s->nb_channels * SBLIMIT);
+
+ /* compute frame size and padding */
+ max_frame_size = s->frame_size;
+ s->frame_frac += s->frame_frac_incr;
+ if (s->frame_frac >= 65536) {
+ s->frame_frac -= 65536;
+ s->do_padding = 1;
+ max_frame_size += 8;
+ } else {
+ s->do_padding = 0;
+ }
+
+ /* compute the header + bit alloc size */
+ current_frame_size = 32;
+ alloc = s->alloc_table;
+ for(i=0;i<s->sblimit;i++) {
+ incr = alloc[0];
+ current_frame_size += incr * s->nb_channels;
+ alloc += 1 << incr;
+ }
+ for(;;) {
+ /* look for the subband with the largest signal to mask ratio */
+ max_sb = -1;
+ max_ch = -1;
+ max_smr = 0x80000000;
+ for(ch=0;ch<s->nb_channels;ch++) {
+ for(i=0;i<s->sblimit;i++) {
+ if (smr[ch][i] > max_smr && subband_status[ch][i] != SB_NOMORE) {
+ max_smr = smr[ch][i];
+ max_sb = i;
+ max_ch = ch;
+ }
+ }
+ }
+#if 0
+ printf("current=%d max=%d max_sb=%d alloc=%d\n",
+ current_frame_size, max_frame_size, max_sb,
+ bit_alloc[max_sb]);
+#endif
+ if (max_sb < 0)
+ break;
+
+ /* find alloc table entry (XXX: not optimal, should use
+ pointer table) */
+ alloc = s->alloc_table;
+ for(i=0;i<max_sb;i++) {
+ alloc += 1 << alloc[0];
+ }
+
+ if (subband_status[max_ch][max_sb] == SB_NOTALLOCATED) {
+ /* nothing was coded for this band: add the necessary bits */
+ incr = 2 + nb_scale_factors[s->scale_code[max_ch][max_sb]] * 6;
+ incr += total_quant_bits[alloc[1]];
+ } else {
+ /* increments bit allocation */
+ b = bit_alloc[max_ch][max_sb];
+ incr = total_quant_bits[alloc[b + 1]] -
+ total_quant_bits[alloc[b]];
+ }
+
+ if (current_frame_size + incr <= max_frame_size) {
+ /* can increase size */
+ b = ++bit_alloc[max_ch][max_sb];
+ current_frame_size += incr;
+ /* decrease smr by the resolution we added */
+ smr[max_ch][max_sb] = smr1[max_ch][max_sb] - quant_snr[alloc[b]];
+ /* max allocation size reached ? */
+ if (b == ((1 << alloc[0]) - 1))
+ subband_status[max_ch][max_sb] = SB_NOMORE;
+ else
+ subband_status[max_ch][max_sb] = SB_ALLOCATED;
+ } else {
+ /* cannot increase the size of this subband */
+ subband_status[max_ch][max_sb] = SB_NOMORE;
+ }
+ }
+ *padding = max_frame_size - current_frame_size;
+ assert(*padding >= 0);
+
+#if 0
+ for(i=0;i<s->sblimit;i++) {
+ printf("%d ", bit_alloc[i]);
+ }
+ printf("\n");
+#endif
+}
+
+/*
+ * Output the mpeg audio layer 2 frame. Note how the code is small
+ * compared to other encoders :-)
+ */
+static void encode_frame(MpegAudioContext *s,
+ unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT],
+ int padding)
+{
+ int i, j, k, l, bit_alloc_bits, b, ch;
+ unsigned char *sf;
+ int q[3];
+ PutBitContext *p = &s->pb;
+
+ /* header */
+
+ put_bits(p, 12, 0xfff);
+ put_bits(p, 1, 1 - s->lsf); /* 1 = mpeg1 ID, 0 = mpeg2 lsf ID */
+ put_bits(p, 2, 4-2); /* layer 2 */
+ put_bits(p, 1, 1); /* no error protection */
+ put_bits(p, 4, s->bitrate_index);
+ put_bits(p, 2, s->freq_index);
+ put_bits(p, 1, s->do_padding); /* use padding */
+ put_bits(p, 1, 0); /* private_bit */
+ put_bits(p, 2, s->nb_channels == 2 ? MPA_STEREO : MPA_MONO);
+ put_bits(p, 2, 0); /* mode_ext */
+ put_bits(p, 1, 0); /* no copyright */
+ put_bits(p, 1, 1); /* original */
+ put_bits(p, 2, 0); /* no emphasis */
+
+ /* bit allocation */
+ j = 0;
+ for(i=0;i<s->sblimit;i++) {
+ bit_alloc_bits = s->alloc_table[j];
+ for(ch=0;ch<s->nb_channels;ch++) {
+ put_bits(p, bit_alloc_bits, bit_alloc[ch][i]);
+ }
+ j += 1 << bit_alloc_bits;
+ }
+
+ /* scale codes */
+ for(i=0;i<s->sblimit;i++) {
+ for(ch=0;ch<s->nb_channels;ch++) {
+ if (bit_alloc[ch][i])
+ put_bits(p, 2, s->scale_code[ch][i]);
+ }
+ }
+
+ /* scale factors */
+ for(i=0;i<s->sblimit;i++) {
+ for(ch=0;ch<s->nb_channels;ch++) {
+ if (bit_alloc[ch][i]) {
+ sf = &s->scale_factors[ch][i][0];
+ switch(s->scale_code[ch][i]) {
+ case 0:
+ put_bits(p, 6, sf[0]);
+ put_bits(p, 6, sf[1]);
+ put_bits(p, 6, sf[2]);
+ break;
+ case 3:
+ case 1:
+ put_bits(p, 6, sf[0]);
+ put_bits(p, 6, sf[2]);
+ break;
+ case 2:
+ put_bits(p, 6, sf[0]);
+ break;
+ }
+ }
+ }
+ }
+
+ /* quantization & write sub band samples */
+
+ for(k=0;k<3;k++) {
+ for(l=0;l<12;l+=3) {
+ j = 0;
+ for(i=0;i<s->sblimit;i++) {
+ bit_alloc_bits = s->alloc_table[j];
+ for(ch=0;ch<s->nb_channels;ch++) {
+ b = bit_alloc[ch][i];
+ if (b) {
+ int qindex, steps, m, sample, bits;
+ /* we encode 3 sub band samples of the same sub band at a time */
+ qindex = s->alloc_table[j+b];
+ steps = quant_steps[qindex];
+ for(m=0;m<3;m++) {
+ sample = s->sb_samples[ch][k][l + m][i];
+ /* divide by scale factor */
+#ifdef USE_FLOATS
+ {
+ float a;
+ a = (float)sample * scale_factor_inv_table[s->scale_factors[ch][i][k]];
+ q[m] = (int)((a + 1.0) * steps * 0.5);
+ }
+#else
+ {
+ int q1, e, shift, mult;
+ e = s->scale_factors[ch][i][k];
+ shift = scale_factor_shift[e];
+ mult = scale_factor_mult[e];
+
+ /* normalize to P bits */
+ if (shift < 0)
+ q1 = sample << (-shift);
+ else
+ q1 = sample >> shift;
+ q1 = (q1 * mult) >> P;
+ q[m] = ((q1 + (1 << P)) * steps) >> (P + 1);
+ }
+#endif
+ if (q[m] >= steps)
+ q[m] = steps - 1;
+ assert(q[m] >= 0 && q[m] < steps);
+ }
+ bits = quant_bits[qindex];
+ if (bits < 0) {
+ /* group the 3 values to save bits */
+ put_bits(p, -bits,
+ q[0] + steps * (q[1] + steps * q[2]));
+#if 0
+ printf("%d: gr1 %d\n",
+ i, q[0] + steps * (q[1] + steps * q[2]));
+#endif
+ } else {
+#if 0
+ printf("%d: gr3 %d %d %d\n",
+ i, q[0], q[1], q[2]);
+#endif
+ put_bits(p, bits, q[0]);
+ put_bits(p, bits, q[1]);
+ put_bits(p, bits, q[2]);
+ }
+ }
+ }
+ /* next subband in alloc table */
+ j += 1 << bit_alloc_bits;
+ }
+ }
+ }
+
+ /* padding */
+ for(i=0;i<padding;i++)
+ put_bits(p, 1, 0);
+
+ /* flush */
+ flush_put_bits(p);
+}
+
+static int MPA_encode_frame(AVCodecContext *avctx,
+ unsigned char *frame, int buf_size, void *data)
+{
+ MpegAudioContext *s = avctx->priv_data;
+ short *samples = data;
+ short smr[MPA_MAX_CHANNELS][SBLIMIT];
+ unsigned char bit_alloc[MPA_MAX_CHANNELS][SBLIMIT];
+ int padding, i;
+
+ for(i=0;i<s->nb_channels;i++) {
+ filter(s, i, samples + i, s->nb_channels);
+ }
+
+ for(i=0;i<s->nb_channels;i++) {
+ compute_scale_factors(s->scale_code[i], s->scale_factors[i],
+ s->sb_samples[i], s->sblimit);
+ }
+ for(i=0;i<s->nb_channels;i++) {
+ psycho_acoustic_model(s, smr[i]);
+ }
+ compute_bit_allocation(s, smr, bit_alloc, &padding);
+
+ init_put_bits(&s->pb, frame, MPA_MAX_CODED_FRAME_SIZE);
+
+ encode_frame(s, bit_alloc, padding);
+
+ s->nb_samples += MPA_FRAME_SIZE;
+ return pbBufPtr(&s->pb) - s->pb.buf;
+}
+
+static int MPA_encode_close(AVCodecContext *avctx)
+{
+ av_freep(&avctx->coded_frame);
+ return 0;
+}
+
+#ifdef CONFIG_MP2_ENCODER
+AVCodec mp2_encoder = {
+ "mp2",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_MP2,
+ sizeof(MpegAudioContext),
+ MPA_encode_init,
+ MPA_encode_frame,
+ MPA_encode_close,
+ NULL,
+};
+#endif // CONFIG_MP2_ENCODER
+
+#undef FIX
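
The padding bookkeeping introduced in MPA_encode_init() and consumed by compute_bit_allocation() keeps the average bitrate exact: the true frame length in bytes is bit_rate * 1152 / (8 * sample_rate), the integer part becomes frame_size and the fractional part is accumulated in 16.16 fixed point, adding one padding byte whenever the accumulator wraps. A worked example (an illustration, not part of the patch):

#include <math.h>
#include <stdio.h>

int main(void)
{
    int bit_rate = 128000, sample_rate = 44100;         /* 128 kb/s, 44.1 kHz  */
    double a = bit_rate * 1152.0 / (sample_rate * 8.0); /* ~417.96 bytes/frame */
    int frame_size = (int)a * 8;                        /* 3336 bits           */
    int frac_incr  = (int)((a - floor(a)) * 65536.0);   /* ~62861              */
    int frac = 0, padded = 0, i;

    for (i = 0; i < 100; i++) {                         /* count padded frames */
        frac += frac_incr;
        if (frac >= 65536) { frac -= 65536; padded++; }
    }
    printf("frame_size=%d bits, %d of 100 frames carry a padding byte\n",
           frame_size, padded);                         /* roughly 96%         */
    return 0;
}
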
diff --git a/src/libffmpeg/libavcodec/mpegaudio.h b/contrib/ffmpeg/libavcodec/mpegaudio.h
index 0ee58240c..3eadf92a8 100644
--- a/src/libffmpeg/libavcodec/mpegaudio.h
+++ b/contrib/ffmpeg/libavcodec/mpegaudio.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file mpegaudio.h
* mpeg audio declarations for both encoder and decoder.
@@ -52,7 +72,7 @@ typedef int32_t MPA_INT;
#endif
int l2_select_table(int bitrate, int nb_channels, int freq, int lsf);
-int mpa_decode_header(AVCodecContext *avctx, uint32_t head);
+int mpa_decode_header(AVCodecContext *avctx, uint32_t head, int *sample_rate);
void ff_mpa_synth_init(MPA_INT *window);
void ff_mpa_synth_filter(MPA_INT *synth_buf_ptr, int *synth_buf_offset,
MPA_INT *window, int *dither_state,
diff --git a/src/libffmpeg/libavcodec/mpegaudiodec.c b/contrib/ffmpeg/libavcodec/mpegaudiodec.c
index 0d82e3e98..54bcee3b0 100644
--- a/src/libffmpeg/libavcodec/mpegaudiodec.c
+++ b/contrib/ffmpeg/libavcodec/mpegaudiodec.c
@@ -2,18 +2,20 @@
* MPEG Audio decoder
* Copyright (c) 2001, 2002 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -36,40 +38,34 @@
/* define USE_HIGHPRECISION to have a bit exact (but slower) mpeg
audio decoder */
#ifdef CONFIG_MPEGAUDIO_HP
-#define USE_HIGHPRECISION
+# define USE_HIGHPRECISION
#endif
#include "mpegaudio.h"
+#include "mathops.h"
+
#define FRAC_ONE (1 << FRAC_BITS)
-#define MULL(a,b) (((int64_t)(a) * (int64_t)(b)) >> FRAC_BITS)
-#define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
#define FIX(a) ((int)((a) * FRAC_ONE))
/* WARNING: only correct for positive numbers */
#define FIXR(a) ((int)((a) * FRAC_ONE + 0.5))
#define FRAC_RND(a) (((a) + (FRAC_ONE/2)) >> FRAC_BITS)
#define FIXHR(a) ((int)((a) * (1LL<<32) + 0.5))
-//#define MULH(a,b) (((int64_t)(a) * (int64_t)(b))>>32) //gcc 3.4 creates an incredibly bloated mess out of this
-static always_inline int MULH(int a, int b){
- return ((int64_t)(a) * (int64_t)(b))>>32;
-}
/****************/
#define HEADER_SIZE 4
#define BACKSTEP_SIZE 512
+#define EXTRABYTES 24
struct GranuleDef;
typedef struct MPADecodeContext {
- uint8_t inbuf1[2][MPA_MAX_CODED_FRAME_SIZE + BACKSTEP_SIZE]; /* input buffer */
- int inbuf_index;
- uint8_t *inbuf_ptr, *inbuf;
+ DECLARE_ALIGNED_8(uint8_t, last_buf[2*BACKSTEP_SIZE + EXTRABYTES]);
+ int last_buf_size;
int frame_size;
- int free_format_frame_size; /* frame size in case of free format
- (zero if currently unknown) */
/* next header (used in free format parsing) */
uint32_t free_format_next_header;
int error_protection;
@@ -77,8 +73,8 @@ typedef struct MPADecodeContext {
int sample_rate;
int sample_rate_index; /* between 0 and 8 */
int bit_rate;
- int old_frame_size;
GetBitContext gb;
+ GetBitContext in_gb;
int nb_channels;
int mode;
int mode_ext;
@@ -92,7 +88,8 @@ typedef struct MPADecodeContext {
#endif
void (*compute_antialias)(struct MPADecodeContext *s, struct GranuleDef *g);
int adu_mode; ///< 0 for standard mp3, 1 for adu formatted mp3
- unsigned int dither_state;
+ int dither_state;
+ int error_resilience;
} MPADecodeContext;
/**
@@ -141,7 +138,6 @@ static void compute_antialias_float(MPADecodeContext *s, GranuleDef *g);
/* vlc structure for decoding layer 3 huffman tables */
static VLC huff_vlc[16];
-static uint8_t *huff_code_table[16];
static VLC huff_quad_vlc[2];
/* computed from band_size_long */
static uint16_t band_index_long[9][23];
@@ -149,6 +145,8 @@ static uint16_t band_index_long[9][23];
#define TABLE_4_3_SIZE (8191 + 16)*4
static int8_t *table_4_3_exp;
static uint32_t *table_4_3_value;
+static uint32_t exp_table[512];
+static uint32_t expval_table[512][16];
/* intensity stereo coef table */
static int32_t is_table[2][16];
static int32_t is_table_lsf[2][2][16];
@@ -171,7 +169,6 @@ static const int32_t scale_factor_mult2[3][3] = {
SCALE_GEN(4.0 / 9.0), /* 9 steps */
};
-void ff_mpa_synth_init(MPA_INT *window);
static MPA_INT window[512] __attribute__((aligned(16)));
/* layer 1 unscaling */
@@ -309,6 +306,7 @@ static int decode_init(AVCodecContext * avctx)
#else
avctx->sample_fmt= SAMPLE_FMT_S16;
#endif
+ s->error_resilience= avctx->error_resilience;
if(avctx->antialias_algo != FF_AA_FLOAT)
s->compute_antialias= compute_antialias_integer;
@@ -343,26 +341,30 @@ static int decode_init(AVCodecContext * avctx)
ff_mpa_synth_init(window);
/* huffman decode tables */
- huff_code_table[0] = NULL;
for(i=1;i<16;i++) {
const HuffTable *h = &mpa_huff_tables[i];
int xsize, x, y;
unsigned int n;
- uint8_t *code_table;
+ uint8_t tmp_bits [512];
+ uint16_t tmp_codes[512];
+
+ memset(tmp_bits , 0, sizeof(tmp_bits ));
+ memset(tmp_codes, 0, sizeof(tmp_codes));
xsize = h->xsize;
n = xsize * xsize;
- /* XXX: fail test */
- init_vlc(&huff_vlc[i], 8, n,
- h->bits, 1, 1, h->codes, 2, 2, 1);
- code_table = av_mallocz(n);
j = 0;
for(x=0;x<xsize;x++) {
- for(y=0;y<xsize;y++)
- code_table[j++] = (x << 4) | y;
+ for(y=0;y<xsize;y++){
+ tmp_bits [(x << 5) | y | ((x&&y)<<4)]= h->bits [j ];
+ tmp_codes[(x << 5) | y | ((x&&y)<<4)]= h->codes[j++];
+ }
}
- huff_code_table[i] = code_table;
+
+ /* XXX: fail test */
+ init_vlc(&huff_vlc[i], 7, 512,
+ tmp_bits, 1, 1, tmp_codes, 2, 2, 1);
}
for(i=0;i<2;i++) {
init_vlc(&huff_quad_vlc[i], i == 0 ? 7 : 4, 16,
@@ -393,13 +395,20 @@ static int decode_init(AVCodecContext * avctx)
f = pow((double)(i/4), 4.0 / 3.0) * pow(2, (i&3)*0.25);
fm = frexp(f, &e);
m = (uint32_t)(fm*(1LL<<31) + 0.5);
- e+= FRAC_BITS - 31 + 5;
+ e+= FRAC_BITS - 31 + 5 - 100;
/* normalized to FRAC_BITS */
table_4_3_value[i] = m;
// av_log(NULL, AV_LOG_DEBUG, "%d %d %f\n", i, m, pow((double)i, 4.0 / 3.0));
table_4_3_exp[i] = -e;
}
+ for(i=0; i<512*16; i++){
+ int exponent= (i>>4);
+ double f= pow(i&15, 4.0 / 3.0) * pow(2, (exponent-400)*0.25 + FRAC_BITS + 5);
+ expval_table[exponent][i&15]= llrint(f);
+ if((i&15)==1)
+ exp_table[exponent]= llrint(f);
+ }
for(i=0;i<7;i++) {
float f;
@@ -498,9 +507,6 @@ static int decode_init(AVCodecContext * avctx)
init = 1;
}
- s->inbuf_index = 0;
- s->inbuf = &s->inbuf1[s->inbuf_index][BACKSTEP_SIZE];
- s->inbuf_ptr = s->inbuf;
#ifdef DEBUG
s->frame_count = 0;
#endif
@@ -513,62 +519,62 @@ static int decode_init(AVCodecContext * avctx)
/* cos(i*pi/64) */
-#define COS0_0 FIXR(0.50060299823519630134)
-#define COS0_1 FIXR(0.50547095989754365998)
-#define COS0_2 FIXR(0.51544730992262454697)
-#define COS0_3 FIXR(0.53104259108978417447)
-#define COS0_4 FIXR(0.55310389603444452782)
-#define COS0_5 FIXR(0.58293496820613387367)
-#define COS0_6 FIXR(0.62250412303566481615)
-#define COS0_7 FIXR(0.67480834145500574602)
-#define COS0_8 FIXR(0.74453627100229844977)
-#define COS0_9 FIXR(0.83934964541552703873)
-#define COS0_10 FIXR(0.97256823786196069369)
-#define COS0_11 FIXR(1.16943993343288495515)
-#define COS0_12 FIXR(1.48416461631416627724)
-#define COS0_13 FIXR(2.05778100995341155085)
-#define COS0_14 FIXR(3.40760841846871878570)
-#define COS0_15 FIXR(10.19000812354805681150)
-
-#define COS1_0 FIXR(0.50241928618815570551)
-#define COS1_1 FIXR(0.52249861493968888062)
-#define COS1_2 FIXR(0.56694403481635770368)
-#define COS1_3 FIXR(0.64682178335999012954)
-#define COS1_4 FIXR(0.78815462345125022473)
-#define COS1_5 FIXR(1.06067768599034747134)
-#define COS1_6 FIXR(1.72244709823833392782)
-#define COS1_7 FIXR(5.10114861868916385802)
-
-#define COS2_0 FIXR(0.50979557910415916894)
-#define COS2_1 FIXR(0.60134488693504528054)
-#define COS2_2 FIXR(0.89997622313641570463)
-#define COS2_3 FIXR(2.56291544774150617881)
-
-#define COS3_0 FIXR(0.54119610014619698439)
-#define COS3_1 FIXR(1.30656296487637652785)
-
-#define COS4_0 FIXR(0.70710678118654752439)
+#define COS0_0 FIXHR(0.50060299823519630134/2)
+#define COS0_1 FIXHR(0.50547095989754365998/2)
+#define COS0_2 FIXHR(0.51544730992262454697/2)
+#define COS0_3 FIXHR(0.53104259108978417447/2)
+#define COS0_4 FIXHR(0.55310389603444452782/2)
+#define COS0_5 FIXHR(0.58293496820613387367/2)
+#define COS0_6 FIXHR(0.62250412303566481615/2)
+#define COS0_7 FIXHR(0.67480834145500574602/2)
+#define COS0_8 FIXHR(0.74453627100229844977/2)
+#define COS0_9 FIXHR(0.83934964541552703873/2)
+#define COS0_10 FIXHR(0.97256823786196069369/2)
+#define COS0_11 FIXHR(1.16943993343288495515/4)
+#define COS0_12 FIXHR(1.48416461631416627724/4)
+#define COS0_13 FIXHR(2.05778100995341155085/8)
+#define COS0_14 FIXHR(3.40760841846871878570/8)
+#define COS0_15 FIXHR(10.19000812354805681150/32)
+
+#define COS1_0 FIXHR(0.50241928618815570551/2)
+#define COS1_1 FIXHR(0.52249861493968888062/2)
+#define COS1_2 FIXHR(0.56694403481635770368/2)
+#define COS1_3 FIXHR(0.64682178335999012954/2)
+#define COS1_4 FIXHR(0.78815462345125022473/2)
+#define COS1_5 FIXHR(1.06067768599034747134/4)
+#define COS1_6 FIXHR(1.72244709823833392782/4)
+#define COS1_7 FIXHR(5.10114861868916385802/16)
+
+#define COS2_0 FIXHR(0.50979557910415916894/2)
+#define COS2_1 FIXHR(0.60134488693504528054/2)
+#define COS2_2 FIXHR(0.89997622313641570463/2)
+#define COS2_3 FIXHR(2.56291544774150617881/8)
+
+#define COS3_0 FIXHR(0.54119610014619698439/2)
+#define COS3_1 FIXHR(1.30656296487637652785/4)
+
+#define COS4_0 FIXHR(0.70710678118654752439/2)
/* butterfly operator */
-#define BF(a, b, c)\
+#define BF(a, b, c, s)\
{\
tmp0 = tab[a] + tab[b];\
tmp1 = tab[a] - tab[b];\
tab[a] = tmp0;\
- tab[b] = MULL(tmp1, c);\
+ tab[b] = MULH(tmp1<<(s), c);\
}
#define BF1(a, b, c, d)\
{\
- BF(a, b, COS4_0);\
- BF(c, d, -COS4_0);\
+ BF(a, b, COS4_0, 1);\
+ BF(c, d,-COS4_0, 1);\
tab[c] += tab[d];\
}
#define BF2(a, b, c, d)\
{\
- BF(a, b, COS4_0);\
- BF(c, d, -COS4_0);\
+ BF(a, b, COS4_0, 1);\
+ BF(c, d,-COS4_0, 1);\
tab[c] += tab[d];\
tab[a] += tab[c];\
tab[c] += tab[b];\
@@ -583,92 +589,100 @@ static void dct32(int32_t *out, int32_t *tab)
int tmp0, tmp1;
/* pass 1 */
- BF(0, 31, COS0_0);
- BF(1, 30, COS0_1);
- BF(2, 29, COS0_2);
- BF(3, 28, COS0_3);
- BF(4, 27, COS0_4);
- BF(5, 26, COS0_5);
- BF(6, 25, COS0_6);
- BF(7, 24, COS0_7);
- BF(8, 23, COS0_8);
- BF(9, 22, COS0_9);
- BF(10, 21, COS0_10);
- BF(11, 20, COS0_11);
- BF(12, 19, COS0_12);
- BF(13, 18, COS0_13);
- BF(14, 17, COS0_14);
- BF(15, 16, COS0_15);
-
+ BF( 0, 31, COS0_0 , 1);
+ BF(15, 16, COS0_15, 5);
/* pass 2 */
- BF(0, 15, COS1_0);
- BF(1, 14, COS1_1);
- BF(2, 13, COS1_2);
- BF(3, 12, COS1_3);
- BF(4, 11, COS1_4);
- BF(5, 10, COS1_5);
- BF(6, 9, COS1_6);
- BF(7, 8, COS1_7);
-
- BF(16, 31, -COS1_0);
- BF(17, 30, -COS1_1);
- BF(18, 29, -COS1_2);
- BF(19, 28, -COS1_3);
- BF(20, 27, -COS1_4);
- BF(21, 26, -COS1_5);
- BF(22, 25, -COS1_6);
- BF(23, 24, -COS1_7);
-
+ BF( 0, 15, COS1_0 , 1);
+ BF(16, 31,-COS1_0 , 1);
+ /* pass 1 */
+ BF( 7, 24, COS0_7 , 1);
+ BF( 8, 23, COS0_8 , 1);
+ /* pass 2 */
+ BF( 7, 8, COS1_7 , 4);
+ BF(23, 24,-COS1_7 , 4);
/* pass 3 */
- BF(0, 7, COS2_0);
- BF(1, 6, COS2_1);
- BF(2, 5, COS2_2);
- BF(3, 4, COS2_3);
-
- BF(8, 15, -COS2_0);
- BF(9, 14, -COS2_1);
- BF(10, 13, -COS2_2);
- BF(11, 12, -COS2_3);
-
- BF(16, 23, COS2_0);
- BF(17, 22, COS2_1);
- BF(18, 21, COS2_2);
- BF(19, 20, COS2_3);
-
- BF(24, 31, -COS2_0);
- BF(25, 30, -COS2_1);
- BF(26, 29, -COS2_2);
- BF(27, 28, -COS2_3);
-
+ BF( 0, 7, COS2_0 , 1);
+ BF( 8, 15,-COS2_0 , 1);
+ BF(16, 23, COS2_0 , 1);
+ BF(24, 31,-COS2_0 , 1);
+ /* pass 1 */
+ BF( 3, 28, COS0_3 , 1);
+ BF(12, 19, COS0_12, 2);
+ /* pass 2 */
+ BF( 3, 12, COS1_3 , 1);
+ BF(19, 28,-COS1_3 , 1);
+ /* pass 1 */
+ BF( 4, 27, COS0_4 , 1);
+ BF(11, 20, COS0_11, 2);
+ /* pass 2 */
+ BF( 4, 11, COS1_4 , 1);
+ BF(20, 27,-COS1_4 , 1);
+ /* pass 3 */
+ BF( 3, 4, COS2_3 , 3);
+ BF(11, 12,-COS2_3 , 3);
+ BF(19, 20, COS2_3 , 3);
+ BF(27, 28,-COS2_3 , 3);
/* pass 4 */
- BF(0, 3, COS3_0);
- BF(1, 2, COS3_1);
-
- BF(4, 7, -COS3_0);
- BF(5, 6, -COS3_1);
-
- BF(8, 11, COS3_0);
- BF(9, 10, COS3_1);
+ BF( 0, 3, COS3_0 , 1);
+ BF( 4, 7,-COS3_0 , 1);
+ BF( 8, 11, COS3_0 , 1);
+ BF(12, 15,-COS3_0 , 1);
+ BF(16, 19, COS3_0 , 1);
+ BF(20, 23,-COS3_0 , 1);
+ BF(24, 27, COS3_0 , 1);
+ BF(28, 31,-COS3_0 , 1);
- BF(12, 15, -COS3_0);
- BF(13, 14, -COS3_1);
- BF(16, 19, COS3_0);
- BF(17, 18, COS3_1);
- BF(20, 23, -COS3_0);
- BF(21, 22, -COS3_1);
-
- BF(24, 27, COS3_0);
- BF(25, 26, COS3_1);
+ /* pass 1 */
+ BF( 1, 30, COS0_1 , 1);
+ BF(14, 17, COS0_14, 3);
+ /* pass 2 */
+ BF( 1, 14, COS1_1 , 1);
+ BF(17, 30,-COS1_1 , 1);
+ /* pass 1 */
+ BF( 6, 25, COS0_6 , 1);
+ BF( 9, 22, COS0_9 , 1);
+ /* pass 2 */
+ BF( 6, 9, COS1_6 , 2);
+ BF(22, 25,-COS1_6 , 2);
+ /* pass 3 */
+ BF( 1, 6, COS2_1 , 1);
+ BF( 9, 14,-COS2_1 , 1);
+ BF(17, 22, COS2_1 , 1);
+ BF(25, 30,-COS2_1 , 1);
- BF(28, 31, -COS3_0);
- BF(29, 30, -COS3_1);
+ /* pass 1 */
+ BF( 2, 29, COS0_2 , 1);
+ BF(13, 18, COS0_13, 3);
+ /* pass 2 */
+ BF( 2, 13, COS1_2 , 1);
+ BF(18, 29,-COS1_2 , 1);
+ /* pass 1 */
+ BF( 5, 26, COS0_5 , 1);
+ BF(10, 21, COS0_10, 1);
+ /* pass 2 */
+ BF( 5, 10, COS1_5 , 2);
+ BF(21, 26,-COS1_5 , 2);
+ /* pass 3 */
+ BF( 2, 5, COS2_2 , 1);
+ BF(10, 13,-COS2_2 , 1);
+ BF(18, 21, COS2_2 , 1);
+ BF(26, 29,-COS2_2 , 1);
+ /* pass 4 */
+ BF( 1, 2, COS3_1 , 2);
+ BF( 5, 6,-COS3_1 , 2);
+ BF( 9, 10, COS3_1 , 2);
+ BF(13, 14,-COS3_1 , 2);
+ BF(17, 18, COS3_1 , 2);
+ BF(21, 22,-COS3_1 , 2);
+ BF(25, 26, COS3_1 , 2);
+ BF(29, 30,-COS3_1 , 2);
/* pass 5 */
- BF1(0, 1, 2, 3);
- BF2(4, 5, 6, 7);
- BF1(8, 9, 10, 11);
+ BF1( 0, 1, 2, 3);
+ BF2( 4, 5, 6, 7);
+ BF1( 8, 9, 10, 11);
BF2(12, 13, 14, 15);
BF1(16, 17, 18, 19);
BF2(20, 21, 22, 23);
@@ -742,25 +756,11 @@ static inline int round_sample(int *sum)
return sum1;
}
-#if defined(ARCH_POWERPC_405)
-
/* signed 16x16 -> 32 multiply add accumulate */
-#define MACS(rt, ra, rb) \
- asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
+#define MACS(rt, ra, rb) MAC16(rt, ra, rb)
/* signed 16x16 -> 32 multiply */
-#define MULS(ra, rb) \
- ({ int __rt; asm ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); __rt; })
-
-#else
-
-/* signed 16x16 -> 32 multiply add accumulate */
-#define MACS(rt, ra, rb) rt += (ra) * (rb)
-
-/* signed 16x16 -> 32 multiply */
-#define MULS(ra, rb) ((ra) * (rb))
-
-#endif
+#define MULS(ra, rb) MUL16(ra, rb)
#else
@@ -776,8 +776,7 @@ static inline int round_sample(int64_t *sum)
return sum1;
}
-#define MULS(ra, rb) MUL64(ra, rb)
-
+# define MULS(ra, rb) MUL64(ra, rb)
#endif
#define SUM8(sum, op, w, p) \
@@ -934,6 +933,19 @@ static const int icos36[9] = {
FIXR(5.73685662283492756461),
};
+/* 0.5 / cos(pi*(2*i+1)/36) */
+static const int icos36h[9] = {
+ FIXHR(0.50190991877167369479/2),
+ FIXHR(0.51763809020504152469/2), //0
+ FIXHR(0.55168895948124587824/2),
+ FIXHR(0.61038729438072803416/2),
+ FIXHR(0.70710678118654752439/2), //1
+ FIXHR(0.87172339781054900991/2),
+ FIXHR(1.18310079157624925896/4),
+ FIXHR(1.93185165257813657349/4), //2
+// FIXHR(5.73685662283492756461),
+};
+
/* 12 points IMDCT. We compute it "by hand" by factorizing obvious
cases. */
static void imdct12(int *out, int *in)
@@ -950,10 +962,10 @@ static void imdct12(int *out, int *in)
in3 += in1;
in2= MULH(2*in2, C3);
- in3= MULH(2*in3, C3);
+ in3= MULH(4*in3, C3);
t1 = in0 - in4;
- t2 = MULL(in1 - in5, icos36[4]);
+ t2 = MULH(2*(in1 - in5), icos36h[4]);
out[ 7]=
out[10]= t1 + t2;
@@ -962,19 +974,19 @@ static void imdct12(int *out, int *in)
in0 += in4>>1;
in4 = in0 + in2;
- in1 += in5>>1;
- in5 = MULL(in1 + in3, icos36[1]);
+ in5 += 2*in1;
+ in1 = MULH(in5 + in3, icos36h[1]);
out[ 8]=
- out[ 9]= in4 + in5;
+ out[ 9]= in4 + in1;
out[ 2]=
- out[ 3]= in4 - in5;
+ out[ 3]= in4 - in1;
in0 -= in2;
- in1 = MULL(in1 - in3, icos36[7]);
+ in5 = MULH(2*(in5 - in3), icos36h[7]);
out[ 0]=
- out[ 5]= in0 - in1;
+ out[ 5]= in0 - in5;
out[ 6]=
- out[11]= in0 + in1;
+ out[11]= in0 + in5;
}
/* cos(pi*i/18) */
@@ -1068,7 +1080,7 @@ static void imdct36(int *out, int *buf, int *in, int *win)
t2 = tmp[i + 1];
t3 = tmp[i + 3];
- s1 = MULL(t3 + t2, icos36[j]);
+ s1 = MULH(2*(t3 + t2), icos36h[j]);
s3 = MULL(t3 - t2, icos36[8 - j]);
t0 = s0 + s1;
@@ -1088,7 +1100,7 @@ static void imdct36(int *out, int *buf, int *in, int *win)
}
s0 = tmp[16];
- s1 = MULL(tmp[17], icos36[4]);
+ s1 = MULH(2*tmp[17], icos36h[4]);
t0 = s0 + s1;
t1 = s0 - s1;
out[(9 + 4)*SBLIMIT] = MULH(t1, win[9 + 4]) + buf[9 + 4];
@@ -1156,26 +1168,7 @@ static int decode_header(MPADecodeContext *s, uint32_t header)
s->frame_size = frame_size;
} else {
/* if no frame size computed, signal it */
- if (!s->free_format_frame_size)
- return 1;
- /* free format: compute bitrate and real frame size from the
- frame size we extracted by reading the bitstream */
- s->frame_size = s->free_format_frame_size;
- switch(s->layer) {
- case 1:
- s->frame_size += padding * 4;
- s->bit_rate = (s->frame_size * sample_rate) / 48000;
- break;
- case 2:
- s->frame_size += padding;
- s->bit_rate = (s->frame_size * sample_rate) / 144000;
- break;
- default:
- case 3:
- s->frame_size += padding;
- s->bit_rate = (s->frame_size * (sample_rate << s->lsf)) / 144000;
- break;
- }
+ return 1;
}
#if defined(DEBUG)
@@ -1199,10 +1192,9 @@ static int decode_header(MPADecodeContext *s, uint32_t header)
/* useful helper to get mpeg audio stream infos. Return -1 if error in
header, otherwise the coded frame size in bytes */
-int mpa_decode_header(AVCodecContext *avctx, uint32_t head)
+int mpa_decode_header(AVCodecContext *avctx, uint32_t head, int *sample_rate)
{
MPADecodeContext s1, *s = &s1;
- memset( s, 0, sizeof(MPADecodeContext) );
if (ff_mpa_check_header(head) != 0)
return -1;
@@ -1227,7 +1219,7 @@ int mpa_decode_header(AVCodecContext *avctx, uint32_t head)
break;
}
- avctx->sample_rate = s->sample_rate;
+ *sample_rate = s->sample_rate;
avctx->channels = s->nb_channels;
avctx->bit_rate = s->bit_rate;
avctx->sub_id = s->layer;
@@ -1533,29 +1525,6 @@ static int mp_decode_layer2(MPADecodeContext *s)
return 3 * 12;
}
-/*
- * Seek back in the stream for backstep bytes (at most 511 bytes)
- */
-static void seek_to_maindata(MPADecodeContext *s, unsigned int backstep)
-{
- uint8_t *ptr;
-
- /* compute current position in stream */
- ptr = (uint8_t *)(s->gb.buffer + (get_bits_count(&s->gb)>>3));
-
- /* copy old data before current one */
- ptr -= backstep;
- memcpy(ptr, s->inbuf1[s->inbuf_index ^ 1] +
- BACKSTEP_SIZE + s->old_frame_size - backstep, backstep);
- /* init get bits again */
- init_get_bits(&s->gb, ptr, (s->frame_size + backstep)*8);
-
- /* prepare next buffer */
- s->inbuf_index ^= 1;
- s->inbuf = &s->inbuf1[s->inbuf_index][BACKSTEP_SIZE];
- s->old_frame_size = s->frame_size;
-}
-
static inline void lsf_sf_expand(int *slen,
int sf, int n1, int n2, int n3)
{
@@ -1591,7 +1560,7 @@ static void exponents_from_scale_factors(MPADecodeContext *s,
bstab = band_size_long[s->sample_rate_index];
pretab = mpa_pretab[g->preflag];
for(i=0;i<g->long_end;i++) {
- v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift);
+ v0 = gain - ((g->scale_factors[i] + pretab[i]) << shift) + 400;
len = bstab[i];
for(j=len;j>0;j--)
*exp_ptr++ = v0;
@@ -1606,7 +1575,7 @@ static void exponents_from_scale_factors(MPADecodeContext *s,
for(i=g->short_start;i<13;i++) {
len = bstab[i];
for(l=0;l<3;l++) {
- v0 = gains[l] - (g->scale_factors[k++] << shift);
+ v0 = gains[l] - (g->scale_factors[k++] << shift) + 400;
for(j=len;j>0;j--)
*exp_ptr++ = v0;
}
@@ -1624,17 +1593,18 @@ static inline int get_bitsz(GetBitContext *s, int n)
}
static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
- int16_t *exponents, int end_pos)
+ int16_t *exponents, int end_pos2)
{
int s_index;
- int linbits, code, x, y, l, v, i, j, k, pos;
- GetBitContext last_gb;
+ int i;
+ int last_pos, bits_left;
VLC *vlc;
- uint8_t *code_table;
+ int end_pos= FFMIN(end_pos2, s->gb.size_in_bits);
/* low frequencies (called big values) */
s_index = 0;
for(i=0;i<3;i++) {
+ int j, k, l, linbits;
j = g->region_size[i];
if (j == 0)
continue;
@@ -1643,83 +1613,152 @@ static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
l = mpa_huff_data[k][0];
linbits = mpa_huff_data[k][1];
vlc = &huff_vlc[l];
- code_table = huff_code_table[l];
+
+ if(!l){
+ memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*2*j);
+ s_index += 2*j;
+ continue;
+ }
/* read huffcode and compute each couple */
for(;j>0;j--) {
- if (get_bits_count(&s->gb) >= end_pos)
- break;
- if (code_table) {
- code = get_vlc2(&s->gb, vlc->table, 8, 3);
- if (code < 0)
- return -1;
- y = code_table[code];
- x = y >> 4;
- y = y & 0x0f;
- } else {
- x = 0;
- y = 0;
+ int exponent, x, y, v;
+ int pos= get_bits_count(&s->gb);
+
+ if (pos >= end_pos){
+// av_log(NULL, AV_LOG_ERROR, "pos: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
+ if(s->in_gb.buffer && pos >= s->gb.size_in_bits){
+ s->gb= s->in_gb;
+ s->in_gb.buffer=NULL;
+ assert((get_bits_count(&s->gb) & 7) == 0);
+ skip_bits_long(&s->gb, pos - end_pos);
+ end_pos2=
+ end_pos= end_pos2 + get_bits_count(&s->gb) - pos;
+ pos= get_bits_count(&s->gb);
+ }
+// av_log(NULL, AV_LOG_ERROR, "new pos: %d %d\n", pos, end_pos);
+ if(pos >= end_pos)
+ break;
}
+ y = get_vlc2(&s->gb, vlc->table, 7, 3);
+
+ if(!y){
+ g->sb_hybrid[s_index ] =
+ g->sb_hybrid[s_index+1] = 0;
+ s_index += 2;
+ continue;
+ }
+
+ exponent= exponents[s_index];
+
dprintf("region=%d n=%d x=%d y=%d exp=%d\n",
- i, g->region_size[i] - j, x, y, exponents[s_index]);
- if (x) {
- if (x == 15)
+ i, g->region_size[i] - j, x, y, exponent);
+ if(y&16){
+ x = y >> 5;
+ y = y & 0x0f;
+ if (x < 15){
+ v = expval_table[ exponent ][ x ];
+// v = expval_table[ (exponent&3) ][ x ] >> FFMIN(0 - (exponent>>2), 31);
+ }else{
x += get_bitsz(&s->gb, linbits);
- v = l3_unscale(x, exponents[s_index]);
+ v = l3_unscale(x, exponent);
+ }
if (get_bits1(&s->gb))
v = -v;
- } else {
- v = 0;
- }
- g->sb_hybrid[s_index++] = v;
- if (y) {
- if (y == 15)
+ g->sb_hybrid[s_index] = v;
+ if (y < 15){
+ v = expval_table[ exponent ][ y ];
+ }else{
y += get_bitsz(&s->gb, linbits);
- v = l3_unscale(y, exponents[s_index]);
+ v = l3_unscale(y, exponent);
+ }
if (get_bits1(&s->gb))
v = -v;
- } else {
- v = 0;
+ g->sb_hybrid[s_index+1] = v;
+ }else{
+ x = y >> 5;
+ y = y & 0x0f;
+ x += y;
+ if (x < 15){
+ v = expval_table[ exponent ][ x ];
+ }else{
+ x += get_bitsz(&s->gb, linbits);
+ v = l3_unscale(x, exponent);
+ }
+ if (get_bits1(&s->gb))
+ v = -v;
+ g->sb_hybrid[s_index+!!y] = v;
+ g->sb_hybrid[s_index+ !y] = 0;
}
- g->sb_hybrid[s_index++] = v;
+ s_index+=2;
}
}
/* high frequencies */
vlc = &huff_quad_vlc[g->count1table_select];
- last_gb.buffer = NULL;
+ last_pos=0;
while (s_index <= 572) {
+ int pos, code;
pos = get_bits_count(&s->gb);
if (pos >= end_pos) {
- if (pos > end_pos && last_gb.buffer != NULL) {
+ if (pos > end_pos2 && last_pos){
/* some encoders generate an incorrect size for this
part. We must go back into the data */
s_index -= 4;
- s->gb = last_gb;
+ skip_bits_long(&s->gb, last_pos - pos);
+ av_log(NULL, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos);
+ if(s->error_resilience >= FF_ER_COMPLIANT)
+ s_index=0;
+ break;
}
- break;
+// av_log(NULL, AV_LOG_ERROR, "pos2: %d %d %d %d\n", pos, end_pos, end_pos2, s_index);
+ if(s->in_gb.buffer && pos >= s->gb.size_in_bits){
+ s->gb= s->in_gb;
+ s->in_gb.buffer=NULL;
+ assert((get_bits_count(&s->gb) & 7) == 0);
+ skip_bits_long(&s->gb, pos - end_pos);
+ end_pos2=
+ end_pos= end_pos2 + get_bits_count(&s->gb) - pos;
+ pos= get_bits_count(&s->gb);
+ }
+// av_log(NULL, AV_LOG_ERROR, "new pos2: %d %d %d\n", pos, end_pos, s_index);
+ if(pos >= end_pos)
+ break;
}
- last_gb= s->gb;
+ last_pos= pos;
- code = get_vlc2(&s->gb, vlc->table, vlc->bits, 2);
+ code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1);
dprintf("t=%d code=%d\n", g->count1table_select, code);
- if (code < 0)
- return -1;
- for(i=0;i<4;i++) {
- if (code & (8 >> i)) {
- /* non zero value. Could use a hand coded function for
- 'one' value */
- v = l3_unscale(1, exponents[s_index]);
- if(get_bits1(&s->gb))
- v = -v;
- } else {
- v = 0;
- }
- g->sb_hybrid[s_index++] = v;
+ g->sb_hybrid[s_index+0]=
+ g->sb_hybrid[s_index+1]=
+ g->sb_hybrid[s_index+2]=
+ g->sb_hybrid[s_index+3]= 0;
+ while(code){
+ static const int idxtab[16]={3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0};
+ int v;
+ int pos= s_index+idxtab[code];
+ code ^= 8>>idxtab[code];
+ v = exp_table[ exponents[pos] ];
+// v = exp_table[ (exponents[pos]&3) ] >> FFMIN(0 - (exponents[pos]>>2), 31);
+ if(get_bits1(&s->gb))
+ v = -v;
+ g->sb_hybrid[pos] = v;
}
+ s_index+=4;
+ }
+ /* skip extension bits */
+ bits_left = end_pos - get_bits_count(&s->gb);
+//av_log(NULL, AV_LOG_ERROR, "left:%d buf:%p\n", bits_left, s->in_gb.buffer);
+ if (bits_left < 0 || bits_left > 16) {
+ av_log(NULL, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
+ s_index=0;
+ }else if(bits_left > 0 && s->error_resilience >= FF_ER_AGGRESSIVE){
+ av_log(NULL, AV_LOG_ERROR, "bits_left=%d\n", bits_left);
+ s_index=0;
}
- while (s_index < 576)
- g->sb_hybrid[s_index++] = 0;
+ memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid)*(576 - s_index));
+ skip_bits_long(&s->gb, bits_left);
+
return 0;
}
@@ -1728,7 +1767,7 @@ static int huffman_decode(MPADecodeContext *s, GranuleDef *g,
complicated */
static void reorder_block(MPADecodeContext *s, GranuleDef *g)
{
- int i, j, k, len;
+ int i, j, len;
int32_t *ptr, *dst, *ptr1;
int32_t tmp[576];
@@ -1748,14 +1787,15 @@ static void reorder_block(MPADecodeContext *s, GranuleDef *g)
for(i=g->short_start;i<13;i++) {
len = band_size_short[s->sample_rate_index][i];
ptr1 = ptr;
- for(k=0;k<3;k++) {
- dst = tmp + k;
- for(j=len;j>0;j--) {
- *dst = *ptr++;
- dst += 3;
- }
+ dst = tmp;
+ for(j=len;j>0;j--) {
+ *dst++ = ptr[0*len];
+ *dst++ = ptr[1*len];
+ *dst++ = ptr[2*len];
+ ptr++;
}
- memcpy(ptr1, tmp, len * 3 * sizeof(int32_t));
+ ptr+=2*len;
+ memcpy(ptr1, tmp, len * 3 * sizeof(*ptr1));
}
}
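The rewritten loop above interleaves the three short-block windows in a single pass instead of looping over the windows separately; an equivalent stand-alone form of the same access pattern:

#include <stdio.h>

int main(void)
{
    enum { LEN = 4 };
    int in[3 * LEN], out[3 * LEN], j;

    for (j = 0; j < 3 * LEN; j++)
        in[j] = j;                        /* window 0, then 1, then 2 */

    for (j = 0; j < LEN; j++) {           /* same strided reads as above */
        out[3 * j + 0] = in[0 * LEN + j];
        out[3 * j + 1] = in[1 * LEN + j];
        out[3 * j + 2] = in[2 * LEN + j];
    }

    for (j = 0; j < 3 * LEN; j++)
        printf("%d ", out[j]);            /* 0 4 8 1 5 9 2 6 10 3 7 11 */
    printf("\n");
    return 0;
}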
@@ -2104,17 +2144,14 @@ void sample_dump(int fnum, int32_t *tab, int n)
static int mp_decode_layer3(MPADecodeContext *s)
{
int nb_granules, main_data_begin, private_bits;
- int gr, ch, blocksplit_flag, i, j, k, n, bits_pos, bits_left;
+ int gr, ch, blocksplit_flag, i, j, k, n, bits_pos;
GranuleDef granules[2][2], *g;
int16_t exponents[576];
/* read side info */
if (s->lsf) {
main_data_begin = get_bits(&s->gb, 8);
- if (s->nb_channels == 2)
- private_bits = get_bits(&s->gb, 2);
- else
- private_bits = get_bits(&s->gb, 1);
+ private_bits = get_bits(&s->gb, s->nb_channels);
nb_granules = 1;
} else {
main_data_begin = get_bits(&s->gb, 9);
@@ -2135,6 +2172,11 @@ static int mp_decode_layer3(MPADecodeContext *s)
g = &granules[ch][gr];
g->part2_3_length = get_bits(&s->gb, 12);
g->big_values = get_bits(&s->gb, 9);
+ if(g->big_values > 288){
+ av_log(NULL, AV_LOG_ERROR, "big_values too big\n");
+ return -1;
+ }
+
g->global_gain = get_bits(&s->gb, 8);
/* if MS stereo only is selected, we precompute the
1/sqrt(2) renormalization factor */
@@ -2148,8 +2190,10 @@ static int mp_decode_layer3(MPADecodeContext *s)
blocksplit_flag = get_bits(&s->gb, 1);
if (blocksplit_flag) {
g->block_type = get_bits(&s->gb, 2);
- if (g->block_type == 0)
+ if (g->block_type == 0){
+ av_log(NULL, AV_LOG_ERROR, "invalid block type\n");
return -1;
+ }
g->switch_point = get_bits(&s->gb, 1);
for(i=0;i<2;i++)
g->table_select[i] = get_bits(&s->gb, 5);
@@ -2192,9 +2236,7 @@ static int mp_decode_layer3(MPADecodeContext *s)
g->region_size[2] = (576 / 2);
j = 0;
for(i=0;i<3;i++) {
- k = g->region_size[i];
- if (k > g->big_values)
- k = g->big_values;
+ k = FFMIN(g->region_size[i], g->big_values);
g->region_size[i] = k - j;
j = k;
}
@@ -2212,10 +2254,7 @@ static int mp_decode_layer3(MPADecodeContext *s)
else
g->long_end = 4; /* 8000 Hz */
- if (s->sample_rate_index != 8)
- g->short_start = 3;
- else
- g->short_start = 2;
+ g->short_start = 2 + (s->sample_rate_index != 8);
} else {
g->long_end = 0;
g->short_start = 0;
@@ -2236,14 +2275,33 @@ static int mp_decode_layer3(MPADecodeContext *s)
}
if (!s->adu_mode) {
+ const uint8_t *ptr = s->gb.buffer + (get_bits_count(&s->gb)>>3);
+ assert((get_bits_count(&s->gb) & 7) == 0);
/* now we get bits from the main_data_begin offset */
dprintf("seekback: %d\n", main_data_begin);
- seek_to_maindata(s, main_data_begin);
+//av_log(NULL, AV_LOG_ERROR, "backstep:%d, lastbuf:%d\n", main_data_begin, s->last_buf_size);
+
+ memcpy(s->last_buf + s->last_buf_size, ptr, EXTRABYTES);
+ s->in_gb= s->gb;
+ init_get_bits(&s->gb, s->last_buf, s->last_buf_size*8);
+ skip_bits_long(&s->gb, 8*(s->last_buf_size - main_data_begin));
}
for(gr=0;gr<nb_granules;gr++) {
for(ch=0;ch<s->nb_channels;ch++) {
g = &granules[ch][gr];
+ if(get_bits_count(&s->gb)<0){
+ av_log(NULL, AV_LOG_ERROR, "mdb:%d, lastbuf:%d skiping granule %d\n",
+ main_data_begin, s->last_buf_size, gr);
+ skip_bits_long(&s->gb, g->part2_3_length);
+ memset(g->sb_hybrid, 0, sizeof(g->sb_hybrid));
+ if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->in_gb.buffer){
+ skip_bits_long(&s->in_gb, get_bits_count(&s->gb) - s->gb.size_in_bits);
+ s->gb= s->in_gb;
+ s->in_gb.buffer=NULL;
+ }
+ continue;
+ }
bits_pos = get_bits_count(&s->gb);
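The last_buf/in_gb handling above is the Layer III bit reservoir: a granule may start main_data_begin bytes back inside data carried over from earlier frames, so decoding begins in the saved tail and only later falls through into the current frame. A simplified sketch of that bookkeeping, with hypothetical names and a fixed capacity:

#include <stdio.h>
#include <string.h>

#define RESERVOIR_MAX 512                 /* assumed capacity for this sketch */

typedef struct {
    unsigned char data[RESERVOIR_MAX];    /* tail bytes kept from earlier frames */
    int size;
} Reservoir;

/* Offset where the granule starts inside the saved data, or -1 if the
   reservoir does not reach back far enough (the granule must be skipped). */
static int granule_offset(const Reservoir *r, int main_data_begin)
{
    if (main_data_begin > r->size)
        return -1;
    return r->size - main_data_begin;
}

/* Keep the unread tail of the current frame for the next call; the real
   decoder appends to what is already saved rather than replacing it. */
static void reservoir_update(Reservoir *r, const unsigned char *tail, int len)
{
    if (len > RESERVOIR_MAX)
        len = RESERVOIR_MAX;
    memcpy(r->data, tail, len);
    r->size = len;
}

int main(void)
{
    Reservoir r = { {0}, 0 };
    unsigned char frame[417] = {0};
    reservoir_update(&r, frame + 400, 17);          /* save 17 trailing bytes */
    printf("offset=%d\n", granule_offset(&r, 10));  /* granule starts 10 bytes back */
    return 0;
}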
@@ -2258,12 +2316,22 @@ static int mp_decode_layer3(MPADecodeContext *s)
if (g->block_type == 2) {
n = g->switch_point ? 17 : 18;
j = 0;
- for(i=0;i<n;i++)
- g->scale_factors[j++] = get_bitsz(&s->gb, slen1);
- for(i=0;i<18;i++)
- g->scale_factors[j++] = get_bitsz(&s->gb, slen2);
- for(i=0;i<3;i++)
- g->scale_factors[j++] = 0;
+ if(slen1){
+ for(i=0;i<n;i++)
+ g->scale_factors[j++] = get_bits(&s->gb, slen1);
+ }else{
+ for(i=0;i<n;i++)
+ g->scale_factors[j++] = 0;
+ }
+ if(slen2){
+ for(i=0;i<18;i++)
+ g->scale_factors[j++] = get_bits(&s->gb, slen2);
+ for(i=0;i<3;i++)
+ g->scale_factors[j++] = 0;
+ }else{
+ for(i=0;i<21;i++)
+ g->scale_factors[j++] = 0;
+ }
} else {
sc = granules[ch][0].scale_factors;
j = 0;
@@ -2271,8 +2339,13 @@ static int mp_decode_layer3(MPADecodeContext *s)
n = (k == 0 ? 6 : 5);
if ((g->scfsi & (0x8 >> k)) == 0) {
slen = (k < 2) ? slen1 : slen2;
- for(i=0;i<n;i++)
- g->scale_factors[j++] = get_bitsz(&s->gb, slen);
+ if(slen){
+ for(i=0;i<n;i++)
+ g->scale_factors[j++] = get_bits(&s->gb, slen);
+ }else{
+ for(i=0;i<n;i++)
+ g->scale_factors[j++] = 0;
+ }
} else {
/* simply copy from last granule */
for(i=0;i<n;i++) {
@@ -2334,8 +2407,13 @@ static int mp_decode_layer3(MPADecodeContext *s)
for(k=0;k<4;k++) {
n = lsf_nsf_table[tindex2][tindex][k];
sl = slen[k];
- for(i=0;i<n;i++)
- g->scale_factors[j++] = get_bitsz(&s->gb, sl);
+ if(sl){
+ for(i=0;i<n;i++)
+ g->scale_factors[j++] = get_bits(&s->gb, sl);
+ }else{
+ for(i=0;i<n;i++)
+ g->scale_factors[j++] = 0;
+ }
}
/* XXX: should compute exact size */
for(;j<40;j++)
@@ -2354,25 +2432,10 @@ static int mp_decode_layer3(MPADecodeContext *s)
exponents_from_scale_factors(s, g, exponents);
/* read Huffman coded residue */
- if (huffman_decode(s, g, exponents,
- bits_pos + g->part2_3_length) < 0)
- return -1;
+ huffman_decode(s, g, exponents, bits_pos + g->part2_3_length);
#if defined(DEBUG)
sample_dump(0, g->sb_hybrid, 576);
#endif
-
- /* skip extension bits */
- bits_left = g->part2_3_length - (get_bits_count(&s->gb) - bits_pos);
- if (bits_left < 0) {
- dprintf("bits_left=%d\n", bits_left);
- return -1;
- }
- while (bits_left >= 16) {
- skip_bits(&s->gb, 16);
- bits_left -= 16;
- }
- if (bits_left > 0)
- skip_bits(&s->gb, bits_left);
} /* ch */
if (s->nb_channels == 2)
@@ -2395,17 +2458,18 @@ static int mp_decode_layer3(MPADecodeContext *s)
#endif
}
} /* gr */
+ if(get_bits_count(&s->gb)<0)
+ skip_bits_long(&s->gb, -get_bits_count(&s->gb));
return nb_granules * 18;
}
static int mp_decode_frame(MPADecodeContext *s,
- OUT_INT *samples)
+ OUT_INT *samples, const uint8_t *buf, int buf_size)
{
int i, nb_frames, ch;
OUT_INT *samples_ptr;
- init_get_bits(&s->gb, s->inbuf + HEADER_SIZE,
- (s->inbuf_ptr - s->inbuf - HEADER_SIZE)*8);
+ init_get_bits(&s->gb, buf + HEADER_SIZE, (buf_size - HEADER_SIZE)*8);
/* skip error protection field */
if (s->error_protection)
@@ -2422,6 +2486,32 @@ static int mp_decode_frame(MPADecodeContext *s,
case 3:
default:
nb_frames = mp_decode_layer3(s);
+
+ s->last_buf_size=0;
+ if(s->in_gb.buffer){
+ align_get_bits(&s->gb);
+ i= (s->gb.size_in_bits - get_bits_count(&s->gb))>>3;
+ if(i >= 0 && i <= BACKSTEP_SIZE){
+ memmove(s->last_buf, s->gb.buffer + (get_bits_count(&s->gb)>>3), i);
+ s->last_buf_size=i;
+ }else
+ av_log(NULL, AV_LOG_ERROR, "invalid old backstep %d\n", i);
+ s->gb= s->in_gb;
+ s->in_gb.buffer= NULL;
+ }
+
+ align_get_bits(&s->gb);
+ assert((get_bits_count(&s->gb) & 7) == 0);
+ i= (s->gb.size_in_bits - get_bits_count(&s->gb))>>3;
+
+ if(i<0 || i > BACKSTEP_SIZE || nb_frames<0){
+ av_log(NULL, AV_LOG_ERROR, "invalid new backstep %d\n", i);
+ i= FFMIN(BACKSTEP_SIZE, buf_size - HEADER_SIZE);
+ }
+ assert(i <= buf_size - HEADER_SIZE && i>= 0);
+ memcpy(s->last_buf + s->last_buf_size, s->gb.buffer + buf_size - HEADER_SIZE - i, i);
+ s->last_buf_size += i;
+
break;
}
#if defined(DEBUG)
@@ -2458,162 +2548,69 @@ static int decode_frame(AVCodecContext * avctx,
{
MPADecodeContext *s = avctx->priv_data;
uint32_t header;
- uint8_t *buf_ptr;
- int len, out_size;
+ int out_size;
OUT_INT *out_samples = data;
- buf_ptr = buf;
- while (buf_size > 0) {
- len = s->inbuf_ptr - s->inbuf;
- if (s->frame_size == 0) {
- /* special case for next header for first frame in free
- format case (XXX: find a simpler method) */
- if (s->free_format_next_header != 0) {
- s->inbuf[0] = s->free_format_next_header >> 24;
- s->inbuf[1] = s->free_format_next_header >> 16;
- s->inbuf[2] = s->free_format_next_header >> 8;
- s->inbuf[3] = s->free_format_next_header;
- s->inbuf_ptr = s->inbuf + 4;
- s->free_format_next_header = 0;
- goto got_header;
- }
- /* no header seen : find one. We need at least HEADER_SIZE
- bytes to parse it */
- len = HEADER_SIZE - len;
- if (len > buf_size)
- len = buf_size;
- if (len > 0) {
- memcpy(s->inbuf_ptr, buf_ptr, len);
- buf_ptr += len;
- buf_size -= len;
- s->inbuf_ptr += len;
- }
- if ((s->inbuf_ptr - s->inbuf) >= HEADER_SIZE) {
- got_header:
- header = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) |
- (s->inbuf[2] << 8) | s->inbuf[3];
-
- if (ff_mpa_check_header(header) < 0) {
- /* no sync found : move by one byte (inefficient, but simple!) */
- memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1);
- s->inbuf_ptr--;
- dprintf("skip %x\n", header);
- /* reset free format frame size to give a chance
- to get a new bitrate */
- s->free_format_frame_size = 0;
- } else {
- if (decode_header(s, header) == 1) {
- /* free format: prepare to compute frame size */
- s->frame_size = -1;
- }
- /* update codec info */
- avctx->sample_rate = s->sample_rate;
- avctx->channels = s->nb_channels;
- avctx->bit_rate = s->bit_rate;
- avctx->sub_id = s->layer;
- switch(s->layer) {
- case 1:
- avctx->frame_size = 384;
- break;
- case 2:
- avctx->frame_size = 1152;
- break;
- case 3:
- if (s->lsf)
- avctx->frame_size = 576;
- else
- avctx->frame_size = 1152;
- break;
- }
- }
- }
- } else if (s->frame_size == -1) {
- /* free format : find next sync to compute frame size */
- len = MPA_MAX_CODED_FRAME_SIZE - len;
- if (len > buf_size)
- len = buf_size;
- if (len == 0) {
- /* frame too long: resync */
- s->frame_size = 0;
- memmove(s->inbuf, s->inbuf + 1, s->inbuf_ptr - s->inbuf - 1);
- s->inbuf_ptr--;
- } else {
- uint8_t *p, *pend;
- uint32_t header1;
- int padding;
-
- memcpy(s->inbuf_ptr, buf_ptr, len);
- /* check for header */
- p = s->inbuf_ptr - 3;
- pend = s->inbuf_ptr + len - 4;
- while (p <= pend) {
- header = (p[0] << 24) | (p[1] << 16) |
- (p[2] << 8) | p[3];
- header1 = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) |
- (s->inbuf[2] << 8) | s->inbuf[3];
- /* check with high probability that we have a
- valid header */
- if ((header & SAME_HEADER_MASK) ==
- (header1 & SAME_HEADER_MASK)) {
- /* header found: update pointers */
- len = (p + 4) - s->inbuf_ptr;
- buf_ptr += len;
- buf_size -= len;
- s->inbuf_ptr = p;
- /* compute frame size */
- s->free_format_next_header = header;
- s->free_format_frame_size = s->inbuf_ptr - s->inbuf;
- padding = (header1 >> 9) & 1;
- if (s->layer == 1)
- s->free_format_frame_size -= padding * 4;
- else
- s->free_format_frame_size -= padding;
- dprintf("free frame size=%d padding=%d\n",
- s->free_format_frame_size, padding);
- decode_header(s, header1);
- goto next_data;
- }
- p++;
- }
- /* not found: simply increase pointers */
- buf_ptr += len;
- s->inbuf_ptr += len;
- buf_size -= len;
- }
- } else if (len < s->frame_size) {
- if (s->frame_size > MPA_MAX_CODED_FRAME_SIZE)
- s->frame_size = MPA_MAX_CODED_FRAME_SIZE;
- len = s->frame_size - len;
- if (len > buf_size)
- len = buf_size;
- memcpy(s->inbuf_ptr, buf_ptr, len);
- buf_ptr += len;
- s->inbuf_ptr += len;
- buf_size -= len;
- }
- next_data:
- if (s->frame_size > 0 &&
- (s->inbuf_ptr - s->inbuf) >= s->frame_size) {
- if (avctx->parse_only) {
- /* simply return the frame data */
- *(uint8_t **)data = s->inbuf;
- out_size = s->inbuf_ptr - s->inbuf;
- } else {
- out_size = mp_decode_frame(s, out_samples);
- }
- s->inbuf_ptr = s->inbuf;
- s->frame_size = 0;
- if(out_size>=0)
- *data_size = out_size;
- else
- av_log(avctx, AV_LOG_DEBUG, "Error while decoding mpeg audio frame\n"); //FIXME return -1 / but also return the number of bytes consumed
- break;
- }
+retry:
+ if(buf_size < HEADER_SIZE)
+ return -1;
+
+ header = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3];
+ if(ff_mpa_check_header(header) < 0){
+ buf++;
+// buf_size--;
+ av_log(avctx, AV_LOG_ERROR, "Header missing skipping one byte.\n");
+ goto retry;
+ }
+
+ if (decode_header(s, header) == 1) {
+ /* free format: prepare to compute frame size */
+ s->frame_size = -1;
+ return -1;
+ }
+ /* update codec info */
+ avctx->channels = s->nb_channels;
+ avctx->bit_rate = s->bit_rate;
+ avctx->sub_id = s->layer;
+ switch(s->layer) {
+ case 1:
+ avctx->frame_size = 384;
+ break;
+ case 2:
+ avctx->frame_size = 1152;
+ break;
+ case 3:
+ if (s->lsf)
+ avctx->frame_size = 576;
+ else
+ avctx->frame_size = 1152;
+ break;
}
- return buf_ptr - buf;
+
+ if(s->frame_size<=0 || s->frame_size > buf_size){
+ av_log(avctx, AV_LOG_ERROR, "incomplete frame\n");
+ return -1;
+ }else if(s->frame_size < buf_size){
+ av_log(avctx, AV_LOG_ERROR, "incorrect frame size\n");
+ }
+
+ out_size = mp_decode_frame(s, out_samples, buf, buf_size);
+ if(out_size>=0){
+ *data_size = out_size;
+ avctx->sample_rate = s->sample_rate;
+ //FIXME maybe move the other codec info stuff from above here too
+ }else
+ av_log(avctx, AV_LOG_DEBUG, "Error while decoding MPEG audio frame.\n"); //FIXME return -1 / but also return the number of bytes consumed
+ s->frame_size = 0;
+ return buf_size;
}
+static void flush(AVCodecContext *avctx){
+ MPADecodeContext *s = avctx->priv_data;
+ s->last_buf_size= 0;
+}
+#ifdef CONFIG_MP3ADU_DECODER
static int decode_frame_adu(AVCodecContext * avctx,
void *data, int *data_size,
uint8_t * buf, int buf_size)
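The resync loop in decode_frame() above advances one byte at a time until ff_mpa_check_header() accepts the 32-bit header. A sketch of the kind of sanity test such a check performs, assuming only the standard MPEG audio frame-header layout (this is not the exact ffmpeg routine):

#include <stdint.h>
#include <stdio.h>

static int mpa_header_plausible(uint32_t h)
{
    if ((h & 0xffe00000) != 0xffe00000) return 0;   /* 11-bit sync word */
    if (((h >> 17) & 3) == 0)           return 0;   /* layer 00 is reserved */
    if (((h >> 12) & 0xf) == 0xf)       return 0;   /* bitrate index 15 is invalid */
    if (((h >> 10) & 3) == 3)           return 0;   /* sample-rate index 3 is invalid */
    return 1;
}

int main(void)
{
    printf("%d\n", mpa_header_plausible(0xfffb9064)); /* 44.1 kHz Layer III -> 1 */
    printf("%d\n", mpa_header_plausible(0x12345678)); /* no sync word      -> 0 */
    return 0;
}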
@@ -2635,12 +2632,8 @@ static int decode_frame_adu(AVCodecContext * avctx,
if (len > MPA_MAX_CODED_FRAME_SIZE)
len = MPA_MAX_CODED_FRAME_SIZE;
- memcpy(s->inbuf, buf, len);
- s->inbuf_ptr = s->inbuf + len;
-
// Get header and restore sync word
- header = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) |
- (s->inbuf[2] << 8) | s->inbuf[3] | 0xffe00000;
+ header = (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3] | 0xffe00000;
if (ff_mpa_check_header(header) < 0) { // Bad header, discard frame
*data_size = 0;
@@ -2657,18 +2650,17 @@ static int decode_frame_adu(AVCodecContext * avctx,
avctx->frame_size=s->frame_size = len;
if (avctx->parse_only) {
- /* simply return the frame data */
- *(uint8_t **)data = s->inbuf;
- out_size = s->inbuf_ptr - s->inbuf;
+ out_size = buf_size;
} else {
- out_size = mp_decode_frame(s, out_samples);
+ out_size = mp_decode_frame(s, out_samples, buf, buf_size);
}
*data_size = out_size;
return buf_size;
}
+#endif /* CONFIG_MP3ADU_DECODER */
-
+#ifdef CONFIG_MP3ON4_DECODER
/* Next 3 arrays are indexed by channel config number (passed via codecdata) */
static int mp3Frames[16] = {0,1,1,2,3,3,4,5,2}; /* number of mp3 decoder instances */
static int mp3Channels[16] = {0,1,2,3,4,5,6,8,4}; /* total output channels */
@@ -2724,8 +2716,6 @@ static int decode_init_mp3on4(AVCodecContext * avctx)
for (i = 1; i < s->frames; i++) {
s->mp3decctx[i] = av_mallocz(sizeof(MPADecodeContext));
s->mp3decctx[i]->compute_antialias = s->mp3decctx[0]->compute_antialias;
- s->mp3decctx[i]->inbuf = &s->mp3decctx[i]->inbuf1[0][BACKSTEP_SIZE];
- s->mp3decctx[i]->inbuf_ptr = s->mp3decctx[i]->inbuf;
s->mp3decctx[i]->adu_mode = 1;
}
@@ -2785,13 +2775,9 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
fsize = MPA_MAX_CODED_FRAME_SIZE;
m = s->mp3decctx[fr];
assert (m != NULL);
- /* copy original to new */
- m->inbuf_ptr = m->inbuf + fsize;
- memcpy(m->inbuf, start, fsize);
// Get header
- header = (m->inbuf[0] << 24) | (m->inbuf[1] << 16) |
- (m->inbuf[2] << 8) | m->inbuf[3] | 0xfff00000;
+ header = (start[0] << 24) | (start[1] << 16) | (start[2] << 8) | start[3] | 0xfff00000;
if (ff_mpa_check_header(header) < 0) { // Bad header, discard block
*data_size = 0;
@@ -2799,7 +2785,7 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
}
decode_header(m, header);
- mp_decode_frame(m, decoded_buf);
+ mp_decode_frame(m, decoded_buf, start, fsize);
n = MPA_FRAME_SIZE * m->nb_channels;
out_size += n * sizeof(OUT_INT);
@@ -2831,8 +2817,9 @@ static int decode_frame_mp3on4(AVCodecContext * avctx,
*data_size = out_size;
return buf_size;
}
+#endif /* CONFIG_MP3ON4_DECODER */
-
+#ifdef CONFIG_MP2_DECODER
AVCodec mp2_decoder =
{
"mp2",
@@ -2845,7 +2832,8 @@ AVCodec mp2_decoder =
decode_frame,
CODEC_CAP_PARSE_ONLY,
};
-
+#endif
+#ifdef CONFIG_MP3_DECODER
AVCodec mp3_decoder =
{
"mp3",
@@ -2857,8 +2845,10 @@ AVCodec mp3_decoder =
NULL,
decode_frame,
CODEC_CAP_PARSE_ONLY,
+ .flush= flush,
};
-
+#endif
+#ifdef CONFIG_MP3ADU_DECODER
AVCodec mp3adu_decoder =
{
"mp3adu",
@@ -2870,8 +2860,10 @@ AVCodec mp3adu_decoder =
NULL,
decode_frame_adu,
CODEC_CAP_PARSE_ONLY,
+ .flush= flush,
};
-
+#endif
+#ifdef CONFIG_MP3ON4_DECODER
AVCodec mp3on4_decoder =
{
"mp3on4",
@@ -2882,5 +2874,6 @@ AVCodec mp3on4_decoder =
NULL,
decode_close_mp3on4,
decode_frame_mp3on4,
- 0
+ .flush= flush,
};
+#endif
diff --git a/src/libffmpeg/libavcodec/mpegaudiodectab.h b/contrib/ffmpeg/libavcodec/mpegaudiodectab.h
index 572f7acb5..fdd1096fc 100644
--- a/src/libffmpeg/libavcodec/mpegaudiodectab.h
+++ b/contrib/ffmpeg/libavcodec/mpegaudiodectab.h
@@ -1,3 +1,24 @@
+/*
+ * MPEG Audio decoder
+ * copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file mpegaudiodectab.h
* mpeg audio layer decoder tables.
@@ -221,55 +242,55 @@ static const uint8_t lsf_nsf_table[6][3][4] = {
/* mpegaudio layer 3 huffman tables */
-const uint16_t mpa_huffcodes_1[4] = {
+static const uint16_t mpa_huffcodes_1[4] = {
0x0001, 0x0001, 0x0001, 0x0000,
};
-const uint8_t mpa_huffbits_1[4] = {
+static const uint8_t mpa_huffbits_1[4] = {
1, 3, 2, 3,
};
-const uint16_t mpa_huffcodes_2[9] = {
+static const uint16_t mpa_huffcodes_2[9] = {
0x0001, 0x0002, 0x0001, 0x0003, 0x0001, 0x0001, 0x0003, 0x0002,
0x0000,
};
-const uint8_t mpa_huffbits_2[9] = {
+static const uint8_t mpa_huffbits_2[9] = {
1, 3, 6, 3, 3, 5, 5, 5,
6,
};
-const uint16_t mpa_huffcodes_3[9] = {
+static const uint16_t mpa_huffcodes_3[9] = {
0x0003, 0x0002, 0x0001, 0x0001, 0x0001, 0x0001, 0x0003, 0x0002,
0x0000,
};
-const uint8_t mpa_huffbits_3[9] = {
+static const uint8_t mpa_huffbits_3[9] = {
2, 2, 6, 3, 2, 5, 5, 5,
6,
};
-const uint16_t mpa_huffcodes_5[16] = {
+static const uint16_t mpa_huffcodes_5[16] = {
0x0001, 0x0002, 0x0006, 0x0005, 0x0003, 0x0001, 0x0004, 0x0004,
0x0007, 0x0005, 0x0007, 0x0001, 0x0006, 0x0001, 0x0001, 0x0000,
};
-const uint8_t mpa_huffbits_5[16] = {
+static const uint8_t mpa_huffbits_5[16] = {
1, 3, 6, 7, 3, 3, 6, 7,
6, 6, 7, 8, 7, 6, 7, 8,
};
-const uint16_t mpa_huffcodes_6[16] = {
+static const uint16_t mpa_huffcodes_6[16] = {
0x0007, 0x0003, 0x0005, 0x0001, 0x0006, 0x0002, 0x0003, 0x0002,
0x0005, 0x0004, 0x0004, 0x0001, 0x0003, 0x0003, 0x0002, 0x0000,
};
-const uint8_t mpa_huffbits_6[16] = {
+static const uint8_t mpa_huffbits_6[16] = {
3, 3, 5, 7, 3, 2, 4, 5,
4, 4, 5, 6, 6, 5, 6, 7,
};
-const uint16_t mpa_huffcodes_7[36] = {
+static const uint16_t mpa_huffcodes_7[36] = {
0x0001, 0x0002, 0x000a, 0x0013, 0x0010, 0x000a, 0x0003, 0x0003,
0x0007, 0x000a, 0x0005, 0x0003, 0x000b, 0x0004, 0x000d, 0x0011,
0x0008, 0x0004, 0x000c, 0x000b, 0x0012, 0x000f, 0x000b, 0x0002,
@@ -277,7 +298,7 @@ const uint16_t mpa_huffcodes_7[36] = {
0x0005, 0x0003, 0x0002, 0x0000,
};
-const uint8_t mpa_huffbits_7[36] = {
+static const uint8_t mpa_huffbits_7[36] = {
1, 3, 6, 8, 8, 9, 3, 4,
6, 7, 7, 8, 6, 5, 7, 8,
8, 9, 7, 7, 8, 9, 9, 9,
@@ -285,7 +306,7 @@ const uint8_t mpa_huffbits_7[36] = {
9, 10, 10, 10,
};
-const uint16_t mpa_huffcodes_8[36] = {
+static const uint16_t mpa_huffcodes_8[36] = {
0x0003, 0x0004, 0x0006, 0x0012, 0x000c, 0x0005, 0x0005, 0x0001,
0x0002, 0x0010, 0x0009, 0x0003, 0x0007, 0x0003, 0x0005, 0x000e,
0x0007, 0x0003, 0x0013, 0x0011, 0x000f, 0x000d, 0x000a, 0x0004,
@@ -293,7 +314,7 @@ const uint16_t mpa_huffcodes_8[36] = {
0x0004, 0x0001, 0x0001, 0x0000,
};
-const uint8_t mpa_huffbits_8[36] = {
+static const uint8_t mpa_huffbits_8[36] = {
2, 3, 6, 8, 8, 9, 3, 2,
4, 8, 8, 8, 6, 4, 6, 8,
8, 9, 8, 8, 8, 9, 9, 10,
@@ -301,7 +322,7 @@ const uint8_t mpa_huffbits_8[36] = {
9, 9, 11, 11,
};
-const uint16_t mpa_huffcodes_9[36] = {
+static const uint16_t mpa_huffcodes_9[36] = {
0x0007, 0x0005, 0x0009, 0x000e, 0x000f, 0x0007, 0x0006, 0x0004,
0x0005, 0x0005, 0x0006, 0x0007, 0x0007, 0x0006, 0x0008, 0x0008,
0x0008, 0x0005, 0x000f, 0x0006, 0x0009, 0x000a, 0x0005, 0x0001,
@@ -309,7 +330,7 @@ const uint16_t mpa_huffcodes_9[36] = {
0x0006, 0x0002, 0x0006, 0x0000,
};
-const uint8_t mpa_huffbits_9[36] = {
+static const uint8_t mpa_huffbits_9[36] = {
3, 3, 5, 6, 8, 9, 3, 3,
4, 5, 6, 8, 4, 4, 5, 6,
7, 8, 6, 5, 6, 7, 7, 8,
@@ -317,7 +338,7 @@ const uint8_t mpa_huffbits_9[36] = {
8, 8, 9, 9,
};
-const uint16_t mpa_huffcodes_10[64] = {
+static const uint16_t mpa_huffcodes_10[64] = {
0x0001, 0x0002, 0x000a, 0x0017, 0x0023, 0x001e, 0x000c, 0x0011,
0x0003, 0x0003, 0x0008, 0x000c, 0x0012, 0x0015, 0x000c, 0x0007,
0x000b, 0x0009, 0x000f, 0x0015, 0x0020, 0x0028, 0x0013, 0x0006,
@@ -328,7 +349,7 @@ const uint16_t mpa_huffcodes_10[64] = {
0x0009, 0x0008, 0x0007, 0x0008, 0x0004, 0x0004, 0x0002, 0x0000,
};
-const uint8_t mpa_huffbits_10[64] = {
+static const uint8_t mpa_huffbits_10[64] = {
1, 3, 6, 8, 9, 9, 9, 10,
3, 4, 6, 7, 8, 9, 8, 8,
6, 6, 7, 8, 9, 10, 9, 9,
@@ -339,7 +360,7 @@ const uint8_t mpa_huffbits_10[64] = {
9, 8, 9, 10, 10, 11, 11, 11,
};
-const uint16_t mpa_huffcodes_11[64] = {
+static const uint16_t mpa_huffcodes_11[64] = {
0x0003, 0x0004, 0x000a, 0x0018, 0x0022, 0x0021, 0x0015, 0x000f,
0x0005, 0x0003, 0x0004, 0x000a, 0x0020, 0x0011, 0x000b, 0x000a,
0x000b, 0x0007, 0x000d, 0x0012, 0x001e, 0x001f, 0x0014, 0x0005,
@@ -350,7 +371,7 @@ const uint16_t mpa_huffcodes_11[64] = {
0x000b, 0x0004, 0x0006, 0x0006, 0x0006, 0x0003, 0x0002, 0x0000,
};
-const uint8_t mpa_huffbits_11[64] = {
+static const uint8_t mpa_huffbits_11[64] = {
2, 3, 5, 7, 8, 9, 8, 9,
3, 3, 4, 6, 8, 8, 7, 8,
5, 5, 6, 7, 8, 9, 8, 8,
@@ -361,7 +382,7 @@ const uint8_t mpa_huffbits_11[64] = {
8, 7, 8, 9, 10, 10, 10, 10,
};
-const uint16_t mpa_huffcodes_12[64] = {
+static const uint16_t mpa_huffcodes_12[64] = {
0x0009, 0x0006, 0x0010, 0x0021, 0x0029, 0x0027, 0x0026, 0x001a,
0x0007, 0x0005, 0x0006, 0x0009, 0x0017, 0x0010, 0x001a, 0x000b,
0x0011, 0x0007, 0x000b, 0x000e, 0x0015, 0x001e, 0x000a, 0x0007,
@@ -372,7 +393,7 @@ const uint16_t mpa_huffcodes_12[64] = {
0x001b, 0x000c, 0x0008, 0x000c, 0x0006, 0x0003, 0x0001, 0x0000,
};
-const uint8_t mpa_huffbits_12[64] = {
+static const uint8_t mpa_huffbits_12[64] = {
4, 3, 5, 7, 8, 9, 9, 9,
3, 3, 4, 5, 7, 7, 8, 8,
5, 4, 5, 6, 7, 8, 7, 8,
@@ -383,7 +404,7 @@ const uint8_t mpa_huffbits_12[64] = {
9, 8, 8, 9, 9, 9, 9, 10,
};
-const uint16_t mpa_huffcodes_13[256] = {
+static const uint16_t mpa_huffcodes_13[256] = {
0x0001, 0x0005, 0x000e, 0x0015, 0x0022, 0x0033, 0x002e, 0x0047,
0x002a, 0x0034, 0x0044, 0x0034, 0x0043, 0x002c, 0x002b, 0x0013,
0x0003, 0x0004, 0x000c, 0x0013, 0x001f, 0x001a, 0x002c, 0x0021,
@@ -418,7 +439,7 @@ const uint16_t mpa_huffcodes_13[256] = {
0x0011, 0x000c, 0x0010, 0x0008, 0x0001, 0x0001, 0x0000, 0x0001,
};
-const uint8_t mpa_huffbits_13[256] = {
+static const uint8_t mpa_huffbits_13[256] = {
1, 4, 6, 7, 8, 9, 9, 10,
9, 10, 11, 11, 12, 12, 13, 13,
3, 4, 6, 7, 8, 8, 9, 9,
@@ -453,7 +474,7 @@ const uint8_t mpa_huffbits_13[256] = {
15, 15, 16, 16, 19, 18, 19, 16,
};
-const uint16_t mpa_huffcodes_15[256] = {
+static const uint16_t mpa_huffcodes_15[256] = {
0x0007, 0x000c, 0x0012, 0x0035, 0x002f, 0x004c, 0x007c, 0x006c,
0x0059, 0x007b, 0x006c, 0x0077, 0x006b, 0x0051, 0x007a, 0x003f,
0x000d, 0x0005, 0x0010, 0x001b, 0x002e, 0x0024, 0x003d, 0x0033,
@@ -488,7 +509,7 @@ const uint16_t mpa_huffcodes_15[256] = {
0x0015, 0x0010, 0x000a, 0x0006, 0x0008, 0x0006, 0x0002, 0x0000,
};
-const uint8_t mpa_huffbits_15[256] = {
+static const uint8_t mpa_huffbits_15[256] = {
3, 4, 5, 7, 7, 8, 9, 9,
9, 10, 10, 11, 11, 11, 12, 13,
4, 3, 5, 6, 7, 7, 8, 8,
@@ -523,7 +544,7 @@ const uint8_t mpa_huffbits_15[256] = {
12, 12, 12, 12, 13, 13, 13, 13,
};
-const uint16_t mpa_huffcodes_16[256] = {
+static const uint16_t mpa_huffcodes_16[256] = {
0x0001, 0x0005, 0x000e, 0x002c, 0x004a, 0x003f, 0x006e, 0x005d,
0x00ac, 0x0095, 0x008a, 0x00f2, 0x00e1, 0x00c3, 0x0178, 0x0011,
0x0003, 0x0004, 0x000c, 0x0014, 0x0023, 0x003e, 0x0035, 0x002f,
@@ -558,7 +579,7 @@ const uint16_t mpa_huffcodes_16[256] = {
0x000d, 0x000c, 0x000a, 0x0007, 0x0005, 0x0003, 0x0001, 0x0003,
};
-const uint8_t mpa_huffbits_16[256] = {
+static const uint8_t mpa_huffbits_16[256] = {
1, 4, 6, 8, 9, 9, 10, 10,
11, 11, 11, 12, 12, 12, 13, 9,
3, 4, 6, 7, 8, 9, 9, 9,
@@ -593,7 +614,7 @@ const uint8_t mpa_huffbits_16[256] = {
11, 11, 11, 11, 11, 11, 11, 8,
};
-const uint16_t mpa_huffcodes_24[256] = {
+static const uint16_t mpa_huffcodes_24[256] = {
0x000f, 0x000d, 0x002e, 0x0050, 0x0092, 0x0106, 0x00f8, 0x01b2,
0x01aa, 0x029d, 0x028d, 0x0289, 0x026d, 0x0205, 0x0408, 0x0058,
0x000e, 0x000c, 0x0015, 0x0026, 0x0047, 0x0082, 0x007a, 0x00d8,
@@ -628,7 +649,7 @@ const uint16_t mpa_huffcodes_24[256] = {
0x0007, 0x0006, 0x0004, 0x0007, 0x0005, 0x0003, 0x0001, 0x0003,
};
-const uint8_t mpa_huffbits_24[256] = {
+static const uint8_t mpa_huffbits_24[256] = {
4, 4, 6, 7, 8, 9, 9, 10,
10, 11, 11, 11, 11, 11, 12, 9,
4, 4, 5, 6, 7, 8, 8, 9,
@@ -663,7 +684,7 @@ const uint8_t mpa_huffbits_24[256] = {
7, 7, 7, 8, 8, 8, 8, 4,
};
-const HuffTable mpa_huff_tables[16] = {
+static const HuffTable mpa_huff_tables[16] = {
{ 1, NULL, NULL },
{ 2, mpa_huffbits_1, mpa_huffcodes_1 },
{ 3, mpa_huffbits_2, mpa_huffcodes_2 },
@@ -682,7 +703,7 @@ const HuffTable mpa_huff_tables[16] = {
{ 16, mpa_huffbits_24, mpa_huffcodes_24 },
};
-const uint8_t mpa_huff_data[32][2] = {
+static const uint8_t mpa_huff_data[32][2] = {
{ 0, 0 },
{ 1, 0 },
{ 2, 0 },
@@ -730,7 +751,7 @@ static const uint8_t mpa_quad_bits[2][16] = {
};
/* band size tables */
-const uint8_t band_size_long[9][22] = {
+static const uint8_t band_size_long[9][22] = {
{ 4, 4, 4, 4, 4, 4, 6, 6, 8, 8, 10,
12, 16, 20, 24, 28, 34, 42, 50, 54, 76, 158, }, /* 44100 */
{ 4, 4, 4, 4, 4, 4, 6, 6, 6, 8, 10,
@@ -751,7 +772,7 @@ const uint8_t band_size_long[9][22] = {
40, 48, 56, 64, 76, 90, 2, 2, 2, 2, 2, }, /* 8000 */
};
-const uint8_t band_size_short[9][13] = {
+static const uint8_t band_size_short[9][13] = {
{ 4, 4, 4, 4, 6, 8, 10, 12, 14, 18, 22, 30, 56, }, /* 44100 */
{ 4, 4, 4, 4, 6, 6, 10, 12, 14, 16, 20, 26, 66, }, /* 48000 */
{ 4, 4, 4, 4, 6, 8, 12, 16, 20, 26, 34, 42, 12, }, /* 32000 */
@@ -763,12 +784,12 @@ const uint8_t band_size_short[9][13] = {
{ 8, 8, 8, 12, 16, 20, 24, 28, 36, 2, 2, 2, 26, }, /* 8000 */
};
-const uint8_t mpa_pretab[2][22] = {
+static const uint8_t mpa_pretab[2][22] = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3, 2, 0 },
};
/* table for alias reduction (XXX: store it as integer !) */
-const float ci_table[8] = {
+static const float ci_table[8] = {
-0.6, -0.535, -0.33, -0.185, -0.095, -0.041, -0.0142, -0.0037,
};
diff --git a/src/libffmpeg/libavcodec/mpegaudiotab.h b/contrib/ffmpeg/libavcodec/mpegaudiotab.h
index 2e7d3372f..8fb37ddff 100644
--- a/src/libffmpeg/libavcodec/mpegaudiotab.h
+++ b/contrib/ffmpeg/libavcodec/mpegaudiotab.h
@@ -4,8 +4,21 @@
*
* Copyright (c) 2000, 2001 Fabrice Bellard.
*
- * The licence of this code is contained in file LICENCE found in the
- * same archive
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
diff --git a/src/libffmpeg/libavcodec/mpegvideo.c b/contrib/ffmpeg/libavcodec/mpegvideo.c
index f1c1b34bb..f0a04a402 100644
--- a/src/libffmpeg/libavcodec/mpegvideo.c
+++ b/contrib/ffmpeg/libavcodec/mpegvideo.c
@@ -3,18 +3,20 @@
* Copyright (c) 2000,2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
@@ -32,22 +34,14 @@
#include <limits.h>
#ifdef USE_FASTMEMCPY
-#include "fastmemcpy.h"
+#include "libvo/fastmemcpy.h"
#endif
//#undef NDEBUG
//#include <assert.h>
-
-/* if xine's MPEG encoder is enabled, enable the encoding features in
- * this particular module */
-#if defined(XINE_MPEG_ENCODER) && !defined(CONFIG_ENCODERS)
-#define CONFIG_ENCODERS
-#endif
-
-
#ifdef CONFIG_ENCODERS
-static void encode_picture(MpegEncContext *s, int picture_number);
+static int encode_picture(MpegEncContext *s, int picture_number);
#endif //CONFIG_ENCODERS
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale);
@@ -225,7 +219,7 @@ void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_s
}
#ifdef CONFIG_ENCODERS
-void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
+void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix){
int i;
if(matrix){
@@ -334,6 +328,7 @@ static void copy_picture(Picture *dst, Picture *src){
dst->type= FF_BUFFER_TYPE_COPY;
}
+#ifdef CONFIG_ENCODERS
static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
int i;
@@ -372,6 +367,7 @@ static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *sr
}
}
}
+#endif
/**
* allocates a Picture
@@ -573,6 +569,7 @@ void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
}
+#ifdef CONFIG_ENCODERS
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
#define COPY(a) dst->a= src->a
COPY(pict_type);
@@ -589,6 +586,7 @@ static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContex
COPY(partitioned_frame); //FIXME don't set in encode_header
#undef COPY
}
+#endif
/**
* sets the given MpegEncContext to common defaults (same for encoding and decoding).
@@ -700,12 +698,12 @@ int MPV_common_init(MpegEncContext *s)
yc_size = y_size + 2 * c_size;
/* convert fourcc to upper case */
- s->avctx->codec_tag= toupper( s->avctx->codec_tag &0xFF)
+ s->codec_tag= toupper( s->avctx->codec_tag &0xFF)
+ (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
+ (toupper((s->avctx->codec_tag>>16)&0xFF)<<16)
+ (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
- s->avctx->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF)
+ s->stream_codec_tag= toupper( s->avctx->stream_codec_tag &0xFF)
+ (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
+ (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16)
+ (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
@@ -944,7 +942,8 @@ int MPV_encode_init(AVCodecContext *avctx)
break;
case CODEC_ID_LJPEG:
case CODEC_ID_MJPEG:
- if(avctx->pix_fmt != PIX_FMT_YUVJ420P && (avctx->pix_fmt != PIX_FMT_YUV420P || avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL)){
+ if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUVJ422P &&
+ ((avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUV422P) || avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL)){
av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
return -1;
}
@@ -1105,6 +1104,17 @@ int MPV_encode_init(AVCodecContext *avctx)
return -1;
}
+ if(s->flags & CODEC_FLAG_LOW_DELAY){
+ if (s->codec_id != CODEC_ID_MPEG2VIDEO && s->codec_id != CODEC_ID_MPEG1VIDEO){
+ av_log(avctx, AV_LOG_ERROR, "low delay forcing is only available for mpeg1/2\n");
+ return -1;
+ }
+ if (s->max_b_frames != 0){
+ av_log(avctx, AV_LOG_ERROR, "b frames cannot be used with low delay\n");
+ return -1;
+ }
+ }
+
if(s->avctx->thread_count > 1 && s->codec_id != CODEC_ID_MPEG4
&& s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO
&& (s->codec_id != CODEC_ID_H263P || !(s->flags & CODEC_FLAG_H263P_SLICE_STRUCT))){
@@ -1170,14 +1180,12 @@ int MPV_encode_init(AVCodecContext *avctx)
switch(avctx->codec->id) {
case CODEC_ID_MPEG1VIDEO:
s->out_format = FMT_MPEG1;
- s->low_delay= 0; //s->max_b_frames ? 0 : 1;
+ s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
break;
-/* xine: this is never used in either decode or MPEG-1 encode mode */
-#if 0
case CODEC_ID_MPEG2VIDEO:
s->out_format = FMT_MPEG1;
- s->low_delay= 0; //s->max_b_frames ? 0 : 1;
+ s->low_delay= !!(s->flags & CODEC_FLAG_LOW_DELAY);
avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
s->rtp_mode= 1;
break;
@@ -1188,22 +1196,28 @@ int MPV_encode_init(AVCodecContext *avctx)
s->intra_only = 1; /* force intra only for jpeg */
s->mjpeg_write_tables = avctx->codec->id != CODEC_ID_JPEGLS;
s->mjpeg_data_only_frames = 0; /* write all the needed headers */
- s->mjpeg_vsample[0] = 1<<chroma_v_shift;
- s->mjpeg_vsample[1] = 1;
- s->mjpeg_vsample[2] = 1;
- s->mjpeg_hsample[0] = 1<<chroma_h_shift;
- s->mjpeg_hsample[1] = 1;
- s->mjpeg_hsample[2] = 1;
+ s->mjpeg_vsample[0] = 2;
+ s->mjpeg_vsample[1] = 2>>chroma_v_shift;
+ s->mjpeg_vsample[2] = 2>>chroma_v_shift;
+ s->mjpeg_hsample[0] = 2;
+ s->mjpeg_hsample[1] = 2>>chroma_h_shift;
+ s->mjpeg_hsample[2] = 2>>chroma_h_shift;
if (mjpeg_init(s) < 0)
return -1;
avctx->delay=0;
s->low_delay=1;
break;
+#ifdef CONFIG_H261_ENCODER
case CODEC_ID_H261:
+ if (ff_h261_get_picture_format(s->width, s->height) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "The specified picture size of %dx%d is not valid for the H.261 codec.\nValid sizes are 176x144, 352x288\n", s->width, s->height);
+ return -1;
+ }
s->out_format = FMT_H261;
avctx->delay=0;
s->low_delay=1;
break;
+#endif
case CODEC_ID_H263:
if (h263_get_picture_format(s->width, s->height) == 7) {
av_log(avctx, AV_LOG_INFO, "The specified picture size of %dx%d is not valid for the H.263 codec.\nValid sizes are 128x96, 176x144, 352x288, 704x576, and 1408x1152. Try H.263+.\n", s->width, s->height);
@@ -1310,7 +1324,6 @@ int MPV_encode_init(AVCodecContext *avctx)
avctx->delay=0;
s->low_delay=1;
break;
-#endif /* #if 0 */
default:
return -1;
}
@@ -1326,14 +1339,12 @@ int MPV_encode_init(AVCodecContext *avctx)
if(s->modified_quant)
s->chroma_qscale_table= ff_h263_chroma_qscale_table;
s->progressive_frame=
- s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME));
+ s->progressive_sequence= !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN));
s->quant_precision=5;
ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
#ifdef CONFIG_H261_ENCODER
if (s->out_format == FMT_H261)
ff_h261_encode_init(s);
@@ -1342,8 +1353,6 @@ int MPV_encode_init(AVCodecContext *avctx)
h263_encode_init(s);
if(s->msmpeg4_version)
ff_msmpeg4_encode_init(s);
-#endif /* #if 0 */
-/* xine: we DO want this for MPEG-1 encoding */
if (s->out_format == FMT_MPEG1)
ff_mpeg1_encode_init(s);
@@ -1388,12 +1397,9 @@ int MPV_encode_end(AVCodecContext *avctx)
ff_rate_control_uninit(s);
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
MPV_common_end(s);
if (s->out_format == FMT_MJPEG)
mjpeg_close(s);
-#endif /* #if 0 */
av_freep(&avctx->extradata);
@@ -1705,7 +1711,7 @@ void MPV_frame_end(MpegEncContext *s)
* @param color color of the arrow
*/
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
- int t, x, y, fr, f;
+ int x, y, fr, f;
sx= clip(sx, 0, w-1);
sy= clip(sy, 0, h-1);
@@ -1714,10 +1720,10 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
buf[sy*stride + sx]+= color;
- if(ABS(ex - sx) > ABS(ey - sy)){
+ if(FFABS(ex - sx) > FFABS(ey - sy)){
if(sx > ex){
- t=sx; sx=ex; ex=t;
- t=sy; sy=ey; ey=t;
+ FFSWAP(int, sx, ex);
+ FFSWAP(int, sy, ey);
}
buf+= sx + sy*stride;
ex-= sx;
@@ -1730,8 +1736,8 @@ static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h
}
}else{
if(sy > ey){
- t=sx; sx=ex; ex=t;
- t=sy; sy=ey; ey=t;
+ FFSWAP(int, sx, ex);
+ FFSWAP(int, sy, ey);
}
buf+= sx + sy*stride;
ey-= sy;
@@ -2048,7 +2054,7 @@ static int get_sae(uint8_t *src, int ref, int stride){
for(y=0; y<16; y++){
for(x=0; x<16; x++){
- acc+= ABS(src[x+y*stride] - ref);
+ acc+= FFABS(src[x+y*stride] - ref);
}
}
@@ -2152,7 +2158,10 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
int w= s->width >>h_shift;
int h= s->height>>v_shift;
uint8_t *src= pic_arg->data[i];
- uint8_t *dst= pic->data[i] + INPLACE_OFFSET;
+ uint8_t *dst= pic->data[i];
+
+ if(!s->avctx->rc_buffer_size)
+ dst +=INPLACE_OFFSET;
if(src_stride==dst_stride)
memcpy(dst, src, src_stride*h);
@@ -2194,9 +2203,9 @@ static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
switch(s->avctx->frame_skip_exp){
case 0: score= FFMAX(score, v); break;
- case 1: score+= ABS(v);break;
+ case 1: score+= FFABS(v);break;
case 2: score+= v*v;break;
- case 3: score64+= ABS(v*v*(int64_t)v);break;
+ case 3: score64+= FFABS(v*v*(int64_t)v);break;
case 4: score64+= v*v*(int64_t)(v*v);break;
}
}
@@ -2227,7 +2236,7 @@ static int estimate_best_b_count(MpegEncContext *s){
// emms_c();
p_lambda= s->last_lambda_for[P_TYPE]; //s->next_picture_ptr->quality;
- b_lambda= s->last_lambda_for[B_TYPE]; //p_lambda *ABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
+ b_lambda= s->last_lambda_for[B_TYPE]; //p_lambda *FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
if(!b_lambda) b_lambda= p_lambda; //FIXME we should do this somewhere else
lambda2= (b_lambda*b_lambda + (1<<FF_LAMBDA_SHIFT)/2 ) >> FF_LAMBDA_SHIFT;
@@ -2341,7 +2350,7 @@ static void select_input_picture(MpegEncContext *s){
if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
if(s->picture_in_gop_number < s->gop_size && skip_check(s, s->input_picture[0], s->next_picture_ptr)){
//FIXME check that the gop check above is +-1 correct
-//av_log(NULL, AV_LOG_DEBUG, "skip %p %Ld\n", s->input_picture[0]->data[0], s->input_picture[0]->pts);
+//av_log(NULL, AV_LOG_DEBUG, "skip %p %"PRId64"\n", s->input_picture[0]->data[0], s->input_picture[0]->pts);
if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
for(i=0; i<4; i++)
@@ -2451,21 +2460,22 @@ no_output_pic:
copy_picture(&s->new_picture, s->reordered_input_picture[0]);
- if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
+ if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED || s->avctx->rc_buffer_size){
// input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
int i= ff_find_unused_picture(s, 0);
Picture *pic= &s->picture[i];
+ pic->reference = s->reordered_input_picture[0]->reference;
+ alloc_picture(s, pic, 0);
+
/* mark us unused / free shared pic */
+ if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_INTERNAL)
+ s->avctx->release_buffer(s->avctx, (AVFrame*)s->reordered_input_picture[0]);
for(i=0; i<4; i++)
s->reordered_input_picture[0]->data[i]= NULL;
s->reordered_input_picture[0]->type= 0;
- pic->reference = s->reordered_input_picture[0]->reference;
-
- alloc_picture(s, pic, 0);
-
copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]);
s->current_picture_ptr= pic;
@@ -2519,8 +2529,9 @@ int MPV_encode_picture(AVCodecContext *avctx,
//emms_c();
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
MPV_frame_start(s, avctx);
-
- encode_picture(s, s->picture_number);
+vbv_retry:
+ if (encode_picture(s, s->picture_number) < 0)
+ return -1;
avctx->real_pict_num = s->picture_number;
avctx->header_bits = s->header_bits;
@@ -2534,11 +2545,39 @@ int MPV_encode_picture(AVCodecContext *avctx,
MPV_frame_end(s);
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
if (s->out_format == FMT_MJPEG)
mjpeg_picture_trailer(s);
-#endif /* #if 0 */
+
+ if(avctx->rc_buffer_size){
+ RateControlContext *rcc= &s->rc_context;
+ int max_size= rcc->buffer_index/3;
+
+ if(put_bits_count(&s->pb) > max_size && s->lambda < s->avctx->lmax){
+ s->next_lambda= FFMAX(s->lambda+1, s->lambda*(s->qscale+1) / s->qscale);
+ if(s->adaptive_quant){
+ int i;
+ for(i=0; i<s->mb_height*s->mb_stride; i++)
+ s->lambda_table[i]= FFMAX(s->lambda_table[i]+1, s->lambda_table[i]*(s->qscale+1) / s->qscale);
+ }
+ s->mb_skipped = 0; //done in MPV_frame_start()
+ if(s->pict_type==P_TYPE){ //done in encode_picture() so we must undo it
+ if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
+ s->no_rounding ^= 1;
+ }
+ if(s->pict_type!=B_TYPE){
+ s->time_base= s->last_time_base;
+ s->last_non_b_time= s->time - s->pp_time;
+ }
+// av_log(NULL, AV_LOG_ERROR, "R:%d ", s->next_lambda);
+ for(i=0; i<avctx->thread_count; i++){
+ PutBitContext *pb= &s->thread_context[i]->pb;
+ init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
+ }
+ goto vbv_retry;
+ }
+
+ assert(s->avctx->rc_max_rate);
+ }
if(s->flags&CODEC_FLAG_PASS1)
ff_write_pass1_stats(s);
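The vbv_retry path added above re-encodes the whole picture with a larger lambda whenever the output overshoots the rate-control buffer budget; stripped of the encoder state that has to be undone before each retry, the control flow is just a bounded retry loop. A sketch with made-up numbers:

#include <stdio.h>

/* stand-in encoder: a coarser quantiser yields roughly proportionally fewer bits */
static int encode_picture_with_q(int q)
{
    return 240000 / q;
}

int main(void)
{
    const int max_bits = 20000;   /* per-picture budget derived from the VBV buffer */
    int q, bits = 0;

    for (q = 2; q <= 31; q++) {   /* 2..31: the usual MPEG qscale range */
        bits = encode_picture_with_q(q);
        if (bits <= max_bits)
            break;                /* the picture fits, stop retrying */
    }
    printf("q=%d bits=%d\n", q, bits);
    return 0;
}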
@@ -3895,7 +3934,7 @@ static always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM bloc
else if (s->h263_pred || s->h263_aic)
s->mbintra_table[mb_xy]=1;
- if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
+ if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
uint8_t *dest_y, *dest_cb, *dest_cr;
int dct_linesize, dct_offset;
op_pixels_func (*op_pix)[4];
@@ -3960,17 +3999,16 @@ static always_inline void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM bloc
MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
}
}else{
+ op_qpix= s->me.qpel_put;
if ((!s->no_rounding) || s->pict_type==B_TYPE){
op_pix = s->dsp.put_pixels_tab;
- op_qpix= s->dsp.put_qpel_pixels_tab;
}else{
op_pix = s->dsp.put_no_rnd_pixels_tab;
- op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
}
if (s->mv_dir & MV_DIR_FORWARD) {
MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
op_pix = s->dsp.avg_pixels_tab;
- op_qpix= s->dsp.avg_qpel_pixels_tab;
+ op_qpix= s->me.qpel_avg;
}
if (s->mv_dir & MV_DIR_BACKWARD) {
MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
@@ -4134,7 +4172,7 @@ static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int th
for(i=0; i<=last_index; i++){
const int j = s->intra_scantable.permutated[i];
- const int level = ABS(block[j]);
+ const int level = FFABS(block[j]);
if(level==1){
if(skip_dc && i==0) continue;
score+= tab[run];
@@ -4537,8 +4575,6 @@ static always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, in
case CODEC_ID_MPEG1VIDEO:
case CODEC_ID_MPEG2VIDEO:
mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
case CODEC_ID_MPEG4:
mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
case CODEC_ID_MSMPEG4V2:
@@ -4559,7 +4595,6 @@ static always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, in
h263_encode_mb(s, s->block, motion_x, motion_y); break;
case CODEC_ID_MJPEG:
mjpeg_encode_mb(s, s->block); break;
-#endif /* #if 0 */
default:
assert(0);
}
@@ -4738,7 +4773,7 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
}
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
int acc=0;
int x,y;
@@ -4781,8 +4816,6 @@ static int sse_mb(MpegEncContext *s){
+sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
}
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
MpegEncContext *s= arg;
@@ -4826,7 +4859,6 @@ static int estimate_motion_thread(AVCodecContext *c, void *arg){
}
return 0;
}
-#endif /* #if 0 */
static int mb_var_thread(AVCodecContext *c, void *arg){
MpegEncContext *s= arg;
@@ -4851,8 +4883,6 @@ static int mb_var_thread(AVCodecContext *c, void *arg){
}
static void write_slice_end(MpegEncContext *s){
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
if(s->codec_id==CODEC_ID_MPEG4){
if(s->partitioned_frame){
ff_mpeg4_merge_partitions(s);
@@ -4862,7 +4892,6 @@ static void write_slice_end(MpegEncContext *s){
}else if(s->out_format == FMT_MJPEG){
ff_mjpeg_stuffing(&s->pb);
}
-#endif /* #if 0 */
align_put_bits(&s->pb);
flush_put_bits(&s->pb);
@@ -4916,13 +4945,10 @@ static int encode_thread(AVCodecContext *c, void *arg){
case CODEC_ID_FLV1:
s->gob_index = ff_h263_get_gob_height(s);
break;
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
case CODEC_ID_MPEG4:
if(s->partitioned_frame)
ff_mpeg4_init_partitions(s);
break;
-#endif /* #if 0 */
}
s->resync_mb_x=0;
@@ -4995,12 +5021,9 @@ static int encode_thread(AVCodecContext *c, void *arg){
if(s->start_mb_y != mb_y || mb_x!=0){
write_slice_end(s);
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
ff_mpeg4_init_partitions(s);
}
-#endif /* #if 0 */
}
assert((put_bits_count(&s->pb)&7) == 0);
@@ -5024,25 +5047,19 @@ static int encode_thread(AVCodecContext *c, void *arg){
}
switch(s->codec_id){
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
case CODEC_ID_MPEG4:
ff_mpeg4_encode_video_packet_header(s);
ff_mpeg4_clean_buffers(s);
break;
-#endif /* #if 0 */
case CODEC_ID_MPEG1VIDEO:
case CODEC_ID_MPEG2VIDEO:
ff_mpeg1_encode_slice_header(s);
ff_mpeg1_clean_buffers(s);
break;
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
case CODEC_ID_H263:
case CODEC_ID_H263P:
h263_encode_gob_header(s, mb_y);
break;
-#endif /* #if 0 */
}
if(s->flags&CODEC_FLAG_PASS1){
@@ -5155,10 +5172,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
s->mb_intra= 0;
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
ff_mpeg4_set_direct_mv(s, mx, my);
-#endif /* #if 0 */
encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
&dmin, &next_block, mx, my);
}
@@ -5219,9 +5233,10 @@ static int encode_thread(AVCodecContext *c, void *arg){
if(s->flags & CODEC_FLAG_QP_RD){
if(best_s.mv_type==MV_TYPE_16X16 && !(best_s.mv_dir&MV_DIRECT)){
const int last_qp= backup_s.qscale;
- int dquant, dir, qp, dc[6];
+ int qpi, qp, dc[6];
DCTELEM ac[6][16];
const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
+ static const int dquant_tab[4]={-1,1,-2,2};
assert(backup_s.dquant == 0);
@@ -5234,12 +5249,12 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->mv[1][0][0] = best_s.mv[1][0][0];
s->mv[1][0][1] = best_s.mv[1][0][1];
- dir= s->pict_type == B_TYPE ? 2 : 1;
- if(last_qp + dir > s->avctx->qmax) dir= -dir;
- for(dquant= dir; dquant<=2 && dquant>=-2; dquant += dir){
+ qpi = s->pict_type == B_TYPE ? 2 : 0;
+ for(; qpi<4; qpi++){
+ int dquant= dquant_tab[qpi];
qp= last_qp + dquant;
if(qp < s->avctx->qmin || qp > s->avctx->qmax)
- break;
+ continue;
backup_s.dquant= dquant;
if(s->mb_intra && s->dc_val[0]){
for(i=0; i<6; i++){
@@ -5257,11 +5272,6 @@ static int encode_thread(AVCodecContext *c, void *arg){
memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
}
}
- if(dir > 0 && dquant==dir){
- dquant= 0;
- dir= -dir;
- }else
- break;
}
}
qp= best_s.qscale;
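The hunk above replaces the old bidirectional dquant walk with a fixed candidate table. As a hedged, stand-alone sketch (the helper and its try_qp callback are illustrative, not part of the patch): B-frames start at index 2 so they only probe +/-2, and out-of-range qp values are now skipped with continue instead of aborting the search.

/* Sketch of the table-driven qscale search introduced above; try_qp() is hypothetical. */
static void search_qscale(int last_qp, int qmin, int qmax, int is_b_frame,
                          void (*try_qp)(int qp, int dquant))
{
    static const int dquant_tab[4] = {-1, 1, -2, 2};
    int qpi = is_b_frame ? 2 : 0;        /* B-frames only try the +/-2 candidates */
    for (; qpi < 4; qpi++) {
        const int dquant = dquant_tab[qpi];
        const int qp = last_qp + dquant;
        if (qp < qmin || qp > qmax)
            continue;                    /* skip, rather than abort on, out-of-range candidates */
        try_qp(qp, dquant);              /* stands in for the encode_mb_hq() retry above */
    }
}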
@@ -5344,10 +5354,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
s->mb_intra= 0;
motion_x=s->b_direct_mv_table[xy][0];
motion_y=s->b_direct_mv_table[xy][1];
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
-#endif /* #if 0 */
break;
case CANDIDATE_MB_TYPE_BIDIR:
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
@@ -5455,11 +5462,8 @@ static int encode_thread(AVCodecContext *c, void *arg){
}
//not beautiful here but we must write it before flushing so it has to be here
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
msmpeg4_encode_ext_header(s);
-#endif /* #if 0 */
write_slice_end(s);
@@ -5514,14 +5518,19 @@ static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
flush_put_bits(&dst->pb);
}
-static void estimate_qp(MpegEncContext *s, int dry_run){
- if (!s->fixed_qscale)
+static int estimate_qp(MpegEncContext *s, int dry_run){
+ if (s->next_lambda){
+ s->current_picture_ptr->quality=
+ s->current_picture.quality = s->next_lambda;
+ if(!dry_run) s->next_lambda= 0;
+ } else if (!s->fixed_qscale) {
s->current_picture_ptr->quality=
s->current_picture.quality = ff_rate_estimate_qscale(s, dry_run);
+ if (s->current_picture.quality < 0)
+ return -1;
+ }
if(s->adaptive_quant){
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
switch(s->codec_id){
case CODEC_ID_MPEG4:
ff_clean_mpeg4_qscales(s);
@@ -5532,7 +5541,6 @@ static void estimate_qp(MpegEncContext *s, int dry_run){
ff_clean_h263_qscales(s);
break;
}
-#endif /* #if 0 */
s->lambda= s->lambda_table[0];
//FIXME broken
@@ -5540,9 +5548,10 @@ static void estimate_qp(MpegEncContext *s, int dry_run){
s->lambda= s->current_picture.quality;
//printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
update_qscale(s);
+ return 0;
}
-static void encode_picture(MpegEncContext *s, int picture_number)
+static int encode_picture(MpegEncContext *s, int picture_number)
{
int i;
int bits;
@@ -5553,13 +5562,10 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->me.mb_var_sum_temp =
s->me.mc_mb_var_sum_temp = 0;
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
/* we need to initialize some time vars before we can encode b-frames */
// RAL: Condition added for MPEG1VIDEO
if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
ff_set_mpeg4_time(s, s->picture_number); //FIXME rename and use has_b_frames or similar
-#endif /* #if 0 */
s->me.scene_change_score=0;
@@ -5574,7 +5580,8 @@ static void encode_picture(MpegEncContext *s, int picture_number)
}
if(s->flags & CODEC_FLAG_PASS2){
- estimate_qp(s, 1);
+ if (estimate_qp(s,1) < 0)
+ return -1;
ff_get_2pass_fcode(s);
}else if(!(s->flags & CODEC_FLAG_QSCALE)){
if(s->pict_type==B_TYPE)
@@ -5589,14 +5596,12 @@ static void encode_picture(MpegEncContext *s, int picture_number)
ff_update_duplicate_context(s->thread_context[i], s);
}
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
ff_init_me(s);
/* Estimate motion for every MB */
if(s->pict_type != I_TYPE){
s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
- s->lambda2= (s->lambda2* s->avctx->me_penalty_compensation + 128)>>8;
+ s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
if(s->pict_type != B_TYPE && s->avctx->me_threshold==0){
if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
@@ -5605,8 +5610,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
}else /* if(s->pict_type == I_TYPE) */{
-#endif /* #if 0 */
- {
/* I-Frame */
for(i=0; i<s->mb_stride*s->mb_height; i++)
s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
@@ -5630,8 +5633,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
}
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
if(!s->umvplus){
if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
@@ -5685,9 +5686,9 @@ static void encode_picture(MpegEncContext *s, int picture_number)
}
}
}
-#endif /* #if 0 */
- estimate_qp(s, 0);
+ if (estimate_qp(s, 0) < 0)
+ return -1;
if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
s->qscale= 3; //reduce clipping problems
@@ -5716,8 +5717,6 @@ static void encode_picture(MpegEncContext *s, int picture_number)
s->last_bits= put_bits_count(&s->pb);
switch(s->out_format) {
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
case FMT_MJPEG:
mjpeg_picture_header(s);
break;
@@ -5746,15 +5745,11 @@ static void encode_picture(MpegEncContext *s, int picture_number)
else
h263_encode_picture_header(s, picture_number);
break;
-#endif /* #if 0 */
case FMT_MPEG1:
mpeg1_encode_picture_header(s, picture_number);
break;
-/* xine: do not need this for decode or MPEG-1 encoding modes */
-#if 0
case FMT_H264:
break;
-#endif /* #if 0 */
default:
assert(0);
}
@@ -5769,10 +5764,9 @@ static void encode_picture(MpegEncContext *s, int picture_number)
merge_context_after_encode(s, s->thread_context[i]);
}
emms_c();
+ return 0;
}
-#endif //CONFIG_ENCODERS
-
static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
const int intra= s->mb_intra;
int i;
@@ -5797,8 +5791,6 @@ static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
}
}
-#ifdef CONFIG_ENCODERS
-
static int dct_quantize_trellis_c(MpegEncContext *s,
DCTELEM *block, int n,
int qscale, int *overflow){
@@ -5917,13 +5909,13 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
for(i=start_i; i<=last_non_zero; i++){
int level_index, j;
- const int dct_coeff= ABS(block[ scantable[i] ]);
+ const int dct_coeff= FFABS(block[ scantable[i] ]);
const int zero_distoration= dct_coeff*dct_coeff;
int best_score=256*256*256*120;
for(level_index=0; level_index < coeff_count[i]; level_index++){
int distoration;
int level= coeff[level_index][i];
- const int alevel= ABS(level);
+ const int alevel= FFABS(level);
int unquant_coeff;
assert(level);
@@ -6033,7 +6025,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
s->coded_score[n] = last_score;
- dc= ABS(block[0]);
+ dc= FFABS(block[0]);
last_non_zero= last_i - 1;
memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
@@ -6046,7 +6038,7 @@ static int dct_quantize_trellis_c(MpegEncContext *s,
for(i=0; i<coeff_count[0]; i++){
int level= coeff[i][0];
- int alevel= ABS(level);
+ int alevel= FFABS(level);
int unquant_coeff, score, distortion;
if(s->out_format == FMT_H263){
@@ -6188,7 +6180,7 @@ STOP_TIMER("memset rem[]")}
int qns=4;
int w;
- w= ABS(weight[i]) + qns*one;
+ w= FFABS(weight[i]) + qns*one;
w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
weight[i] = w;
@@ -6312,7 +6304,7 @@ STOP_TIMER("dct")}
int score, new_coeff, unquant_change;
score=0;
- if(s->avctx->quantizer_noise_shaping < 2 && ABS(new_level) > ABS(level))
+ if(s->avctx->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
continue;
if(new_level){
@@ -6332,7 +6324,7 @@ STOP_TIMER("dct")}
- last_length[UNI_AC_ENC_INDEX(run, level+64)];
}
}else{
- assert(ABS(new_level)==1);
+ assert(FFABS(new_level)==1);
if(analyze_gradient){
int g= d1[ scantable[i] ];
@@ -6365,7 +6357,7 @@ STOP_TIMER("dct")}
}
}else{
new_coeff=0;
- assert(ABS(level)==1);
+ assert(FFABS(level)==1);
if(i < last_non_zero){
int next_i= i + run2 + 1;
@@ -6433,7 +6425,7 @@ after_last++;
#ifdef REFINE_STATS
if(block[j]){
if(block[j] - best_change){
- if(ABS(block[j]) > ABS(block[j] - best_change)){
+ if(FFABS(block[j]) > FFABS(block[j] - best_change)){
raise++;
}else{
lower++;
@@ -6905,7 +6897,7 @@ AVCodec mjpeg_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
- .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUVJ420P, -1},
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, -1},
};
#endif //CONFIG_ENCODERS
diff --git a/src/libffmpeg/libavcodec/mpegvideo.h b/contrib/ffmpeg/libavcodec/mpegvideo.h
index 023e65700..011678a42 100644
--- a/src/libffmpeg/libavcodec/mpegvideo.h
+++ b/contrib/ffmpeg/libavcodec/mpegvideo.h
@@ -3,18 +3,20 @@
* Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,6 +30,8 @@
#include "dsputil.h"
#include "bitstream.h"
+#include "ratecontrol.h"
+#include "parser.h"
#define FRAME_SKIPPED 100 ///< return value for header parsers if frame is not coded
@@ -72,60 +76,6 @@ enum OutputFormat {
#define INPLACE_OFFSET 16
-typedef struct Predictor{
- double coeff;
- double count;
- double decay;
-} Predictor;
-
-typedef struct RateControlEntry{
- int pict_type;
- float qscale;
- int mv_bits;
- int i_tex_bits;
- int p_tex_bits;
- int misc_bits;
- int header_bits;
- uint64_t expected_bits;
- int new_pict_type;
- float new_qscale;
- int mc_mb_var_sum;
- int mb_var_sum;
- int i_count;
- int skip_count;
- int f_code;
- int b_code;
-}RateControlEntry;
-
-/**
- * rate control context.
- */
-typedef struct RateControlContext{
- FILE *stats_file;
- int num_entries; ///< number of RateControlEntries
- RateControlEntry *entry;
- double buffer_index; ///< amount of bits in the video/audio buffer
- Predictor pred[5];
- double short_term_qsum; ///< sum of recent qscales
- double short_term_qcount; ///< count of recent qscales
- double pass1_rc_eq_output_sum;///< sum of the output of the rc equation, this is used for normalization
- double pass1_wanted_bits; ///< bits which should have been outputed by the pass1 code (including complexity init)
- double last_qscale;
- double last_qscale_for[5]; ///< last qscale for a specific pict type, used for max_diff & ipb factor stuff
- int last_mc_mb_var_sum;
- int last_mb_var_sum;
- uint64_t i_cplx_sum[5];
- uint64_t p_cplx_sum[5];
- uint64_t mv_bits_sum[5];
- uint64_t qscale_sum[5];
- int frame_count[5];
- int last_non_b_pict_type;
-
- void *non_lavc_opaque; ///< context for non lavc rc code (for example xvid)
- float dry_run_qscale; ///< for xvid rc
- int last_picture_number; ///< for xvid rc
-}RateControlContext;
-
/**
* Scantable.
*/
@@ -193,17 +143,6 @@ typedef struct Picture{
int b_frame_score; /* */
} Picture;
-typedef struct ParseContext{
- uint8_t *buffer;
- int index;
- int last_index;
- unsigned int buffer_size;
- uint32_t state; ///< contains the last few bytes in MSB order
- int frame_start_found;
- int overread; ///< the number of bytes which where irreversibly read from the next frame
- int overread_index; ///< the index into ParseContext.buffer of the overreaded bytes
-} ParseContext;
-
struct MpegEncContext;
/**
@@ -286,6 +225,8 @@ typedef struct MpegEncContext {
int chroma_elim_threshold;
int strict_std_compliance; ///< strictly follow the std (MPEG4, ...)
int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically
+ int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag
+ int stream_codec_tag; ///< internal stream_codec_tag upper case converted from avctx stream_codec_tag
/* the following fields are managed internally by the encoder */
/** bit output */
@@ -372,8 +313,8 @@ typedef struct MpegEncContext {
int qscale; ///< QP
int chroma_qscale; ///< chroma QP
- int lambda; ///< lagrange multipler used in rate distortion
- int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT
+ unsigned int lambda; ///< lagrange multipler used in rate distortion
+ unsigned int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT
int *lambda_table;
int adaptive_quant; ///< use adaptive quantization
int dquant; ///< qscale difference to prev qscale
@@ -512,6 +453,7 @@ typedef struct MpegEncContext {
int64_t wanted_bits;
int64_t total_bits;
int frame_bits; ///< bits used for the current frame
+ int next_lambda; ///< next lambda used for retrying to encode a frame
RateControlContext rc_context; ///< contains stuff only accessed in ratecontrol.c
/* statistics, used for 2-pass encoding */
@@ -767,12 +709,9 @@ void ff_init_scantable(uint8_t *, ScanTable *st, const uint8_t *src_scantable);
void ff_draw_horiz_band(MpegEncContext *s, int y, int h);
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h,
int src_x, int src_y, int w, int h);
-#define END_NOT_FOUND -100
-int ff_combine_frame(ParseContext *pc, int next, uint8_t **buf, int *buf_size);
-void ff_parse_close(AVCodecParserContext *s);
void ff_mpeg_flush(AVCodecContext *avctx);
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict);
-void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix);
+void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
int ff_find_unused_picture(MpegEncContext *s, int shared);
void ff_denoise_dct(MpegEncContext *s, DCTELEM *block);
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
@@ -824,12 +763,12 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y);
inline int ff_epzs_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr,
int P[10][2], int src_index, int ref_index, int16_t (*last_mv)[2],
int ref_mv_scale, int size, int h);
-int inline ff_get_mb_score(MpegEncContext * s, int mx, int my, int src_index,
+inline int ff_get_mb_score(MpegEncContext * s, int mx, int my, int src_index,
int ref_index, int size, int h, int add_rate);
/* mpeg12.c */
-extern const int16_t ff_mpeg1_default_intra_matrix[64];
-extern const int16_t ff_mpeg1_default_non_intra_matrix[64];
+extern const uint16_t ff_mpeg1_default_intra_matrix[64];
+extern const uint16_t ff_mpeg1_default_non_intra_matrix[64];
extern const uint8_t ff_mpeg1_dc_scale_table[128];
void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number);
@@ -839,7 +778,6 @@ void mpeg1_encode_mb(MpegEncContext *s,
void ff_mpeg1_encode_init(MpegEncContext *s);
void ff_mpeg1_encode_slice_header(MpegEncContext *s);
void ff_mpeg1_clean_buffers(MpegEncContext *s);
-int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size);
/** RLTable. */
@@ -886,6 +824,7 @@ void ff_h261_encode_mb(MpegEncContext *s,
int motion_x, int motion_y);
void ff_h261_encode_picture_header(MpegEncContext * s, int picture_number);
void ff_h261_encode_init(MpegEncContext *s);
+int ff_h261_get_picture_format(int width, int height);
/* h263.c, h263dec.c */
@@ -909,7 +848,11 @@ void mpeg4_pred_ac(MpegEncContext * s, DCTELEM *block, int n,
int dir);
void ff_set_mpeg4_time(MpegEncContext * s, int picture_number);
void mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
+#ifdef CONFIG_ENCODERS
void h263_encode_init(MpegEncContext *s);
+#else
+static void h263_encode_init(MpegEncContext *s) {assert(0);}
+#endif
void h263_decode_init_vlc(MpegEncContext *s);
int h263_decode_picture_header(MpegEncContext *s);
int ff_h263_decode_gob_header(MpegEncContext *s);
@@ -938,10 +881,10 @@ int ff_mpeg4_decode_partitions(MpegEncContext *s);
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s);
int ff_h263_resync(MpegEncContext *s);
int ff_h263_get_gob_height(MpegEncContext *s);
+void ff_mpeg4_init_direct_mv(MpegEncContext *s);
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my);
int ff_h263_round_chroma(int x);
void ff_h263_encode_motion(MpegEncContext * s, int val, int f_code);
-int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size);
/* rv10.c */
@@ -981,21 +924,5 @@ void mjpeg_picture_header(MpegEncContext *s);
void mjpeg_picture_trailer(MpegEncContext *s);
void ff_mjpeg_stuffing(PutBitContext * pbc);
-
-/* rate control */
-int ff_rate_control_init(MpegEncContext *s);
-float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run);
-void ff_write_pass1_stats(MpegEncContext *s);
-void ff_rate_control_uninit(MpegEncContext *s);
-double ff_eval(char *s, double *const_value, const char **const_name,
- double (**func1)(void *, double), const char **func1_name,
- double (**func2)(void *, double, double), char **func2_name,
- void *opaque);
-int ff_vbv_update(MpegEncContext *s, int frame_size);
-void ff_get_2pass_fcode(MpegEncContext *s);
-
-int ff_xvid_rate_control_init(MpegEncContext *s);
-void ff_xvid_rate_control_uninit(MpegEncContext *s);
-float ff_xvid_rate_estimate_qscale(MpegEncContext *s, int dry_run);
-
#endif /* AVCODEC_MPEGVIDEO_H */
+
diff --git a/src/libffmpeg/libavcodec/msmpeg4.c b/contrib/ffmpeg/libavcodec/msmpeg4.c
index 6d83f5c6c..a8124172b 100644
--- a/src/libffmpeg/libavcodec/msmpeg4.c
+++ b/contrib/ffmpeg/libavcodec/msmpeg4.c
@@ -3,18 +3,20 @@
* Copyright (c) 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* msmpeg4v1 & v2 stuff by Michael Niedermayer <michaelni@gmx.at>
@@ -65,10 +67,10 @@ static inline int msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr);
static int msmpeg4_decode_motion(MpegEncContext * s,
int *mx_ptr, int *my_ptr);
-static void msmpeg4v2_encode_motion(MpegEncContext * s, int val);
static void init_h263_dc_for_msmpeg4(void);
static inline void msmpeg4_memsetw(short *tab, int val, int n);
#ifdef CONFIG_ENCODERS
+static void msmpeg4v2_encode_motion(MpegEncContext * s, int val);
static int get_size_of_code(MpegEncContext * s, RLTable *rl, int last, int run, int level, int intra);
#endif //CONFIG_ENCODERS
static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
@@ -627,7 +629,7 @@ static int get_dc(uint8_t *src, int stride, int scale)
/* dir = 0: left, dir = 1: top prediction */
static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
- uint16_t **dc_val_ptr, int *dir_ptr)
+ int16_t **dc_val_ptr, int *dir_ptr)
{
int a, b, c, wrap, pred, scale;
int16_t *dc_val;
@@ -657,7 +659,7 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
necessitate to modify mpegvideo.c. The problem comes from the
fact they decided to store the quantized DC (which would lead
to problems if Q could vary !) */
-#if (defined(ARCH_X86) || defined(ARCH_X86_64)) && !defined PIC
+#if (defined(ARCH_X86)) && !defined PIC
asm volatile(
"movl %3, %%eax \n\t"
"shrl $1, %%eax \n\t"
@@ -673,7 +675,7 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
"mull %4 \n\t"
"movl %%edx, %2 \n\t"
: "+b" (a), "+c" (b), "+D" (c)
- : "g" (scale), "S" (inverse[scale])
+ : "g" (scale), "S" (ff_inverse[scale])
: "%eax", "%edx"
);
#else
@@ -787,7 +789,7 @@ static void msmpeg4_encode_dc(MpegEncContext * s, int level, int n, int *dir_ptr
/* update predictor */
*dc_val= level;
}else{
- uint16_t *dc_val;
+ int16_t *dc_val;
pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
/* update predictor */
@@ -1343,6 +1345,7 @@ static inline void msmpeg4_memsetw(short *tab, int val, int n)
tab[i] = val;
}
+#ifdef CONFIG_ENCODERS
static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
{
int range, bit_size, sign, code, bits;
@@ -1375,6 +1378,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
}
}
}
+#endif
/* this is identical to h263 except that its range is multiplied by 2 */
static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code)
@@ -1712,7 +1716,7 @@ static inline int msmpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
//printf("level: %d, run: %d at %d %d\n", level, run, s->mb_x, s->mb_y);
#if 0 // waste of time / this will detect very few errors
{
- const int abs_level= ABS(level);
+ const int abs_level= FFABS(level);
const int run1= run - rl->max_run[last][abs_level] - run_diff;
if(abs_level<=MAX_LEVEL && run<=MAX_RUN){
if(abs_level <= rl->max_level[last][run]){
@@ -1873,7 +1877,7 @@ static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
/* update predictor */
*dc_val= level;
}else{
- uint16_t *dc_val;
+ int16_t *dc_val;
pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
level += pred;
diff --git a/src/libffmpeg/libavcodec/msmpeg4data.h b/contrib/ffmpeg/libavcodec/msmpeg4data.h
index 1fbd8aadf..d1ff70371 100644
--- a/src/libffmpeg/libavcodec/msmpeg4data.h
+++ b/contrib/ffmpeg/libavcodec/msmpeg4data.h
@@ -1,3 +1,27 @@
+/*
+ * MSMPEG4 backend for ffmpeg encoder and decoder
+ * copyright (c) 2001 Fabrice Bellard
+ * copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * msmpeg4v1 & v2 stuff by Michael Niedermayer <michaelni@gmx.at>
+ */
+
/**
* @file msmpeg4data.h
* MSMPEG4 data tables.
diff --git a/src/libffmpeg/libavcodec/msrle.c b/contrib/ffmpeg/libavcodec/msrle.c
index 7cdbf7c77..fae5616e5 100644
--- a/src/libffmpeg/libavcodec/msrle.c
+++ b/contrib/ffmpeg/libavcodec/msrle.c
@@ -2,18 +2,20 @@
* Micrsoft RLE Video Decoder
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/msvideo1.c b/contrib/ffmpeg/libavcodec/msvideo1.c
index e8524b32e..5929e1c63 100644
--- a/src/libffmpeg/libavcodec/msvideo1.c
+++ b/contrib/ffmpeg/libavcodec/msvideo1.c
@@ -2,18 +2,20 @@
* Microsoft Video-1 Decoder
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/nuv.c b/contrib/ffmpeg/libavcodec/nuv.c
index d31518250..592d3de03 100644
--- a/src/libffmpeg/libavcodec/nuv.c
+++ b/contrib/ffmpeg/libavcodec/nuv.c
@@ -2,18 +2,20 @@
* NuppelVideo decoder
* Copyright (c) 2006 Reimar Doeffinger
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
diff --git a/contrib/ffmpeg/libavcodec/oggvorbis.c b/contrib/ffmpeg/libavcodec/oggvorbis.c
new file mode 100644
index 000000000..9e684a0f4
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/oggvorbis.c
@@ -0,0 +1,381 @@
+/*
+ * copyright (c) 2002 Mark Hills <mark@pogo.org.uk>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file oggvorbis.c
+ * Ogg Vorbis codec support via libvorbisenc.
+ * @author Mark Hills <mark@pogo.org.uk>
+ */
+
+#include <vorbis/vorbisenc.h>
+
+#include "avcodec.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#define OGGVORBIS_FRAME_SIZE 64
+
+#define BUFFER_SIZE (1024*64)
+
+typedef struct OggVorbisContext {
+ vorbis_info vi ;
+ vorbis_dsp_state vd ;
+ vorbis_block vb ;
+ uint8_t buffer[BUFFER_SIZE];
+ int buffer_index;
+
+ /* decoder */
+ vorbis_comment vc ;
+ ogg_packet op;
+} OggVorbisContext ;
+
+
+static int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avccontext) {
+ double cfreq;
+
+ if(avccontext->flags & CODEC_FLAG_QSCALE) {
+ /* variable bitrate */
+ if(vorbis_encode_setup_vbr(vi, avccontext->channels,
+ avccontext->sample_rate,
+ avccontext->global_quality / (float)FF_QP2LAMBDA))
+ return -1;
+ } else {
+ /* constant bitrate */
+ if(vorbis_encode_setup_managed(vi, avccontext->channels,
+ avccontext->sample_rate, -1, avccontext->bit_rate, -1))
+ return -1;
+
+#ifdef OGGVORBIS_VBR_BY_ESTIMATE
+ /* variable bitrate by estimate */
+ if(vorbis_encode_ctl(vi, OV_ECTL_RATEMANAGE_AVG, NULL))
+ return -1;
+#endif
+ }
+
+ /* cutoff frequency */
+ if(avccontext->cutoff > 0) {
+ cfreq = avccontext->cutoff / 1000.0;
+ if(vorbis_encode_ctl(vi, OV_ECTL_LOWPASS_SET, &cfreq))
+ return -1;
+ }
+
+ return vorbis_encode_setup_init(vi);
+}
+
+static int oggvorbis_encode_init(AVCodecContext *avccontext) {
+ OggVorbisContext *context = avccontext->priv_data ;
+ ogg_packet header, header_comm, header_code;
+ uint8_t *p;
+ unsigned int offset, len;
+
+ vorbis_info_init(&context->vi) ;
+ if(oggvorbis_init_encoder(&context->vi, avccontext) < 0) {
+ av_log(avccontext, AV_LOG_ERROR, "oggvorbis_encode_init: init_encoder failed") ;
+ return -1 ;
+ }
+ vorbis_analysis_init(&context->vd, &context->vi) ;
+ vorbis_block_init(&context->vd, &context->vb) ;
+
+ vorbis_comment_init(&context->vc);
+ vorbis_comment_add_tag(&context->vc, "encoder", LIBAVCODEC_IDENT) ;
+
+ vorbis_analysis_headerout(&context->vd, &context->vc, &header,
+ &header_comm, &header_code);
+
+ len = header.bytes + header_comm.bytes + header_code.bytes;
+ avccontext->extradata_size= 64 + len + len/255;
+ p = avccontext->extradata= av_mallocz(avccontext->extradata_size);
+ p[0] = 2;
+ offset = 1;
+ offset += av_xiphlacing(&p[offset], header.bytes);
+ offset += av_xiphlacing(&p[offset], header_comm.bytes);
+ memcpy(&p[offset], header.packet, header.bytes);
+ offset += header.bytes;
+ memcpy(&p[offset], header_comm.packet, header_comm.bytes);
+ offset += header_comm.bytes;
+ memcpy(&p[offset], header_code.packet, header_code.bytes);
+ offset += header_code.bytes;
+ avccontext->extradata_size = offset;
+ avccontext->extradata= av_realloc(avccontext->extradata, avccontext->extradata_size);
+
+/* vorbis_block_clear(&context->vb);
+ vorbis_dsp_clear(&context->vd);
+ vorbis_info_clear(&context->vi);*/
+ vorbis_comment_clear(&context->vc);
+
+ avccontext->frame_size = OGGVORBIS_FRAME_SIZE ;
+
+ avccontext->coded_frame= avcodec_alloc_frame();
+ avccontext->coded_frame->key_frame= 1;
+
+ return 0 ;
+}
+
+
+static int oggvorbis_encode_frame(AVCodecContext *avccontext,
+ unsigned char *packets,
+ int buf_size, void *data)
+{
+ OggVorbisContext *context = avccontext->priv_data ;
+ float **buffer ;
+ ogg_packet op ;
+ signed short *audio = data ;
+ int l, samples = data ? OGGVORBIS_FRAME_SIZE : 0;
+
+ buffer = vorbis_analysis_buffer(&context->vd, samples) ;
+
+ if(context->vi.channels == 1) {
+ for(l = 0 ; l < samples ; l++)
+ buffer[0][l]=audio[l]/32768.f;
+ } else {
+ for(l = 0 ; l < samples ; l++){
+ buffer[0][l]=audio[l*2]/32768.f;
+ buffer[1][l]=audio[l*2+1]/32768.f;
+ }
+ }
+
+ vorbis_analysis_wrote(&context->vd, samples) ;
+
+ while(vorbis_analysis_blockout(&context->vd, &context->vb) == 1) {
+ vorbis_analysis(&context->vb, NULL);
+ vorbis_bitrate_addblock(&context->vb) ;
+
+ while(vorbis_bitrate_flushpacket(&context->vd, &op)) {
+ if(op.bytes==1) // I'd love to say this is a hack, but sadly it's not; apparently the end-of-stream decision is made inside libogg
+ continue;
+ memcpy(context->buffer + context->buffer_index, &op, sizeof(ogg_packet));
+ context->buffer_index += sizeof(ogg_packet);
+ memcpy(context->buffer + context->buffer_index, op.packet, op.bytes);
+ context->buffer_index += op.bytes;
+// av_log(avccontext, AV_LOG_DEBUG, "e%d / %d\n", context->buffer_index, op.bytes);
+ }
+ }
+
+ l=0;
+ if(context->buffer_index){
+ ogg_packet *op2= (ogg_packet*)context->buffer;
+ op2->packet = context->buffer + sizeof(ogg_packet);
+
+ l= op2->bytes;
+ avccontext->coded_frame->pts= av_rescale_q(op2->granulepos, (AVRational){1, avccontext->sample_rate}, avccontext->time_base);
+ //FIXME we should reorder the user supplied pts and not assume that they are spaced by 1/sample_rate
+
+ memcpy(packets, op2->packet, l);
+ context->buffer_index -= l + sizeof(ogg_packet);
+ memcpy(context->buffer, context->buffer + l + sizeof(ogg_packet), context->buffer_index);
+// av_log(avccontext, AV_LOG_DEBUG, "E%d\n", l);
+ }
+
+ return l;
+}
+
+
+static int oggvorbis_encode_close(AVCodecContext *avccontext) {
+ OggVorbisContext *context = avccontext->priv_data ;
+/* ogg_packet op ; */
+
+ vorbis_analysis_wrote(&context->vd, 0) ; /* notify vorbisenc this is EOF */
+
+ vorbis_block_clear(&context->vb);
+ vorbis_dsp_clear(&context->vd);
+ vorbis_info_clear(&context->vi);
+
+ av_freep(&avccontext->coded_frame);
+ av_freep(&avccontext->extradata);
+
+ return 0 ;
+}
+
+
+AVCodec oggvorbis_encoder = {
+ "vorbis",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_VORBIS,
+ sizeof(OggVorbisContext),
+ oggvorbis_encode_init,
+ oggvorbis_encode_frame,
+ oggvorbis_encode_close,
+ .capabilities= CODEC_CAP_DELAY,
+} ;
+
+static int oggvorbis_decode_init(AVCodecContext *avccontext) {
+ OggVorbisContext *context = avccontext->priv_data ;
+ uint8_t *p= avccontext->extradata;
+ int i, hsizes[3];
+ unsigned char *headers[3], *extradata = avccontext->extradata;
+
+ vorbis_info_init(&context->vi) ;
+ vorbis_comment_init(&context->vc) ;
+
+ if(! avccontext->extradata_size || ! p) {
+ av_log(avccontext, AV_LOG_ERROR, "vorbis extradata absent\n");
+ return -1;
+ }
+
+ if(p[0] == 0 && p[1] == 30) {
+ for(i = 0; i < 3; i++){
+ hsizes[i] = *p++ << 8;
+ hsizes[i] += *p++;
+ headers[i] = p;
+ p += hsizes[i];
+ }
+ } else if(*p == 2) {
+ unsigned int offset = 1;
+ p++;
+ for(i=0; i<2; i++) {
+ hsizes[i] = 0;
+ while((*p == 0xFF) && (offset < avccontext->extradata_size)) {
+ hsizes[i] += 0xFF;
+ offset++;
+ p++;
+ }
+ if(offset >= avccontext->extradata_size - 1) {
+ av_log(avccontext, AV_LOG_ERROR,
+ "vorbis header sizes damaged\n");
+ return -1;
+ }
+ hsizes[i] += *p;
+ offset++;
+ p++;
+ }
+ hsizes[2] = avccontext->extradata_size - hsizes[0]-hsizes[1]-offset;
+#if 0
+ av_log(avccontext, AV_LOG_DEBUG,
+ "vorbis header sizes: %d, %d, %d, / extradata_len is %d \n",
+ hsizes[0], hsizes[1], hsizes[2], avccontext->extradata_size);
+#endif
+ headers[0] = extradata + offset;
+ headers[1] = extradata + offset + hsizes[0];
+ headers[2] = extradata + offset + hsizes[0] + hsizes[1];
+ } else {
+ av_log(avccontext, AV_LOG_ERROR,
+ "vorbis initial header len is wrong: %d\n", *p);
+ return -1;
+ }
+
+ for(i=0; i<3; i++){
+ context->op.b_o_s= i==0;
+ context->op.bytes = hsizes[i];
+ context->op.packet = headers[i];
+ if(vorbis_synthesis_headerin(&context->vi, &context->vc, &context->op)<0){
+ av_log(avccontext, AV_LOG_ERROR, "%d. vorbis header damaged\n", i+1);
+ return -1;
+ }
+ }
+
+ avccontext->channels = context->vi.channels;
+ avccontext->sample_rate = context->vi.rate;
+ avccontext->time_base= (AVRational){1, avccontext->sample_rate};
+
+ vorbis_synthesis_init(&context->vd, &context->vi);
+ vorbis_block_init(&context->vd, &context->vb);
+
+ return 0 ;
+}
+
+
+static inline int conv(int samples, float **pcm, char *buf, int channels) {
+ int i, j, val ;
+ ogg_int16_t *ptr, *data = (ogg_int16_t*)buf ;
+ float *mono ;
+
+ for(i = 0 ; i < channels ; i++){
+ ptr = &data[i];
+ mono = pcm[i] ;
+
+ for(j = 0 ; j < samples ; j++) {
+
+ val = mono[j] * 32767.f;
+
+ if(val > 32767) val = 32767 ;
+ if(val < -32768) val = -32768 ;
+
+ *ptr = val ;
+ ptr += channels;
+ }
+ }
+
+ return 0 ;
+}
+
+
+static int oggvorbis_decode_frame(AVCodecContext *avccontext,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ OggVorbisContext *context = avccontext->priv_data ;
+ float **pcm ;
+ ogg_packet *op= &context->op;
+ int samples, total_samples, total_bytes;
+
+ if(!buf_size){
+ //FIXME flush
+ return 0;
+ }
+
+ op->packet = buf;
+ op->bytes = buf_size;
+
+// av_log(avccontext, AV_LOG_DEBUG, "%d %d %d %"PRId64" %"PRId64" %d %d\n", op->bytes, op->b_o_s, op->e_o_s, op->granulepos, op->packetno, buf_size, context->vi.rate);
+
+/* for(i=0; i<op->bytes; i++)
+ av_log(avccontext, AV_LOG_DEBUG, "%02X ", op->packet[i]);
+ av_log(avccontext, AV_LOG_DEBUG, "\n");*/
+
+ if(vorbis_synthesis(&context->vb, op) == 0)
+ vorbis_synthesis_blockin(&context->vd, &context->vb) ;
+
+ total_samples = 0 ;
+ total_bytes = 0 ;
+
+ while((samples = vorbis_synthesis_pcmout(&context->vd, &pcm)) > 0) {
+ conv(samples, pcm, (char*)data + total_bytes, context->vi.channels) ;
+ total_bytes += samples * 2 * context->vi.channels ;
+ total_samples += samples ;
+ vorbis_synthesis_read(&context->vd, samples) ;
+ }
+
+ *data_size = total_bytes ;
+ return buf_size ;
+}
+
+
+static int oggvorbis_decode_close(AVCodecContext *avccontext) {
+ OggVorbisContext *context = avccontext->priv_data ;
+
+ vorbis_info_clear(&context->vi) ;
+ vorbis_comment_clear(&context->vc) ;
+
+ return 0 ;
+}
+
+
+AVCodec oggvorbis_decoder = {
+ "vorbis",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_VORBIS,
+ sizeof(OggVorbisContext),
+ oggvorbis_decode_init,
+ NULL,
+ oggvorbis_decode_close,
+ oggvorbis_decode_frame,
+ .capabilities= CODEC_CAP_DELAY,
+} ;
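oggvorbis_encode_init() above emits the three Vorbis headers as "type 2" extradata: a leading byte 2, then the first two header lengths written as runs of 0xFF plus a final remainder byte (av_xiphlacing), then the three packets back to back; oggvorbis_decode_init() undoes exactly that. A compact, hedged sketch of the parsing side, with illustrative names and only the bounds checks shown above:

/* Sketch: unpack type-2 Xiph-laced extradata; mirrors oggvorbis_decode_init() above. */
#include <stdint.h>

static int parse_xiph_extradata(const uint8_t *extradata, int extradata_size,
                                const uint8_t *headers[3], int sizes[3])
{
    const uint8_t *p = extradata;
    unsigned offset = 1;
    int i;

    if (extradata_size < 3 || p[0] != 2)
        return -1;                                  /* not the laced layout written above */
    p++;
    for (i = 0; i < 2; i++) {                       /* the third size is implicit */
        sizes[i] = 0;
        while (*p == 0xFF && offset < (unsigned)extradata_size) {
            sizes[i] += 0xFF;
            offset++; p++;
        }
        if (offset >= (unsigned)extradata_size - 1)
            return -1;                              /* header sizes damaged */
        sizes[i] += *p++;
        offset++;
    }
    sizes[2] = extradata_size - sizes[0] - sizes[1] - offset;
    headers[0] = extradata + offset;
    headers[1] = headers[0] + sizes[0];
    headers[2] = headers[1] + sizes[1];
    return 0;
}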
diff --git a/contrib/ffmpeg/libavcodec/opt.c b/contrib/ffmpeg/libavcodec/opt.c
new file mode 100644
index 000000000..a200d9a82
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/opt.c
@@ -0,0 +1,381 @@
+/*
+ * AVOptions
+ * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/**
+ * @file opt.c
+ * AVOptions
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#include "avcodec.h"
+#include "opt.h"
+#include "eval.h"
+
+//FIXME order them and do a bin search
+static AVOption *find_opt(void *v, const char *name, const char *unit){
+ AVClass *c= *(AVClass**)v; //FIXME silly way of storing AVClass
+ AVOption *o= c->option;
+
+ for(;o && o->name; o++){
+ if(!strcmp(o->name, name) && (!unit || !strcmp(o->unit, unit)) )
+ return o;
+ }
+ return NULL;
+}
+
+AVOption *av_next_option(void *obj, AVOption *last){
+ if(last && last[1].name) return ++last;
+ else if(last) return NULL;
+ else return (*(AVClass**)obj)->option;
+}
+
+static AVOption *av_set_number(void *obj, const char *name, double num, int den, int64_t intnum){
+ AVOption *o= find_opt(obj, name, NULL);
+ void *dst;
+ if(!o || o->offset<=0)
+ return NULL;
+
+ if(o->max*den < num*intnum || o->min*den > num*intnum) {
+ av_log(NULL, AV_LOG_ERROR, "Value %lf for parameter '%s' out of range.\n", num, name);
+ return NULL;
+ }
+
+ dst= ((uint8_t*)obj) + o->offset;
+
+ switch(o->type){
+ case FF_OPT_TYPE_FLAGS:
+ case FF_OPT_TYPE_INT: *(int *)dst= lrintf(num/den)*intnum; break;
+ case FF_OPT_TYPE_INT64: *(int64_t *)dst= lrintf(num/den)*intnum; break;
+ case FF_OPT_TYPE_FLOAT: *(float *)dst= num*intnum/den; break;
+ case FF_OPT_TYPE_DOUBLE:*(double *)dst= num*intnum/den; break;
+ case FF_OPT_TYPE_RATIONAL:
+ if((int)num == num) *(AVRational*)dst= (AVRational){num*intnum, den};
+ else *(AVRational*)dst= av_d2q(num*intnum/den, 1<<24);
+ break;
+ default:
+ return NULL;
+ }
+ return o;
+}
+
+static AVOption *set_all_opt(void *v, const char *unit, double d){
+ AVClass *c= *(AVClass**)v; //FIXME silly way of storing AVClass
+ AVOption *o= c->option;
+ AVOption *ret=NULL;
+
+ for(;o && o->name; o++){
+ if(o->type != FF_OPT_TYPE_CONST && o->unit && !strcmp(o->unit, unit)){
+ double tmp= d;
+ if(o->type == FF_OPT_TYPE_FLAGS)
+ tmp= av_get_int(v, o->name, NULL) | (int64_t)d;
+
+ av_set_number(v, o->name, tmp, 1, 1);
+ ret= o;
+ }
+ }
+ return ret;
+}
+
+static double const_values[]={
+ M_PI,
+ M_E,
+ FF_QP2LAMBDA,
+ 0
+};
+
+static const char *const_names[]={
+ "PI",
+ "E",
+ "QP2LAMBDA",
+ 0
+};
+
+AVOption *av_set_string(void *obj, const char *name, const char *val){
+ AVOption *o= find_opt(obj, name, NULL);
+ if(o && o->offset==0 && o->type == FF_OPT_TYPE_CONST && o->unit){
+ return set_all_opt(obj, o->unit, o->default_val);
+ }
+ if(!o || !val || o->offset<=0)
+ return NULL;
+ if(o->type != FF_OPT_TYPE_STRING){
+ for(;;){
+ int i;
+ char buf[256];
+ int cmd=0;
+ double d;
+ char *error = NULL;
+
+ if(*val == '+' || *val == '-')
+ cmd= *(val++);
+
+ for(i=0; i<sizeof(buf)-1 && val[i] && val[i]!='+' && val[i]!='-'; i++)
+ buf[i]= val[i];
+ buf[i]=0;
+ val+= i;
+
+ d = ff_eval2(buf, const_values, const_names, NULL, NULL, NULL, NULL, NULL, &error);
+ if(isnan(d)) {
+ AVOption *o_named= find_opt(obj, buf, o->unit);
+ if(o_named && o_named->type == FF_OPT_TYPE_CONST)
+ d= o_named->default_val;
+ else if(!strcmp(buf, "default")) d= o->default_val;
+ else if(!strcmp(buf, "max" )) d= o->max;
+ else if(!strcmp(buf, "min" )) d= o->min;
+ else {
+ if (!error)
+ av_log(NULL, AV_LOG_ERROR, "Unable to parse option value \"%s\": %s\n", val, error);
+ return NULL;
+ }
+ }
+ if(o->type == FF_OPT_TYPE_FLAGS){
+ if (cmd=='+') d= av_get_int(obj, name, NULL) | (int64_t)d;
+ else if(cmd=='-') d= av_get_int(obj, name, NULL) &~(int64_t)d;
+ }else if(cmd=='-')
+ d= -d;
+
+ av_set_number(obj, name, d, 1, 1);
+ if(!*val)
+ return o;
+ }
+ return NULL;
+ }
+
+ memcpy(((uint8_t*)obj) + o->offset, val, sizeof(val));
+ return o;
+}
+
+AVOption *av_set_double(void *obj, const char *name, double n){
+ return av_set_number(obj, name, n, 1, 1);
+}
+
+AVOption *av_set_q(void *obj, const char *name, AVRational n){
+ return av_set_number(obj, name, n.num, n.den, 1);
+}
+
+AVOption *av_set_int(void *obj, const char *name, int64_t n){
+ return av_set_number(obj, name, 1, 1, n);
+}
+
+/**
+ *
+ * @param buf a buffer which is used for returning non string values as strings, can be NULL
+ * @param buf_len allocated length in bytes of buf
+ */
+const char *av_get_string(void *obj, const char *name, AVOption **o_out, char *buf, int buf_len){
+ AVOption *o= find_opt(obj, name, NULL);
+ void *dst;
+ if(!o || o->offset<=0)
+ return NULL;
+ if(o->type != FF_OPT_TYPE_STRING && (!buf || !buf_len))
+ return NULL;
+
+ dst= ((uint8_t*)obj) + o->offset;
+ if(o_out) *o_out= o;
+
+ if(o->type == FF_OPT_TYPE_STRING)
+ return dst;
+
+ switch(o->type){
+ case FF_OPT_TYPE_FLAGS: snprintf(buf, buf_len, "0x%08X",*(int *)dst);break;
+ case FF_OPT_TYPE_INT: snprintf(buf, buf_len, "%d" , *(int *)dst);break;
+ case FF_OPT_TYPE_INT64: snprintf(buf, buf_len, "%"PRId64, *(int64_t*)dst);break;
+ case FF_OPT_TYPE_FLOAT: snprintf(buf, buf_len, "%f" , *(float *)dst);break;
+ case FF_OPT_TYPE_DOUBLE: snprintf(buf, buf_len, "%f" , *(double *)dst);break;
+ case FF_OPT_TYPE_RATIONAL: snprintf(buf, buf_len, "%d/%d", ((AVRational*)dst)->num, ((AVRational*)dst)->den);break;
+ default: return NULL;
+ }
+ return buf;
+}
+
+static int av_get_number(void *obj, const char *name, AVOption **o_out, double *num, int *den, int64_t *intnum){
+ AVOption *o= find_opt(obj, name, NULL);
+ void *dst;
+ if(!o || o->offset<=0)
+ goto error;
+
+ dst= ((uint8_t*)obj) + o->offset;
+
+ if(o_out) *o_out= o;
+
+ switch(o->type){
+ case FF_OPT_TYPE_FLAGS:
+ case FF_OPT_TYPE_INT: *intnum= *(int *)dst;return 0;
+ case FF_OPT_TYPE_INT64: *intnum= *(int64_t*)dst;return 0;
+ case FF_OPT_TYPE_FLOAT: *num= *(float *)dst;return 0;
+ case FF_OPT_TYPE_DOUBLE: *num= *(double *)dst;return 0;
+ case FF_OPT_TYPE_RATIONAL: *intnum= ((AVRational*)dst)->num;
+ *den = ((AVRational*)dst)->den;
+ return 0;
+ }
+error:
+ *den=*intnum=0;
+ return -1;
+}
+
+double av_get_double(void *obj, const char *name, AVOption **o_out){
+ int64_t intnum=1;
+ double num=1;
+ int den=1;
+
+ av_get_number(obj, name, o_out, &num, &den, &intnum);
+ return num*intnum/den;
+}
+
+AVRational av_get_q(void *obj, const char *name, AVOption **o_out){
+ int64_t intnum=1;
+ double num=1;
+ int den=1;
+
+ av_get_number(obj, name, o_out, &num, &den, &intnum);
+ if(num == 1.0 && (int)intnum == intnum)
+ return (AVRational){intnum, den};
+ else
+ return av_d2q(num*intnum/den, 1<<24);
+}
+
+int64_t av_get_int(void *obj, const char *name, AVOption **o_out){
+ int64_t intnum=1;
+ double num=1;
+ int den=1;
+
+ av_get_number(obj, name, o_out, &num, &den, &intnum);
+ return num*intnum/den;
+}
+
+static void opt_list(void *obj, void *av_log_obj, char *unit)
+{
+ AVOption *opt=NULL;
+
+ while((opt= av_next_option(obj, opt))){
+ if(!(opt->flags & (AV_OPT_FLAG_ENCODING_PARAM|AV_OPT_FLAG_DECODING_PARAM)))
+ continue;
+
+ /* Don't print CONST's on level one.
+ * Don't print anything but CONST's on level two.
+ * Only print items from the requested unit.
+ */
+ if (!unit && opt->type==FF_OPT_TYPE_CONST)
+ continue;
+ else if (unit && opt->type!=FF_OPT_TYPE_CONST)
+ continue;
+ else if (unit && opt->type==FF_OPT_TYPE_CONST && strcmp(unit, opt->unit))
+ continue;
+ else if (unit && opt->type == FF_OPT_TYPE_CONST)
+ av_log(av_log_obj, AV_LOG_INFO, " %-15s ", opt->name);
+ else
+ av_log(av_log_obj, AV_LOG_INFO, "-%-17s ", opt->name);
+
+ switch( opt->type )
+ {
+ case FF_OPT_TYPE_FLAGS:
+ av_log( av_log_obj, AV_LOG_INFO, "%-7s ", "<flags>" );
+ break;
+ case FF_OPT_TYPE_INT:
+ av_log( av_log_obj, AV_LOG_INFO, "%-7s ", "<int>" );
+ break;
+ case FF_OPT_TYPE_INT64:
+ av_log( av_log_obj, AV_LOG_INFO, "%-7s ", "<int64>" );
+ break;
+ case FF_OPT_TYPE_DOUBLE:
+ av_log( av_log_obj, AV_LOG_INFO, "%-7s ", "<double>" );
+ break;
+ case FF_OPT_TYPE_FLOAT:
+ av_log( av_log_obj, AV_LOG_INFO, "%-7s ", "<float>" );
+ break;
+ case FF_OPT_TYPE_STRING:
+ av_log( av_log_obj, AV_LOG_INFO, "%-7s ", "<string>" );
+ break;
+ case FF_OPT_TYPE_RATIONAL:
+ av_log( av_log_obj, AV_LOG_INFO, "%-7s ", "<rational>" );
+ break;
+ case FF_OPT_TYPE_CONST:
+ default:
+ av_log( av_log_obj, AV_LOG_INFO, "%-7s ", "" );
+ break;
+ }
+ av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_ENCODING_PARAM) ? 'E' : '.');
+ av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_DECODING_PARAM) ? 'D' : '.');
+ av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_VIDEO_PARAM ) ? 'V' : '.');
+ av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_AUDIO_PARAM ) ? 'A' : '.');
+ av_log(av_log_obj, AV_LOG_INFO, "%c", (opt->flags & AV_OPT_FLAG_SUBTITLE_PARAM) ? 'S' : '.');
+
+ if(opt->help)
+ av_log(av_log_obj, AV_LOG_INFO, " %s", opt->help);
+ av_log(av_log_obj, AV_LOG_INFO, "\n");
+ if (opt->unit && opt->type != FF_OPT_TYPE_CONST) {
+ opt_list(obj, av_log_obj, opt->unit);
+ }
+ }
+}
+
+int av_opt_show(void *obj, void *av_log_obj){
+ if(!obj)
+ return -1;
+
+ av_log(av_log_obj, AV_LOG_INFO, "%s AVOptions:\n", (*(AVClass**)obj)->class_name);
+
+ opt_list(obj, av_log_obj, NULL);
+
+ return 0;
+}
+
+/** Set the values of the AVCodecContext or AVFormatContext structure.
+ * They are set to the defaults specified in the according AVOption options
+ * array default_val field.
+ *
+ * @param s AVCodecContext or AVFormatContext for which the defaults will be set
+ */
+void av_opt_set_defaults(void *s)
+{
+ AVOption *opt = NULL;
+ while ((opt = av_next_option(s, opt)) != NULL) {
+ switch(opt->type) {
+ case FF_OPT_TYPE_CONST:
+ /* Nothing to be done here */
+ break;
+ case FF_OPT_TYPE_FLAGS:
+ case FF_OPT_TYPE_INT: {
+ int val;
+ val = opt->default_val;
+ av_set_int(s, opt->name, val);
+ }
+ break;
+ case FF_OPT_TYPE_FLOAT: {
+ double val;
+ val = opt->default_val;
+ av_set_double(s, opt->name, val);
+ }
+ break;
+ case FF_OPT_TYPE_RATIONAL: {
+ AVRational val;
+ val = av_d2q(opt->default_val, INT_MAX);
+ av_set_q(s, opt->name, val);
+ }
+ break;
+ case FF_OPT_TYPE_STRING:
+ /* Cannot set default for string as default_val is of type double */
+ break;
+ default:
+ av_log(s, AV_LOG_DEBUG, "AVOption type %d of option %s not implemented yet\n", opt->type, opt->name);
+ }
+ }
+}
+
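opt.c adds the generic AVOptions accessors (av_set_string/av_set_int/av_set_double/av_set_q, the av_get_* readers, av_opt_show and av_opt_set_defaults). A hedged usage sketch follows; it assumes the target object is AVOptions-enabled, i.e. its first member is an AVClass* carrying an option table as find_opt() requires, and the "qmax" and "flags"/"qscale" names are used only as plausible examples.

/* Sketch only: exercising the API defined in opt.c above.
   Assumes avcodec.h declares avcodec_alloc_context() and that the context's
   option table contains "qmax" and a "qscale" flag -- assumptions, not guarantees. */
#include <stdio.h>
#include "avcodec.h"
#include "opt.h"

static void avoptions_demo(void)
{
    AVCodecContext *ctx = avcodec_alloc_context();
    char buf[64];

    av_opt_set_defaults(ctx);                 /* load every option's default_val */
    av_set_int(ctx, "qmax", 31);              /* numeric setter */
    av_set_string(ctx, "flags", "+qscale");   /* FLAGS options take +name/-name syntax */

    printf("qmax=%s\n", av_get_string(ctx, "qmax", NULL, buf, sizeof(buf)));
    av_opt_show(ctx, NULL);                   /* dump the option table to the log */
    av_free(ctx);
}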
diff --git a/src/libffmpeg/libavcodec/opt.h b/contrib/ffmpeg/libavcodec/opt.h
index 058c6b63a..b8a17031b 100644
--- a/src/libffmpeg/libavcodec/opt.h
+++ b/contrib/ffmpeg/libavcodec/opt.h
@@ -1,3 +1,24 @@
+/*
+ * AVOptions
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#ifndef AVOPT_H
#define AVOPT_H
@@ -57,5 +78,6 @@ int64_t av_get_int(void *obj, const char *name, AVOption **o_out);
const char *av_get_string(void *obj, const char *name, AVOption **o_out, char *buf, int buf_len);
AVOption *av_next_option(void *obj, AVOption *last);
int av_opt_show(void *obj, void *av_log_obj);
+void av_opt_set_defaults(void *s);
#endif
diff --git a/contrib/ffmpeg/libavcodec/os2thread.c b/contrib/ffmpeg/libavcodec/os2thread.c
new file mode 100644
index 000000000..c52b7ae02
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/os2thread.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+//#define DEBUG
+
+// Ported by Vlad Stelmahovsky
+
+#include "avcodec.h"
+#include "common.h"
+
+#define INCL_DOS
+#define INCL_DOSERRORS
+#define INCL_DOSDEVIOCTL
+#include <os2.h>
+
+typedef struct ThreadContext{
+ AVCodecContext *avctx;
+ int thread;
+ HEV work_sem;
+ HEV done_sem;
+ int (*func)(AVCodecContext *c, void *arg);
+ void *arg;
+ int ret;
+}ThreadContext;
+
+
+void thread_func(void *v){
+ ThreadContext *c= v;
+
+ for(;;){
+ //printf("thread_func %X enter wait\n", (int)v); fflush(stdout);
+ DosWaitEventSem(c->work_sem, SEM_INDEFINITE_WAIT);
+// WaitForSingleObject(c->work_sem, INFINITE);
+//printf("thread_func %X after wait (func=%X)\n", (int)v, (int)c->func); fflush(stdout);
+ if(c->func)
+ c->ret= c->func(c->avctx, c->arg);
+ else
+ return;
+ //printf("thread_func %X signal complete\n", (int)v); fflush(stdout);
+ DosPostEventSem(c->done_sem);
+// ReleaseSemaphore(c->done_sem, 1, 0);
+ }
+
+ return;
+}
+
+/**
+ * free what has been allocated by avcodec_thread_init().
+ * Must be called after decoding has finished; in particular, do not call it while avcodec_thread_execute() is running.
+ */
+void avcodec_thread_free(AVCodecContext *s){
+ ThreadContext *c= s->thread_opaque;
+ int i;
+
+ for(i=0; i<s->thread_count; i++){
+
+ c[i].func= NULL;
+ DosPostEventSem(c[i].work_sem);
+ // ReleaseSemaphore(c[i].work_sem, 1, 0);
+ DosWaitThread((PTID)&c[i].thread,DCWW_WAIT);
+// WaitForSingleObject(c[i].thread, INFINITE);
+ if(c[i].work_sem) DosCloseEventSem(c[i].work_sem);//CloseHandle(c[i].work_sem);
+ if(c[i].done_sem) DosCloseEventSem(c[i].done_sem);//CloseHandle(c[i].done_sem);
+ }
+
+ av_freep(&s->thread_opaque);
+}
+
+int avcodec_thread_execute(AVCodecContext *s, int (*func)(AVCodecContext *c2, void *arg2),void **arg, int *ret, int count){
+ ThreadContext *c= s->thread_opaque;
+ int i;
+
+ assert(s == c->avctx);
+ assert(count <= s->thread_count);
+
+ /* note, we can be certain that this is not called with the same AVCodecContext by different threads at the same time */
+
+ for(i=0; i<count; i++){
+
+ c[i].arg= arg[i];
+ c[i].func= func;
+ c[i].ret= 12345;
+
+ DosPostEventSem(c[i].work_sem);
+// ReleaseSemaphore(c[i].work_sem, 1, 0);
+ }
+ for(i=0; i<count; i++){
+ DosWaitEventSem(c[i].done_sem,SEM_INDEFINITE_WAIT);
+// WaitForSingleObject(c[i].done_sem, INFINITE);
+
+ c[i].func= NULL;
+ if(ret) ret[i]= c[i].ret;
+ }
+ return 0;
+}
+
+int avcodec_thread_init(AVCodecContext *s, int thread_count){
+ int i;
+ ThreadContext *c;
+ uint32_t threadid;
+
+ s->thread_count= thread_count;
+
+ assert(!s->thread_opaque);
+ c= av_mallocz(sizeof(ThreadContext)*thread_count);
+ s->thread_opaque= c;
+
+ for(i=0; i<thread_count; i++){
+//printf("init semaphors %d\n", i); fflush(stdout);
+ c[i].avctx= s;
+
+ if (DosCreateEventSem(NULL,&c[i].work_sem,DC_SEM_SHARED,0))
+ goto fail;
+ if (DosCreateEventSem(NULL,&c[i].done_sem,DC_SEM_SHARED,0))
+ goto fail;
+
+//printf("create thread %d\n", i); fflush(stdout);
+// c[i].thread = (HANDLE)_beginthreadex(NULL, 0, thread_func, &c[i], 0, &threadid );
+ c[i].thread = _beginthread(thread_func, NULL, 0x10000, &c[i]);
+ if( c[i].thread <= 0 ) goto fail;
+ }
+//printf("init done\n"); fflush(stdout);
+
+ s->execute= avcodec_thread_execute;
+
+ return 0;
+fail:
+ avcodec_thread_free(s);
+ return -1;
+}
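avcodec_thread_init() above installs avcodec_thread_execute() as avctx->execute, so codec code only ever goes through that function pointer. A hedged sketch of the intended calling pattern; do_slice() and slice_args are hypothetical:

/* Sketch: using the thread pool set up by os2thread.c above.
   do_slice() and slice_args are placeholders, not part of the patch. */
#include "avcodec.h"

static int do_slice(AVCodecContext *avctx, void *arg)
{
    /* per-thread work on one slice or duplicated context */
    return 0;
}

static void threads_demo(AVCodecContext *avctx, void **slice_args, int count)
{
    if (avcodec_thread_init(avctx, count) < 0)
        return;                                       /* pool could not be created */
    /* the ret array may be NULL: avcodec_thread_execute() only fills it when given */
    avctx->execute(avctx, do_slice, slice_args, NULL, count);
    avcodec_thread_free(avctx);                       /* never call while execute() is still running */
}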
diff --git a/src/libffmpeg/libavcodec/parser.c b/contrib/ffmpeg/libavcodec/parser.c
index 59087cdb8..72a3e55a3 100644
--- a/src/libffmpeg/libavcodec/parser.c
+++ b/contrib/ffmpeg/libavcodec/parser.c
@@ -3,23 +3,26 @@
* Copyright (c) 2003 Fabrice Bellard.
* Copyright (c) 2003 Michael Niedermayer.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
#include "mpegvideo.h"
#include "mpegaudio.h"
+#include "parser.h"
AVCodecParser *av_first_parser = NULL;
@@ -69,8 +72,29 @@ AVCodecParserContext *av_parser_init(int codec_id)
return s;
}
-/* NOTE: buf_size == 0 is used to signal EOF so that the last frame
- can be returned if necessary */
+/**
+ *
+ * @param buf input
+ * @param buf_size input length; to signal EOF, this should be 0 (so that the last frame can be output)
+ * @param pts input presentation timestamp
+ * @param dts input decoding timestamp
+ * @param poutbuf will contain a pointer to the first byte of the output frame
+ * @param poutbuf_size will contain the length of the output frame
+ * @return the number of bytes of the input bitstream used
+ *
+ * Example:
+ * @code
+ * while(in_len){
+ * len = av_parser_parse(myparser, AVCodecContext, &data, &size,
+ * in_data, in_len,
+ * pts, dts);
+ * in_data += len;
+ * in_len -= len;
+ *
+ * decode_frame(data, size);
+ * }
+ * @endcode
+ */
int av_parser_parse(AVCodecParserContext *s,
AVCodecContext *avctx,
uint8_t **poutbuf, int *poutbuf_size,
@@ -104,7 +128,7 @@ int av_parser_parse(AVCodecParserContext *s,
/* WARNING: the returned index can be negative */
index = s->parser->parser_parse(s, avctx, poutbuf, poutbuf_size, buf, buf_size);
-//av_log(NULL, AV_LOG_DEBUG, "parser: in:%lld, %lld, out:%lld, %lld, in:%d out:%d id:%d\n", pts, dts, s->last_pts, s->last_dts, buf_size, *poutbuf_size, avctx->codec_id);
+//av_log(NULL, AV_LOG_DEBUG, "parser: in:%"PRId64", %"PRId64", out:%"PRId64", %"PRId64", in:%d out:%d id:%d\n", pts, dts, s->last_pts, s->last_dts, buf_size, *poutbuf_size, avctx->codec_id);
/* update the file pointer */
if (*poutbuf_size) {
/* fill the data for the current frame */
@@ -190,27 +214,6 @@ void av_parser_close(AVCodecParserContext *s)
/*****************************************************/
-//#define END_NOT_FOUND (-100)
-
-#define PICTURE_START_CODE 0x00000100
-#define SEQ_START_CODE 0x000001b3
-#define EXT_START_CODE 0x000001b5
-#define SLICE_MIN_START_CODE 0x00000101
-#define SLICE_MAX_START_CODE 0x000001af
-
-typedef struct ParseContext1{
- ParseContext pc;
-/* XXX/FIXME PC1 vs. PC */
- /* MPEG2 specific */
- int frame_rate;
- int progressive_sequence;
- int width, height;
-
- /* XXX: suppress that, needed by MPEG4 */
- MpegEncContext *enc;
- int first_picture;
-} ParseContext1;
-
/**
* combines the (truncated) bitstream to a complete frame
* @returns -1 if no complete frame could be created
@@ -273,186 +276,6 @@ int ff_combine_frame(ParseContext *pc, int next, uint8_t **buf, int *buf_size)
return 0;
}
-/* XXX: merge with libavcodec ? */
-#define MPEG1_FRAME_RATE_BASE 1001
-
-static const int frame_rate_tab[16] = {
- 0,
- 24000,
- 24024,
- 25025,
- 30000,
- 30030,
- 50050,
- 60000,
- 60060,
- // Xing's 15fps: (9)
- 15015,
- // libmpeg3's "Unofficial economy rates": (10-13)
- 5005,
- 10010,
- 12012,
- 15015,
- // random, just to avoid segfault !never encode these
- 25025,
- 25025,
-};
-
-#ifdef CONFIG_MPEGVIDEO_PARSER
-//FIXME move into mpeg12.c
-static void mpegvideo_extract_headers(AVCodecParserContext *s,
- AVCodecContext *avctx,
- const uint8_t *buf, int buf_size)
-{
- ParseContext1 *pc = s->priv_data;
- const uint8_t *buf_end;
- int32_t start_code;
- int frame_rate_index, ext_type, bytes_left;
- int frame_rate_ext_n, frame_rate_ext_d;
- int picture_structure, top_field_first, repeat_first_field, progressive_frame;
- int horiz_size_ext, vert_size_ext, bit_rate_ext;
-//FIXME replace the crap with get_bits()
- s->repeat_pict = 0;
- buf_end = buf + buf_size;
- while (buf < buf_end) {
- start_code= -1;
- buf= ff_find_start_code(buf, buf_end, &start_code);
- bytes_left = buf_end - buf;
- switch(start_code) {
- case PICTURE_START_CODE:
- if (bytes_left >= 2) {
- s->pict_type = (buf[1] >> 3) & 7;
- }
- break;
- case SEQ_START_CODE:
- if (bytes_left >= 7) {
- pc->width = (buf[0] << 4) | (buf[1] >> 4);
- pc->height = ((buf[1] & 0x0f) << 8) | buf[2];
- avcodec_set_dimensions(avctx, pc->width, pc->height);
- frame_rate_index = buf[3] & 0xf;
- pc->frame_rate = avctx->time_base.den = frame_rate_tab[frame_rate_index];
- avctx->time_base.num = MPEG1_FRAME_RATE_BASE;
- avctx->bit_rate = ((buf[4]<<10) | (buf[5]<<2) | (buf[6]>>6))*400;
- avctx->codec_id = CODEC_ID_MPEG1VIDEO;
- avctx->sub_id = 1;
- }
- break;
- case EXT_START_CODE:
- if (bytes_left >= 1) {
- ext_type = (buf[0] >> 4);
- switch(ext_type) {
- case 0x1: /* sequence extension */
- if (bytes_left >= 6) {
- horiz_size_ext = ((buf[1] & 1) << 1) | (buf[2] >> 7);
- vert_size_ext = (buf[2] >> 5) & 3;
- bit_rate_ext = ((buf[2] & 0x1F)<<7) | (buf[3]>>1);
- frame_rate_ext_n = (buf[5] >> 5) & 3;
- frame_rate_ext_d = (buf[5] & 0x1f);
- pc->progressive_sequence = buf[1] & (1 << 3);
- avctx->has_b_frames= !(buf[5] >> 7);
-
- pc->width |=(horiz_size_ext << 12);
- pc->height |=( vert_size_ext << 12);
- avctx->bit_rate += (bit_rate_ext << 18) * 400;
- avcodec_set_dimensions(avctx, pc->width, pc->height);
- avctx->time_base.den = pc->frame_rate * (frame_rate_ext_n + 1);
- avctx->time_base.num = MPEG1_FRAME_RATE_BASE * (frame_rate_ext_d + 1);
- avctx->codec_id = CODEC_ID_MPEG2VIDEO;
- avctx->sub_id = 2; /* forces MPEG2 */
- }
- break;
- case 0x8: /* picture coding extension */
- if (bytes_left >= 5) {
- picture_structure = buf[2]&3;
- top_field_first = buf[3] & (1 << 7);
- repeat_first_field = buf[3] & (1 << 1);
- progressive_frame = buf[4] & (1 << 7);
-
- /* check if we must repeat the frame */
- if (repeat_first_field) {
- if (pc->progressive_sequence) {
- if (top_field_first)
- s->repeat_pict = 4;
- else
- s->repeat_pict = 2;
- } else if (progressive_frame) {
- s->repeat_pict = 1;
- }
- }
-
- /* the packet only represents half a frame
- XXX,FIXME maybe find a different solution */
- if(picture_structure != 3)
- s->repeat_pict = -1;
- }
- break;
- }
- }
- break;
- case -1:
- goto the_end;
- default:
- /* we stop parsing when we encounter a slice. It ensures
- that this function takes a negligible amount of time */
- if (start_code >= SLICE_MIN_START_CODE &&
- start_code <= SLICE_MAX_START_CODE)
- goto the_end;
- break;
- }
- }
- the_end: ;
-}
-
-static int mpegvideo_parse(AVCodecParserContext *s,
- AVCodecContext *avctx,
- uint8_t **poutbuf, int *poutbuf_size,
- const uint8_t *buf, int buf_size)
-{
- ParseContext1 *pc1 = s->priv_data;
- ParseContext *pc= &pc1->pc;
- int next;
-
- if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){
- next= buf_size;
- }else{
- next= ff_mpeg1_find_frame_end(pc, buf, buf_size);
-
- if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
- *poutbuf = NULL;
- *poutbuf_size = 0;
- return buf_size;
- }
-
- }
- /* we have a full frame : we just parse the first few MPEG headers
- to have the full timing information. The time take by this
- function should be negligible for uncorrupted streams */
- mpegvideo_extract_headers(s, avctx, buf, buf_size);
-#if 0
- printf("pict_type=%d frame_rate=%0.3f repeat_pict=%d\n",
- s->pict_type, (double)avctx->time_base.den / avctx->time_base.num, s->repeat_pict);
-#endif
-
- *poutbuf = (uint8_t *)buf;
- *poutbuf_size = buf_size;
- return next;
-}
-
-static int mpegvideo_split(AVCodecContext *avctx,
- const uint8_t *buf, int buf_size)
-{
- int i;
- uint32_t state= -1;
-
- for(i=0; i<buf_size; i++){
- state= (state<<8) | buf[i];
- if(state != 0x1B3 && state != 0x1B5 && state < 0x200 && state >= 0x100)
- return i-3;
- }
- return 0;
-}
-#endif /* CONFIG_MPEGVIDEO_PARSER */
-
void ff_parse_close(AVCodecParserContext *s)
{
ParseContext *pc = s->priv_data;
@@ -460,7 +283,7 @@ void ff_parse_close(AVCodecParserContext *s)
av_free(pc->buffer);
}
-static void parse1_close(AVCodecParserContext *s)
+void ff_parse1_close(AVCodecParserContext *s)
{
ParseContext1 *pc1 = s->priv_data;
@@ -538,33 +361,7 @@ static int mpeg4video_parse(AVCodecParserContext *s,
}
#endif
-#ifdef CONFIG_CAVSVIDEO_PARSER
-static int cavsvideo_parse(AVCodecParserContext *s,
- AVCodecContext *avctx,
- uint8_t **poutbuf, int *poutbuf_size,
- const uint8_t *buf, int buf_size)
-{
- ParseContext *pc = s->priv_data;
- int next;
-
- if(s->flags & PARSER_FLAG_COMPLETE_FRAMES){
- next= buf_size;
- }else{
- next= ff_cavs_find_frame_end(pc, buf, buf_size);
-
- if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
- *poutbuf = NULL;
- *poutbuf_size = 0;
- return buf_size;
- }
- }
- *poutbuf = (uint8_t *)buf;
- *poutbuf_size = buf_size;
- return next;
-}
-#endif /* CONFIG_CAVSVIDEO_PARSER */
-
-static int mpeg4video_split(AVCodecContext *avctx,
+int ff_mpeg4video_split(AVCodecContext *avctx,
const uint8_t *buf, int buf_size)
{
int i;
@@ -634,9 +431,7 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
}
/* no header seen : find one. We need at least MPA_HEADER_SIZE
bytes to parse it */
- len = MPA_HEADER_SIZE - len;
- if (len > buf_size)
- len = buf_size;
+ len = FFMIN(MPA_HEADER_SIZE - len, buf_size);
if (len > 0) {
memcpy(s->inbuf_ptr, buf_ptr, len);
buf_ptr += len;
@@ -645,11 +440,10 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
}
if ((s->inbuf_ptr - s->inbuf) >= MPA_HEADER_SIZE) {
got_header:
- sr= avctx->sample_rate;
header = (s->inbuf[0] << 24) | (s->inbuf[1] << 16) |
(s->inbuf[2] << 8) | s->inbuf[3];
- ret = mpa_decode_header(avctx, header);
+ ret = mpa_decode_header(avctx, header, &sr);
if (ret < 0) {
s->header_count= -2;
/* no sync found : move by one byte (inefficient, but simple!) */
@@ -673,8 +467,8 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
}
#endif
}
- if(s->header_count <= 0)
- avctx->sample_rate= sr; //FIXME ugly
+ if(s->header_count > 1)
+ avctx->sample_rate= sr;
}
} else
#if 0
@@ -736,14 +530,25 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
if (len < s->frame_size) {
if (s->frame_size > MPA_MAX_CODED_FRAME_SIZE)
s->frame_size = MPA_MAX_CODED_FRAME_SIZE;
- len = s->frame_size - len;
- if (len > buf_size)
- len = buf_size;
+ len = FFMIN(s->frame_size - len, buf_size);
memcpy(s->inbuf_ptr, buf_ptr, len);
buf_ptr += len;
s->inbuf_ptr += len;
buf_size -= len;
}
+
+ if(s->frame_size > 0 && buf_ptr - buf == s->inbuf_ptr - s->inbuf
+ && buf_size + buf_ptr - buf >= s->frame_size){
+ if(s->header_count > 0){
+ *poutbuf = buf;
+ *poutbuf_size = s->frame_size;
+ }
+ buf_ptr = buf + s->frame_size;
+ s->inbuf_ptr = s->inbuf;
+ s->frame_size = 0;
+ break;
+ }
+
// next_data:
if (s->frame_size > 0 &&
(s->inbuf_ptr - s->inbuf) >= s->frame_size) {
@@ -1016,34 +821,14 @@ static int ac3_parse(AVCodecParserContext *s1,
}
#endif /* CONFIG_AC3_PARSER || CONFIG_AAC_PARSER */
-#ifdef CONFIG_MPEGVIDEO_PARSER
-AVCodecParser mpegvideo_parser = {
- { CODEC_ID_MPEG1VIDEO, CODEC_ID_MPEG2VIDEO },
- sizeof(ParseContext1),
- NULL,
- mpegvideo_parse,
- parse1_close,
- mpegvideo_split,
-};
-#endif
#ifdef CONFIG_MPEG4VIDEO_PARSER
AVCodecParser mpeg4video_parser = {
{ CODEC_ID_MPEG4 },
sizeof(ParseContext1),
mpeg4video_parse_init,
mpeg4video_parse,
- parse1_close,
- mpeg4video_split,
-};
-#endif
-#ifdef CONFIG_CAVSVIDEO_PARSER
-AVCodecParser cavsvideo_parser = {
- { CODEC_ID_CAVS },
- sizeof(ParseContext1),
- NULL,
- cavsvideo_parse,
- parse1_close,
- mpeg4video_split,
+ ff_parse1_close,
+ ff_mpeg4video_split,
};
#endif
#ifdef CONFIG_MPEGAUDIO_PARSER
diff --git a/contrib/ffmpeg/libavcodec/parser.h b/contrib/ffmpeg/libavcodec/parser.h
new file mode 100644
index 000000000..3496b341f
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/parser.h
@@ -0,0 +1,63 @@
+/*
+ * AVCodecParser prototypes and definitions
+ * Copyright (c) 2003 Fabrice Bellard.
+ * Copyright (c) 2003 Michael Niedermayer.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FFMPEG_PARSER_H
+#define FFMPEG_PARSER_H
+
+typedef struct ParseContext{
+ uint8_t *buffer;
+ int index;
+ int last_index;
+ unsigned int buffer_size;
+ uint32_t state; ///< contains the last few bytes in MSB order
+ int frame_start_found;
+ int overread; ///< the number of bytes which were irreversibly read from the next frame
+ int overread_index; ///< the index into ParseContext.buffer of the overread bytes
+} ParseContext;
+
+struct MpegEncContext;
+
+typedef struct ParseContext1{
+ ParseContext pc;
+/* XXX/FIXME PC1 vs. PC */
+ /* MPEG2 specific */
+ AVRational frame_rate;
+ int progressive_sequence;
+ int width, height;
+
+ /* XXX: suppress that, needed by MPEG4 */
+ struct MpegEncContext *enc;
+ int first_picture;
+} ParseContext1;
+
+#define END_NOT_FOUND (-100)
+
+int ff_combine_frame(ParseContext *pc, int next, uint8_t **buf, int *buf_size);
+int ff_mpeg4video_split(AVCodecContext *avctx, const uint8_t *buf,
+ int buf_size);
+void ff_parse_close(AVCodecParserContext *s);
+void ff_parse1_close(AVCodecParserContext *s);
+
+/* h263dec.c */
+int ff_mpeg4_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size);
+
+#endif /* !FFMPEG_PARSER_H */
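
ParseContext and ff_combine_frame() exist so that each parser can be reduced to the same small skeleton: scan for the end of the current frame, let ff_combine_frame() buffer partial input across calls, and only hand back a pointer/size pair once a whole frame has been assembled. The mpegvideo and cavsvideo parsers deleted from parser.c above follow exactly this shape; a generic sketch, with find_frame_end() standing in for the codec-specific scan:

    static int generic_parse(AVCodecParserContext *s, AVCodecContext *avctx,
                             uint8_t **poutbuf, int *poutbuf_size,
                             const uint8_t *buf, int buf_size)
    {
        ParseContext *pc = s->priv_data;
        int next;

        if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
            next = buf_size;                          /* caller already delivers whole frames */
        } else {
            next = find_frame_end(pc, buf, buf_size); /* codec-specific; may return END_NOT_FOUND */
            if (ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size) < 0) {
                *poutbuf      = NULL;                 /* frame still incomplete, keep buffering */
                *poutbuf_size = 0;
                return buf_size;
            }
        }
        *poutbuf      = (uint8_t *)buf;
        *poutbuf_size = buf_size;
        return next;
    }

This is also why ParseContext1 keeps a plain ParseContext as its first member: the generic buffering operates on pc while the MPEG-specific fields live alongside it.
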
diff --git a/src/libffmpeg/libavcodec/pcm.c b/contrib/ffmpeg/libavcodec/pcm.c
index 0b4dd1c86..26c38b329 100644
--- a/src/libffmpeg/libavcodec/pcm.c
+++ b/contrib/ffmpeg/libavcodec/pcm.c
@@ -2,18 +2,20 @@
* PCM codecs
* Copyright (c) 2001 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/contrib/ffmpeg/libavcodec/png.c b/contrib/ffmpeg/libavcodec/png.c
new file mode 100644
index 000000000..a257492b7
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/png.c
@@ -0,0 +1,968 @@
+/*
+ * PNG image format
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avcodec.h"
+
+/* TODO:
+ * - add 2, 4 and 16 bit depth support
+ * - use filters when generating a png (better compression)
+ */
+
+#ifdef CONFIG_ZLIB
+#include <zlib.h>
+
+//#define DEBUG
+
+#define PNG_COLOR_MASK_PALETTE 1
+#define PNG_COLOR_MASK_COLOR 2
+#define PNG_COLOR_MASK_ALPHA 4
+
+#define PNG_COLOR_TYPE_GRAY 0
+#define PNG_COLOR_TYPE_PALETTE (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_PALETTE)
+#define PNG_COLOR_TYPE_RGB (PNG_COLOR_MASK_COLOR)
+#define PNG_COLOR_TYPE_RGB_ALPHA (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_ALPHA)
+#define PNG_COLOR_TYPE_GRAY_ALPHA (PNG_COLOR_MASK_ALPHA)
+
+#define PNG_FILTER_VALUE_NONE 0
+#define PNG_FILTER_VALUE_SUB 1
+#define PNG_FILTER_VALUE_UP 2
+#define PNG_FILTER_VALUE_AVG 3
+#define PNG_FILTER_VALUE_PAETH 4
+
+#define PNG_IHDR 0x0001
+#define PNG_IDAT 0x0002
+#define PNG_ALLIMAGE 0x0004
+#define PNG_PLTE 0x0008
+
+#define NB_PASSES 7
+
+#define IOBUF_SIZE 4096
+
+typedef struct PNGContext {
+ uint8_t *bytestream;
+ uint8_t *bytestream_start;
+ uint8_t *bytestream_end;
+ AVFrame picture;
+
+ int state;
+ int width, height;
+ int bit_depth;
+ int color_type;
+ int compression_type;
+ int interlace_type;
+ int filter_type;
+ int channels;
+ int bits_per_pixel;
+ int bpp;
+
+ uint8_t *image_buf;
+ int image_linesize;
+ uint32_t palette[256];
+ uint8_t *crow_buf;
+ uint8_t *last_row;
+ uint8_t *tmp_row;
+ int pass;
+ int crow_size; /* compressed row size (includes filter type) */
+ int row_size; /* decompressed row size */
+ int pass_row_size; /* decompressed row size of the current pass */
+ int y;
+ z_stream zstream;
+ uint8_t buf[IOBUF_SIZE];
+} PNGContext;
+
+static unsigned int get32(uint8_t **b){
+ (*b) += 4;
+ return ((*b)[-4]<<24) + ((*b)[-3]<<16) + ((*b)[-2]<<8) + (*b)[-1];
+}
+
+#ifdef CONFIG_ENCODERS
+static void put32(uint8_t **b, unsigned int v){
+ *(*b)++= v>>24;
+ *(*b)++= v>>16;
+ *(*b)++= v>>8;
+ *(*b)++= v;
+}
+#endif
+
+static const uint8_t pngsig[8] = {137, 80, 78, 71, 13, 10, 26, 10};
+
+/* Mask to determine which y pixels are valid in a pass */
+static const uint8_t png_pass_ymask[NB_PASSES] = {
+ 0x80, 0x80, 0x08, 0x88, 0x22, 0xaa, 0x55,
+};
+
+/* Mask to determine which y pixels can be written in a pass */
+static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
+ 0xff, 0xff, 0x0f, 0xcc, 0x33, 0xff, 0x55,
+};
+
+/* minimum x value */
+static const uint8_t png_pass_xmin[NB_PASSES] = {
+ 0, 4, 0, 2, 0, 1, 0
+};
+
+/* x shift to get row width */
+static const uint8_t png_pass_xshift[NB_PASSES] = {
+ 3, 3, 2, 2, 1, 1, 0
+};
+
+/* Mask to determine which pixels are valid in a pass */
+static const uint8_t png_pass_mask[NB_PASSES] = {
+ 0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff
+};
+
+/* Mask to determine which pixels to overwrite while displaying */
+static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
+ 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
+};
+#if 0
+static int png_probe(AVProbeData *pd)
+{
+ if (pd->buf_size >= 8 &&
+ memcmp(pd->buf, pngsig, 8) == 0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+#endif
+static void *png_zalloc(void *opaque, unsigned int items, unsigned int size)
+{
+ if(items >= UINT_MAX / size)
+ return NULL;
+ return av_malloc(items * size);
+}
+
+static void png_zfree(void *opaque, void *ptr)
+{
+ av_free(ptr);
+}
+
+static int png_get_nb_channels(int color_type)
+{
+ int channels;
+ channels = 1;
+ if ((color_type & (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_PALETTE)) ==
+ PNG_COLOR_MASK_COLOR)
+ channels = 3;
+ if (color_type & PNG_COLOR_MASK_ALPHA)
+ channels++;
+ return channels;
+}
+
+/* compute the row size of an interleaved pass */
+static int png_pass_row_size(int pass, int bits_per_pixel, int width)
+{
+ int shift, xmin, pass_width;
+
+ xmin = png_pass_xmin[pass];
+ if (width <= xmin)
+ return 0;
+ shift = png_pass_xshift[pass];
+ pass_width = (width - xmin + (1 << shift) - 1) >> shift;
+ return (pass_width * bits_per_pixel + 7) >> 3;
+}
+
+/* NOTE: we try to construct a good-looking image at each pass. width
+ is the original image width. We also do pixel format conversion at
+ this stage */
+static void png_put_interlaced_row(uint8_t *dst, int width,
+ int bits_per_pixel, int pass,
+ int color_type, const uint8_t *src)
+{
+ int x, mask, dsp_mask, j, src_x, b, bpp;
+ uint8_t *d;
+ const uint8_t *s;
+
+ mask = png_pass_mask[pass];
+ dsp_mask = png_pass_dsp_mask[pass];
+ switch(bits_per_pixel) {
+ case 1:
+ /* we must initialize the line to zero before writing to it */
+ if (pass == 0)
+ memset(dst, 0, (width + 7) >> 3);
+ src_x = 0;
+ for(x = 0; x < width; x++) {
+ j = (x & 7);
+ if ((dsp_mask << j) & 0x80) {
+ b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
+ dst[x >> 3] |= b << (7 - j);
+ }
+ if ((mask << j) & 0x80)
+ src_x++;
+ }
+ break;
+ default:
+ bpp = bits_per_pixel >> 3;
+ d = dst;
+ s = src;
+ if (color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ for(x = 0; x < width; x++) {
+ j = x & 7;
+ if ((dsp_mask << j) & 0x80) {
+ *(uint32_t *)d = (s[3] << 24) | (s[0] << 16) | (s[1] << 8) | s[2];
+ }
+ d += bpp;
+ if ((mask << j) & 0x80)
+ s += bpp;
+ }
+ } else {
+ for(x = 0; x < width; x++) {
+ j = x & 7;
+ if ((dsp_mask << j) & 0x80) {
+ memcpy(d, s, bpp);
+ }
+ d += bpp;
+ if ((mask << j) & 0x80)
+ s += bpp;
+ }
+ }
+ break;
+ }
+}
+
+#ifdef CONFIG_ENCODERS
+static void png_get_interlaced_row(uint8_t *dst, int row_size,
+ int bits_per_pixel, int pass,
+ const uint8_t *src, int width)
+{
+ int x, mask, dst_x, j, b, bpp;
+ uint8_t *d;
+ const uint8_t *s;
+
+ mask = png_pass_mask[pass];
+ switch(bits_per_pixel) {
+ case 1:
+ memset(dst, 0, row_size);
+ dst_x = 0;
+ for(x = 0; x < width; x++) {
+ j = (x & 7);
+ if ((mask << j) & 0x80) {
+ b = (src[x >> 3] >> (7 - j)) & 1;
+ dst[dst_x >> 3] |= b << (7 - (dst_x & 7));
+ dst_x++;
+ }
+ }
+ break;
+ default:
+ bpp = bits_per_pixel >> 3;
+ d = dst;
+ s = src;
+ for(x = 0; x < width; x++) {
+ j = x & 7;
+ if ((mask << j) & 0x80) {
+ memcpy(d, s, bpp);
+ d += bpp;
+ }
+ s += bpp;
+ }
+ break;
+ }
+}
+#endif
+
+/* XXX: optimize */
+/* NOTE: 'dst' can be equal to 'last' */
+static void png_filter_row(uint8_t *dst, int filter_type,
+ uint8_t *src, uint8_t *last, int size, int bpp)
+{
+ int i, p;
+
+ switch(filter_type) {
+ case PNG_FILTER_VALUE_NONE:
+ memcpy(dst, src, size);
+ break;
+ case PNG_FILTER_VALUE_SUB:
+ for(i = 0; i < bpp; i++) {
+ dst[i] = src[i];
+ }
+ for(i = bpp; i < size; i++) {
+ p = dst[i - bpp];
+ dst[i] = p + src[i];
+ }
+ break;
+ case PNG_FILTER_VALUE_UP:
+ for(i = 0; i < size; i++) {
+ p = last[i];
+ dst[i] = p + src[i];
+ }
+ break;
+ case PNG_FILTER_VALUE_AVG:
+ for(i = 0; i < bpp; i++) {
+ p = (last[i] >> 1);
+ dst[i] = p + src[i];
+ }
+ for(i = bpp; i < size; i++) {
+ p = ((dst[i - bpp] + last[i]) >> 1);
+ dst[i] = p + src[i];
+ }
+ break;
+ case PNG_FILTER_VALUE_PAETH:
+ for(i = 0; i < bpp; i++) {
+ p = last[i];
+ dst[i] = p + src[i];
+ }
+ for(i = bpp; i < size; i++) {
+ int a, b, c, pa, pb, pc;
+
+ a = dst[i - bpp];
+ b = last[i];
+ c = last[i - bpp];
+
+ p = b - c;
+ pc = a - c;
+
+ pa = abs(p);
+ pb = abs(pc);
+ pc = abs(p + pc);
+
+ if (pa <= pb && pa <= pc)
+ p = a;
+ else if (pb <= pc)
+ p = b;
+ else
+ p = c;
+ dst[i] = p + src[i];
+ }
+ break;
+ }
+}
+
+#ifdef CONFIG_ENCODERS
+static void convert_from_rgba32(uint8_t *dst, const uint8_t *src, int width)
+{
+ uint8_t *d;
+ int j;
+ unsigned int v;
+
+ d = dst;
+ for(j = 0; j < width; j++) {
+ v = ((const uint32_t *)src)[j];
+ d[0] = v >> 16;
+ d[1] = v >> 8;
+ d[2] = v;
+ d[3] = v >> 24;
+ d += 4;
+ }
+}
+#endif
+
+#ifdef CONFIG_DECODERS
+static void convert_to_rgba32(uint8_t *dst, const uint8_t *src, int width)
+{
+ int j;
+ unsigned int r, g, b, a;
+
+ for(j = 0;j < width; j++) {
+ r = src[0];
+ g = src[1];
+ b = src[2];
+ a = src[3];
+ *(uint32_t *)dst = (a << 24) | (r << 16) | (g << 8) | b;
+ dst += 4;
+ src += 4;
+ }
+}
+
+/* process exactly one decompressed row */
+static void png_handle_row(PNGContext *s)
+{
+ uint8_t *ptr, *last_row;
+ int got_line;
+
+ if (!s->interlace_type) {
+ ptr = s->image_buf + s->image_linesize * s->y;
+ /* need to swap bytes correctly for RGB_ALPHA */
+ if (s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
+ s->last_row, s->row_size, s->bpp);
+ memcpy(s->last_row, s->tmp_row, s->row_size);
+ convert_to_rgba32(ptr, s->tmp_row, s->width);
+ } else {
+ /* in the normal case, we avoid one copy */
+ if (s->y == 0)
+ last_row = s->last_row;
+ else
+ last_row = ptr - s->image_linesize;
+
+ png_filter_row(ptr, s->crow_buf[0], s->crow_buf + 1,
+ last_row, s->row_size, s->bpp);
+ }
+ s->y++;
+ if (s->y == s->height) {
+ s->state |= PNG_ALLIMAGE;
+ }
+ } else {
+ got_line = 0;
+ for(;;) {
+ ptr = s->image_buf + s->image_linesize * s->y;
+ if ((png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
+ /* if we already read one row, it is time to stop and
+ wait for the next one */
+ if (got_line)
+ break;
+ png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
+ s->last_row, s->pass_row_size, s->bpp);
+ memcpy(s->last_row, s->tmp_row, s->pass_row_size);
+ got_line = 1;
+ }
+ if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
+ /* NOTE: rgba32 is handled directly in png_put_interlaced_row */
+ png_put_interlaced_row(ptr, s->width, s->bits_per_pixel, s->pass,
+ s->color_type, s->last_row);
+ }
+ s->y++;
+ if (s->y == s->height) {
+ for(;;) {
+ if (s->pass == NB_PASSES - 1) {
+ s->state |= PNG_ALLIMAGE;
+ goto the_end;
+ } else {
+ s->pass++;
+ s->y = 0;
+ s->pass_row_size = png_pass_row_size(s->pass,
+ s->bits_per_pixel,
+ s->width);
+ s->crow_size = s->pass_row_size + 1;
+ if (s->pass_row_size != 0)
+ break;
+ /* skip pass if empty row */
+ }
+ }
+ }
+ }
+ the_end: ;
+ }
+}
+
+static int png_decode_idat(PNGContext *s, int length)
+{
+ int ret;
+ s->zstream.avail_in = length;
+ s->zstream.next_in = s->bytestream;
+ s->bytestream += length;
+
+ if(s->bytestream > s->bytestream_end)
+ return -1;
+
+ /* decode one line if possible */
+ while (s->zstream.avail_in > 0) {
+ ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
+ if (ret != Z_OK && ret != Z_STREAM_END) {
+ return -1;
+ }
+ if (s->zstream.avail_out == 0) {
+ if (!(s->state & PNG_ALLIMAGE)) {
+ png_handle_row(s);
+ }
+ s->zstream.avail_out = s->crow_size;
+ s->zstream.next_out = s->crow_buf;
+ }
+ }
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ PNGContext * const s = avctx->priv_data;
+ AVFrame *picture = data;
+ AVFrame * const p= (AVFrame*)&s->picture;
+ uint32_t tag, length;
+ int ret, crc;
+
+ s->bytestream_start=
+ s->bytestream= buf;
+ s->bytestream_end= buf + buf_size;
+
+ /* check signature */
+ if (memcmp(s->bytestream, pngsig, 8) != 0)
+ return -1;
+ s->bytestream+= 8;
+ s->y=
+ s->state=0;
+// memset(s, 0, sizeof(PNGContext));
+ /* init the zlib */
+ s->zstream.zalloc = png_zalloc;
+ s->zstream.zfree = png_zfree;
+ s->zstream.opaque = NULL;
+ ret = inflateInit(&s->zstream);
+ if (ret != Z_OK)
+ return -1;
+ for(;;) {
+ int tag32;
+ if (s->bytestream >= s->bytestream_end)
+ goto fail;
+ length = get32(&s->bytestream);
+ if (length > 0x7fffffff)
+ goto fail;
+ tag32 = get32(&s->bytestream);
+ tag = bswap_32(tag32);
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_DEBUG, "png: tag=%c%c%c%c length=%u\n",
+ (tag & 0xff),
+ ((tag >> 8) & 0xff),
+ ((tag >> 16) & 0xff),
+ ((tag >> 24) & 0xff), length);
+#endif
+ switch(tag) {
+ case MKTAG('I', 'H', 'D', 'R'):
+ if (length != 13)
+ goto fail;
+ s->width = get32(&s->bytestream);
+ s->height = get32(&s->bytestream);
+ if(avcodec_check_dimensions(avctx, s->width, s->height)){
+ s->width= s->height= 0;
+ goto fail;
+ }
+ s->bit_depth = *s->bytestream++;
+ s->color_type = *s->bytestream++;
+ s->compression_type = *s->bytestream++;
+ s->filter_type = *s->bytestream++;
+ s->interlace_type = *s->bytestream++;
+ crc = get32(&s->bytestream);
+ s->state |= PNG_IHDR;
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
+ s->width, s->height, s->bit_depth, s->color_type,
+ s->compression_type, s->filter_type, s->interlace_type);
+#endif
+ break;
+ case MKTAG('I', 'D', 'A', 'T'):
+ if (!(s->state & PNG_IHDR))
+ goto fail;
+ if (!(s->state & PNG_IDAT)) {
+ /* init image info */
+ avctx->width = s->width;
+ avctx->height = s->height;
+
+ s->channels = png_get_nb_channels(s->color_type);
+ s->bits_per_pixel = s->bit_depth * s->channels;
+ s->bpp = (s->bits_per_pixel + 7) >> 3;
+ s->row_size = (avctx->width * s->bits_per_pixel + 7) >> 3;
+
+ if (s->bit_depth == 8 &&
+ s->color_type == PNG_COLOR_TYPE_RGB) {
+ avctx->pix_fmt = PIX_FMT_RGB24;
+ } else if (s->bit_depth == 8 &&
+ s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ avctx->pix_fmt = PIX_FMT_RGBA32;
+ } else if (s->bit_depth == 8 &&
+ s->color_type == PNG_COLOR_TYPE_GRAY) {
+ avctx->pix_fmt = PIX_FMT_GRAY8;
+ } else if (s->bit_depth == 16 &&
+ s->color_type == PNG_COLOR_TYPE_GRAY) {
+ avctx->pix_fmt = PIX_FMT_GRAY16BE;
+ } else if (s->bit_depth == 1 &&
+ s->color_type == PNG_COLOR_TYPE_GRAY) {
+ avctx->pix_fmt = PIX_FMT_MONOBLACK;
+ } else if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
+ avctx->pix_fmt = PIX_FMT_PAL8;
+ } else {
+ goto fail;
+ }
+ if(p->data[0])
+ avctx->release_buffer(avctx, p);
+
+ p->reference= 0;
+ if(avctx->get_buffer(avctx, p) < 0){
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ goto fail;
+ }
+ p->pict_type= FF_I_TYPE;
+ p->key_frame= 1;
+ p->interlaced_frame = !!s->interlace_type;
+
+ /* compute the compressed row size */
+ if (!s->interlace_type) {
+ s->crow_size = s->row_size + 1;
+ } else {
+ s->pass = 0;
+ s->pass_row_size = png_pass_row_size(s->pass,
+ s->bits_per_pixel,
+ s->width);
+ s->crow_size = s->pass_row_size + 1;
+ }
+#ifdef DEBUG
+ av_log(avctx, AV_LOG_DEBUG, "row_size=%d crow_size =%d\n",
+ s->row_size, s->crow_size);
+#endif
+ s->image_buf = p->data[0];
+ s->image_linesize = p->linesize[0];
+ /* copy the palette if needed */
+ if (s->color_type == PNG_COLOR_TYPE_PALETTE)
+ memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));
+ /* an empty row is used when differencing against the first row */
+ s->last_row = av_mallocz(s->row_size);
+ if (!s->last_row)
+ goto fail;
+ if (s->interlace_type ||
+ s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ s->tmp_row = av_malloc(s->row_size);
+ if (!s->tmp_row)
+ goto fail;
+ }
+ /* compressed row */
+ s->crow_buf = av_malloc(s->row_size + 1);
+ if (!s->crow_buf)
+ goto fail;
+ s->zstream.avail_out = s->crow_size;
+ s->zstream.next_out = s->crow_buf;
+ }
+ s->state |= PNG_IDAT;
+ if (png_decode_idat(s, length) < 0)
+ goto fail;
+ /* skip crc */
+ crc = get32(&s->bytestream);
+ break;
+ case MKTAG('P', 'L', 'T', 'E'):
+ {
+ int n, i, r, g, b;
+
+ if ((length % 3) != 0 || length > 256 * 3)
+ goto skip_tag;
+ /* read the palette */
+ n = length / 3;
+ for(i=0;i<n;i++) {
+ r = *s->bytestream++;
+ g = *s->bytestream++;
+ b = *s->bytestream++;
+ s->palette[i] = (0xff << 24) | (r << 16) | (g << 8) | b;
+ }
+ for(;i<256;i++) {
+ s->palette[i] = (0xff << 24);
+ }
+ s->state |= PNG_PLTE;
+ crc = get32(&s->bytestream);
+ }
+ break;
+ case MKTAG('t', 'R', 'N', 'S'):
+ {
+ int v, i;
+
+ /* read the transparency. XXX: Only palette mode supported */
+ if (s->color_type != PNG_COLOR_TYPE_PALETTE ||
+ length > 256 ||
+ !(s->state & PNG_PLTE))
+ goto skip_tag;
+ for(i=0;i<length;i++) {
+ v = *s->bytestream++;
+ s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
+ }
+ crc = get32(&s->bytestream);
+ }
+ break;
+ case MKTAG('I', 'E', 'N', 'D'):
+ if (!(s->state & PNG_ALLIMAGE))
+ goto fail;
+ crc = get32(&s->bytestream);
+ goto exit_loop;
+ default:
+ /* skip tag */
+ skip_tag:
+ s->bytestream += length + 4;
+ break;
+ }
+ }
+ exit_loop:
+ *picture= *(AVFrame*)&s->picture;
+ *data_size = sizeof(AVPicture);
+
+ ret = s->bytestream - s->bytestream_start;
+ the_end:
+ inflateEnd(&s->zstream);
+ av_freep(&s->crow_buf);
+ av_freep(&s->last_row);
+ av_freep(&s->tmp_row);
+ return ret;
+ fail:
+ ret = -1;
+ goto the_end;
+}
+#endif
+
+#ifdef CONFIG_ENCODERS
+static void png_write_chunk(uint8_t **f, uint32_t tag,
+ const uint8_t *buf, int length)
+{
+ uint32_t crc;
+ uint8_t tagbuf[4];
+
+ put32(f, length);
+ crc = crc32(0, Z_NULL, 0);
+ tagbuf[0] = tag;
+ tagbuf[1] = tag >> 8;
+ tagbuf[2] = tag >> 16;
+ tagbuf[3] = tag >> 24;
+ crc = crc32(crc, tagbuf, 4);
+ put32(f, bswap_32(tag));
+ if (length > 0) {
+ crc = crc32(crc, buf, length);
+ memcpy(*f, buf, length);
+ *f += length;
+ }
+ put32(f, crc);
+}
+
+/* XXX: use avcodec generic function ? */
+static void to_be32(uint8_t *p, uint32_t v)
+{
+ p[0] = v >> 24;
+ p[1] = v >> 16;
+ p[2] = v >> 8;
+ p[3] = v;
+}
+
+/* XXX: do filtering */
+static int png_write_row(PNGContext *s, const uint8_t *data, int size)
+{
+ int ret;
+
+ s->zstream.avail_in = size;
+ s->zstream.next_in = (uint8_t *)data;
+ while (s->zstream.avail_in > 0) {
+ ret = deflate(&s->zstream, Z_NO_FLUSH);
+ if (ret != Z_OK)
+ return -1;
+ if (s->zstream.avail_out == 0) {
+ if(s->bytestream_end - s->bytestream > IOBUF_SIZE + 100)
+ png_write_chunk(&s->bytestream, MKTAG('I', 'D', 'A', 'T'), s->buf, IOBUF_SIZE);
+ s->zstream.avail_out = IOBUF_SIZE;
+ s->zstream.next_out = s->buf;
+ }
+ }
+ return 0;
+}
+#endif /* CONFIG_ENCODERS */
+
+static int common_init(AVCodecContext *avctx){
+ PNGContext *s = avctx->priv_data;
+
+ avcodec_get_frame_defaults((AVFrame*)&s->picture);
+ avctx->coded_frame= (AVFrame*)&s->picture;
+// s->avctx= avctx;
+
+ return 0;
+}
+
+#ifdef CONFIG_ENCODERS
+static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
+ PNGContext *s = avctx->priv_data;
+ AVFrame *pict = data;
+ AVFrame * const p= (AVFrame*)&s->picture;
+ int bit_depth, color_type, y, len, row_size, ret, is_progressive;
+ int bits_per_pixel, pass_row_size;
+ uint8_t *ptr;
+ uint8_t *crow_buf = NULL;
+ uint8_t *tmp_buf = NULL;
+
+ *p = *pict;
+ p->pict_type= FF_I_TYPE;
+ p->key_frame= 1;
+
+ s->bytestream_start=
+ s->bytestream= buf;
+ s->bytestream_end= buf+buf_size;
+
+ is_progressive = !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
+ switch(avctx->pix_fmt) {
+ case PIX_FMT_RGBA32:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_RGB_ALPHA;
+ break;
+ case PIX_FMT_RGB24:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_RGB;
+ break;
+ case PIX_FMT_GRAY8:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case PIX_FMT_MONOBLACK:
+ bit_depth = 1;
+ color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case PIX_FMT_PAL8:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_PALETTE;
+ break;
+ default:
+ return -1;
+ }
+ bits_per_pixel = png_get_nb_channels(color_type) * bit_depth;
+ row_size = (avctx->width * bits_per_pixel + 7) >> 3;
+
+ s->zstream.zalloc = png_zalloc;
+ s->zstream.zfree = png_zfree;
+ s->zstream.opaque = NULL;
+ ret = deflateInit2(&s->zstream, Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED, 15, 8, Z_DEFAULT_STRATEGY);
+ if (ret != Z_OK)
+ return -1;
+ crow_buf = av_malloc(row_size + 1);
+ if (!crow_buf)
+ goto fail;
+ if (is_progressive) {
+ tmp_buf = av_malloc(row_size + 1);
+ if (!tmp_buf)
+ goto fail;
+ }
+
+ /* write png header */
+ memcpy(s->bytestream, pngsig, 8);
+ s->bytestream += 8;
+
+ to_be32(s->buf, avctx->width);
+ to_be32(s->buf + 4, avctx->height);
+ s->buf[8] = bit_depth;
+ s->buf[9] = color_type;
+ s->buf[10] = 0; /* compression type */
+ s->buf[11] = 0; /* filter type */
+ s->buf[12] = is_progressive; /* interlace type */
+
+ png_write_chunk(&s->bytestream, MKTAG('I', 'H', 'D', 'R'), s->buf, 13);
+
+ /* put the palette if needed */
+ if (color_type == PNG_COLOR_TYPE_PALETTE) {
+ int has_alpha, alpha, i;
+ unsigned int v;
+ uint32_t *palette;
+ uint8_t *alpha_ptr;
+
+ palette = (uint32_t *)p->data[1];
+ ptr = s->buf;
+ alpha_ptr = s->buf + 256 * 3;
+ has_alpha = 0;
+ for(i = 0; i < 256; i++) {
+ v = palette[i];
+ alpha = v >> 24;
+ if (alpha && alpha != 0xff)
+ has_alpha = 1;
+ *alpha_ptr++ = alpha;
+ ptr[0] = v >> 16;
+ ptr[1] = v >> 8;
+ ptr[2] = v;
+ ptr += 3;
+ }
+ png_write_chunk(&s->bytestream, MKTAG('P', 'L', 'T', 'E'), s->buf, 256 * 3);
+ if (has_alpha) {
+ png_write_chunk(&s->bytestream, MKTAG('t', 'R', 'N', 'S'), s->buf + 256 * 3, 256);
+ }
+ }
+
+ /* now put each row */
+ s->zstream.avail_out = IOBUF_SIZE;
+ s->zstream.next_out = s->buf;
+ if (is_progressive) {
+ uint8_t *ptr1;
+ int pass;
+
+ for(pass = 0; pass < NB_PASSES; pass++) {
+ /* NOTE: a pass is completely omitted if no pixels would be
+ output */
+ pass_row_size = png_pass_row_size(pass, bits_per_pixel, avctx->width);
+ if (pass_row_size > 0) {
+ for(y = 0; y < avctx->height; y++) {
+ if ((png_pass_ymask[pass] << (y & 7)) & 0x80) {
+ ptr = p->data[0] + y * p->linesize[0];
+ if (color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ convert_from_rgba32(tmp_buf, ptr, avctx->width);
+ ptr1 = tmp_buf;
+ } else {
+ ptr1 = ptr;
+ }
+ png_get_interlaced_row(crow_buf + 1, pass_row_size,
+ bits_per_pixel, pass,
+ ptr1, avctx->width);
+ crow_buf[0] = PNG_FILTER_VALUE_NONE;
+ png_write_row(s, crow_buf, pass_row_size + 1);
+ }
+ }
+ }
+ }
+ } else {
+ for(y = 0; y < avctx->height; y++) {
+ ptr = p->data[0] + y * p->linesize[0];
+ if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
+ convert_from_rgba32(crow_buf + 1, ptr, avctx->width);
+ else
+ memcpy(crow_buf + 1, ptr, row_size);
+ crow_buf[0] = PNG_FILTER_VALUE_NONE;
+ png_write_row(s, crow_buf, row_size + 1);
+ }
+ }
+ /* compress last bytes */
+ for(;;) {
+ ret = deflate(&s->zstream, Z_FINISH);
+ if (ret == Z_OK || ret == Z_STREAM_END) {
+ len = IOBUF_SIZE - s->zstream.avail_out;
+ if (len > 0 && s->bytestream_end - s->bytestream > len + 100) {
+ png_write_chunk(&s->bytestream, MKTAG('I', 'D', 'A', 'T'), s->buf, len);
+ }
+ s->zstream.avail_out = IOBUF_SIZE;
+ s->zstream.next_out = s->buf;
+ if (ret == Z_STREAM_END)
+ break;
+ } else {
+ goto fail;
+ }
+ }
+ png_write_chunk(&s->bytestream, MKTAG('I', 'E', 'N', 'D'), NULL, 0);
+
+ ret = s->bytestream - s->bytestream_start;
+ the_end:
+ av_free(crow_buf);
+ av_free(tmp_buf);
+ deflateEnd(&s->zstream);
+ return ret;
+ fail:
+ ret = -1;
+ goto the_end;
+}
+#endif
+
+#ifdef CONFIG_PNG_DECODER
+AVCodec png_decoder = {
+ "png",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_PNG,
+ sizeof(PNGContext),
+ common_init,
+ NULL,
+ NULL, //decode_end,
+ decode_frame,
+ 0 /*CODEC_CAP_DR1*/ /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
+ NULL
+};
+#endif
+
+#ifdef CONFIG_PNG_ENCODER
+AVCodec png_encoder = {
+ "png",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_PNG,
+ sizeof(PNGContext),
+ common_init,
+ encode_frame,
+ NULL, //encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_PAL8, PIX_FMT_GRAY8, PIX_FMT_MONOBLACK, -1},
+};
+#endif // CONFIG_PNG_ENCODER
+#endif
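
The PNG_FILTER_VALUE_PAETH branch of png_filter_row() above is a condensed Paeth predictor: the estimate p = a + b - c is never formed explicitly, only the three distances to a, b and c via p = b - c and pc = a - c. Written out in the textbook form it is equivalent to (a standalone sketch, not code from this commit):

    #include <stdlib.h>   /* abs() */

    /* textbook Paeth predictor: a = left, b = above, c = above-left */
    static int paeth_predictor(int a, int b, int c)
    {
        int p  = a + b - c;    /* initial estimate */
        int pa = abs(p - a);   /* == abs(b - c), the code's pa          */
        int pb = abs(p - b);   /* == abs(a - c), the code's pb          */
        int pc = abs(p - c);   /* == abs((b - c) + (a - c)), the code's pc */

        if (pa <= pb && pa <= pc)
            return a;
        if (pb <= pc)
            return b;
        return c;
    }
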
diff --git a/contrib/ffmpeg/libavcodec/pnm.c b/contrib/ffmpeg/libavcodec/pnm.c
new file mode 100644
index 000000000..610bb28be
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/pnm.c
@@ -0,0 +1,606 @@
+/*
+ * PNM image format
+ * Copyright (c) 2002, 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avcodec.h"
+#include "parser.h" //for ParseContext
+
+typedef struct PNMContext {
+ uint8_t *bytestream;
+ uint8_t *bytestream_start;
+ uint8_t *bytestream_end;
+ AVFrame picture;
+} PNMContext;
+
+static inline int pnm_space(int c)
+{
+ return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
+}
+
+static void pnm_get(PNMContext *sc, char *str, int buf_size)
+{
+ char *s;
+ int c;
+
+ /* skip spaces and comments */
+ for(;;) {
+ c = *sc->bytestream++;
+ if (c == '#') {
+ do {
+ c = *sc->bytestream++;
+ } while (c != '\n' && sc->bytestream < sc->bytestream_end);
+ } else if (!pnm_space(c)) {
+ break;
+ }
+ }
+
+ s = str;
+ while (sc->bytestream < sc->bytestream_end && !pnm_space(c)) {
+ if ((s - str) < buf_size - 1)
+ *s++ = c;
+ c = *sc->bytestream++;
+ }
+ *s = '\0';
+}
+
+static int common_init(AVCodecContext *avctx){
+ PNMContext *s = avctx->priv_data;
+
+ avcodec_get_frame_defaults((AVFrame*)&s->picture);
+ avctx->coded_frame= (AVFrame*)&s->picture;
+
+ return 0;
+}
+
+static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
+ char buf1[32], tuple_type[32];
+ int h, w, depth, maxval;
+
+ pnm_get(s, buf1, sizeof(buf1));
+ if (!strcmp(buf1, "P4")) {
+ avctx->pix_fmt = PIX_FMT_MONOWHITE;
+ } else if (!strcmp(buf1, "P5")) {
+ if (avctx->codec_id == CODEC_ID_PGMYUV)
+ avctx->pix_fmt = PIX_FMT_YUV420P;
+ else
+ avctx->pix_fmt = PIX_FMT_GRAY8;
+ } else if (!strcmp(buf1, "P6")) {
+ avctx->pix_fmt = PIX_FMT_RGB24;
+ } else if (!strcmp(buf1, "P7")) {
+ w = -1;
+ h = -1;
+ maxval = -1;
+ depth = -1;
+ tuple_type[0] = '\0';
+ for(;;) {
+ pnm_get(s, buf1, sizeof(buf1));
+ if (!strcmp(buf1, "WIDTH")) {
+ pnm_get(s, buf1, sizeof(buf1));
+ w = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "HEIGHT")) {
+ pnm_get(s, buf1, sizeof(buf1));
+ h = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "DEPTH")) {
+ pnm_get(s, buf1, sizeof(buf1));
+ depth = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "MAXVAL")) {
+ pnm_get(s, buf1, sizeof(buf1));
+ maxval = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "TUPLETYPE")) {
+ pnm_get(s, tuple_type, sizeof(tuple_type));
+ } else if (!strcmp(buf1, "ENDHDR")) {
+ break;
+ } else {
+ return -1;
+ }
+ }
+ /* check that all tags are present */
+ if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0' || avcodec_check_dimensions(avctx, w, h))
+ return -1;
+
+ avctx->width = w;
+ avctx->height = h;
+ if (depth == 1) {
+ if (maxval == 1)
+ avctx->pix_fmt = PIX_FMT_MONOWHITE;
+ else
+ avctx->pix_fmt = PIX_FMT_GRAY8;
+ } else if (depth == 3) {
+ avctx->pix_fmt = PIX_FMT_RGB24;
+ } else if (depth == 4) {
+ avctx->pix_fmt = PIX_FMT_RGBA32;
+ } else {
+ return -1;
+ }
+ return 0;
+ } else {
+ return -1;
+ }
+ pnm_get(s, buf1, sizeof(buf1));
+ avctx->width = atoi(buf1);
+ if (avctx->width <= 0)
+ return -1;
+ pnm_get(s, buf1, sizeof(buf1));
+ avctx->height = atoi(buf1);
+ if(avcodec_check_dimensions(avctx, avctx->width, avctx->height))
+ return -1;
+ if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
+ pnm_get(s, buf1, sizeof(buf1));
+ if(atoi(buf1) == 65535 && avctx->pix_fmt == PIX_FMT_GRAY8)
+ avctx->pix_fmt = PIX_FMT_GRAY16BE;
+ }
+ /* more checks if YUV420 */
+ if (avctx->pix_fmt == PIX_FMT_YUV420P) {
+ if ((avctx->width & 1) != 0)
+ return -1;
+ h = (avctx->height * 2);
+ if ((h % 3) != 0)
+ return -1;
+ h /= 3;
+ avctx->height = h;
+ }
+ return 0;
+}
+
+static int pnm_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ PNMContext * const s = avctx->priv_data;
+ AVFrame *picture = data;
+ AVFrame * const p= (AVFrame*)&s->picture;
+ int i, n, linesize, h;
+ unsigned char *ptr;
+
+ s->bytestream_start=
+ s->bytestream= buf;
+ s->bytestream_end= buf + buf_size;
+
+ if(pnm_decode_header(avctx, s) < 0)
+ return -1;
+
+ if(p->data[0])
+ avctx->release_buffer(avctx, p);
+
+ p->reference= 0;
+ if(avctx->get_buffer(avctx, p) < 0){
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+ p->pict_type= FF_I_TYPE;
+ p->key_frame= 1;
+
+ switch(avctx->pix_fmt) {
+ default:
+ return -1;
+ case PIX_FMT_RGB24:
+ n = avctx->width * 3;
+ goto do_read;
+ case PIX_FMT_GRAY8:
+ n = avctx->width;
+ goto do_read;
+ case PIX_FMT_GRAY16BE:
+ n = avctx->width * 2;
+ goto do_read;
+ case PIX_FMT_MONOWHITE:
+ case PIX_FMT_MONOBLACK:
+ n = (avctx->width + 7) >> 3;
+ do_read:
+ ptr = p->data[0];
+ linesize = p->linesize[0];
+ if(s->bytestream + n*avctx->height > s->bytestream_end)
+ return -1;
+ for(i = 0; i < avctx->height; i++) {
+ memcpy(ptr, s->bytestream, n);
+ s->bytestream += n;
+ ptr += linesize;
+ }
+ break;
+ case PIX_FMT_YUV420P:
+ {
+ unsigned char *ptr1, *ptr2;
+
+ n = avctx->width;
+ ptr = p->data[0];
+ linesize = p->linesize[0];
+ if(s->bytestream + n*avctx->height*3/2 > s->bytestream_end)
+ return -1;
+ for(i = 0; i < avctx->height; i++) {
+ memcpy(ptr, s->bytestream, n);
+ s->bytestream += n;
+ ptr += linesize;
+ }
+ ptr1 = p->data[1];
+ ptr2 = p->data[2];
+ n >>= 1;
+ h = avctx->height >> 1;
+ for(i = 0; i < h; i++) {
+ memcpy(ptr1, s->bytestream, n);
+ s->bytestream += n;
+ memcpy(ptr2, s->bytestream, n);
+ s->bytestream += n;
+ ptr1 += p->linesize[1];
+ ptr2 += p->linesize[2];
+ }
+ }
+ break;
+ case PIX_FMT_RGBA32:
+ ptr = p->data[0];
+ linesize = p->linesize[0];
+ if(s->bytestream + avctx->width*avctx->height*4 > s->bytestream_end)
+ return -1;
+ for(i = 0; i < avctx->height; i++) {
+ int j, r, g, b, a;
+
+ for(j = 0;j < avctx->width; j++) {
+ r = *s->bytestream++;
+ g = *s->bytestream++;
+ b = *s->bytestream++;
+ a = *s->bytestream++;
+ ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
+ }
+ ptr += linesize;
+ }
+ break;
+ }
+ *picture= *(AVFrame*)&s->picture;
+ *data_size = sizeof(AVPicture);
+
+ return s->bytestream - s->bytestream_start;
+}
+
+static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
+ PNMContext *s = avctx->priv_data;
+ AVFrame *pict = data;
+ AVFrame * const p= (AVFrame*)&s->picture;
+ int i, h, h1, c, n, linesize;
+ uint8_t *ptr, *ptr1, *ptr2;
+
+ if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
+ av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ return -1;
+ }
+
+ *p = *pict;
+ p->pict_type= FF_I_TYPE;
+ p->key_frame= 1;
+
+ s->bytestream_start=
+ s->bytestream= outbuf;
+ s->bytestream_end= outbuf+buf_size;
+
+ h = avctx->height;
+ h1 = h;
+ switch(avctx->pix_fmt) {
+ case PIX_FMT_MONOWHITE:
+ c = '4';
+ n = (avctx->width + 7) >> 3;
+ break;
+ case PIX_FMT_GRAY8:
+ c = '5';
+ n = avctx->width;
+ break;
+ case PIX_FMT_GRAY16BE:
+ c = '5';
+ n = avctx->width * 2;
+ break;
+ case PIX_FMT_RGB24:
+ c = '6';
+ n = avctx->width * 3;
+ break;
+ case PIX_FMT_YUV420P:
+ c = '5';
+ n = avctx->width;
+ h1 = (h * 3) / 2;
+ break;
+ default:
+ return -1;
+ }
+ snprintf(s->bytestream, s->bytestream_end - s->bytestream,
+ "P%c\n%d %d\n",
+ c, avctx->width, h1);
+ s->bytestream += strlen(s->bytestream);
+ if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
+ snprintf(s->bytestream, s->bytestream_end - s->bytestream,
+ "%d\n", (avctx->pix_fmt != PIX_FMT_GRAY16BE) ? 255 : 65535);
+ s->bytestream += strlen(s->bytestream);
+ }
+
+ ptr = p->data[0];
+ linesize = p->linesize[0];
+ for(i=0;i<h;i++) {
+ memcpy(s->bytestream, ptr, n);
+ s->bytestream += n;
+ ptr += linesize;
+ }
+
+ if (avctx->pix_fmt == PIX_FMT_YUV420P) {
+ h >>= 1;
+ n >>= 1;
+ ptr1 = p->data[1];
+ ptr2 = p->data[2];
+ for(i=0;i<h;i++) {
+ memcpy(s->bytestream, ptr1, n);
+ s->bytestream += n;
+ memcpy(s->bytestream, ptr2, n);
+ s->bytestream += n;
+ ptr1 += p->linesize[1];
+ ptr2 += p->linesize[2];
+ }
+ }
+ return s->bytestream - s->bytestream_start;
+}
+
+static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
+ PNMContext *s = avctx->priv_data;
+ AVFrame *pict = data;
+ AVFrame * const p= (AVFrame*)&s->picture;
+ int i, h, w, n, linesize, depth, maxval;
+ const char *tuple_type;
+ uint8_t *ptr;
+
+ if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
+ av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ return -1;
+ }
+
+ *p = *pict;
+ p->pict_type= FF_I_TYPE;
+ p->key_frame= 1;
+
+ s->bytestream_start=
+ s->bytestream= outbuf;
+ s->bytestream_end= outbuf+buf_size;
+
+ h = avctx->height;
+ w = avctx->width;
+ switch(avctx->pix_fmt) {
+ case PIX_FMT_MONOWHITE:
+ n = (w + 7) >> 3;
+ depth = 1;
+ maxval = 1;
+ tuple_type = "BLACKANDWHITE";
+ break;
+ case PIX_FMT_GRAY8:
+ n = w;
+ depth = 1;
+ maxval = 255;
+ tuple_type = "GRAYSCALE";
+ break;
+ case PIX_FMT_RGB24:
+ n = w * 3;
+ depth = 3;
+ maxval = 255;
+ tuple_type = "RGB";
+ break;
+ case PIX_FMT_RGBA32:
+ n = w * 4;
+ depth = 4;
+ maxval = 255;
+ tuple_type = "RGB_ALPHA";
+ break;
+ default:
+ return -1;
+ }
+ snprintf(s->bytestream, s->bytestream_end - s->bytestream,
+ "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
+ w, h, depth, maxval, tuple_type);
+ s->bytestream += strlen(s->bytestream);
+
+ ptr = p->data[0];
+ linesize = p->linesize[0];
+
+ if (avctx->pix_fmt == PIX_FMT_RGBA32) {
+ int j;
+ unsigned int v;
+
+ for(i=0;i<h;i++) {
+ for(j=0;j<w;j++) {
+ v = ((uint32_t *)ptr)[j];
+ *s->bytestream++ = v >> 16;
+ *s->bytestream++ = v >> 8;
+ *s->bytestream++ = v;
+ *s->bytestream++ = v >> 24;
+ }
+ ptr += linesize;
+ }
+ } else {
+ for(i=0;i<h;i++) {
+ memcpy(s->bytestream, ptr, n);
+ s->bytestream += n;
+ ptr += linesize;
+ }
+ }
+ return s->bytestream - s->bytestream_start;
+}
+
+#if 0
+static int pnm_probe(AVProbeData *pd)
+{
+ const char *p = pd->buf;
+ if (pd->buf_size >= 8 &&
+ p[0] == 'P' &&
+ p[1] >= '4' && p[1] <= '6' &&
+ pnm_space(p[2]) )
+ return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
+ else
+ return 0;
+}
+
+static int pgmyuv_probe(AVProbeData *pd)
+{
+ if (match_ext(pd->filename, "pgmyuv"))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int pam_probe(AVProbeData *pd)
+{
+ const char *p = pd->buf;
+ if (pd->buf_size >= 8 &&
+ p[0] == 'P' &&
+ p[1] == '7' &&
+ p[2] == '\n')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PNM_PARSER
+static int pnm_parse(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size)
+{
+ ParseContext *pc = s->priv_data;
+ PNMContext pnmctx;
+ int next;
+
+ for(; pc->overread>0; pc->overread--){
+ pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
+ }
+retry:
+ if(pc->index){
+ pnmctx.bytestream_start=
+ pnmctx.bytestream= pc->buffer;
+ pnmctx.bytestream_end= pc->buffer + pc->index;
+ }else{
+ pnmctx.bytestream_start=
+ pnmctx.bytestream= (uint8_t *) buf; /* casts avoid warnings */
+ pnmctx.bytestream_end= (uint8_t *) buf + buf_size;
+ }
+ if(pnm_decode_header(avctx, &pnmctx) < 0){
+ if(pnmctx.bytestream < pnmctx.bytestream_end){
+ if(pc->index){
+ pc->index=0;
+ }else{
+ buf++;
+ buf_size--;
+ }
+ goto retry;
+ }
+#if 0
+ if(pc->index && pc->index*2 + FF_INPUT_BUFFER_PADDING_SIZE < pc->buffer_size && buf_size > pc->index){
+ memcpy(pc->buffer + pc->index, buf, pc->index);
+ pc->index += pc->index;
+ buf += pc->index;
+ buf_size -= pc->index;
+ goto retry;
+ }
+#endif
+ next= END_NOT_FOUND;
+ }else{
+ next= pnmctx.bytestream - pnmctx.bytestream_start
+ + avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
+ if(pnmctx.bytestream_start!=buf)
+ next-= pc->index;
+ if(next > buf_size)
+ next= END_NOT_FOUND;
+ }
+
+ if(ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size)<0){
+ *poutbuf = NULL;
+ *poutbuf_size = 0;
+ return buf_size;
+ }
+ *poutbuf = (uint8_t *)buf;
+ *poutbuf_size = buf_size;
+ return next;
+}
+
+AVCodecParser pnm_parser = {
+ { CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PPM, CODEC_ID_PBM, CODEC_ID_PAM},
+ sizeof(ParseContext),
+ NULL,
+ pnm_parse,
+ ff_parse_close,
+};
+#endif /* CONFIG_PNM_PARSER */
+
+#ifdef CONFIG_PGM_ENCODER
+AVCodec pgm_encoder = {
+ "pgm",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_PGM,
+ sizeof(PNMContext),
+ common_init,
+ pnm_encode_frame,
+ NULL, //encode_end,
+ pnm_decode_frame,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, PIX_FMT_GRAY16BE, -1},
+};
+#endif // CONFIG_PGM_ENCODER
+
+#ifdef CONFIG_PGMYUV_ENCODER
+AVCodec pgmyuv_encoder = {
+ "pgmyuv",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_PGMYUV,
+ sizeof(PNMContext),
+ common_init,
+ pnm_encode_frame,
+ NULL, //encode_end,
+ pnm_decode_frame,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+};
+#endif // CONFIG_PGMYUV_ENCODER
+
+#ifdef CONFIG_PPM_ENCODER
+AVCodec ppm_encoder = {
+ "ppm",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_PPM,
+ sizeof(PNMContext),
+ common_init,
+ pnm_encode_frame,
+ NULL, //encode_end,
+ pnm_decode_frame,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, -1},
+};
+#endif // CONFIG_PPM_ENCODER
+
+#ifdef CONFIG_PBM_ENCODER
+AVCodec pbm_encoder = {
+ "pbm",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_PBM,
+ sizeof(PNMContext),
+ common_init,
+ pnm_encode_frame,
+ NULL, //encode_end,
+ pnm_decode_frame,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_MONOWHITE, -1},
+};
+#endif // CONFIG_PBM_ENCODER
+
+#ifdef CONFIG_PAM_ENCODER
+AVCodec pam_encoder = {
+ "pam",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_PAM,
+ sizeof(PNMContext),
+ common_init,
+ pam_encode_frame,
+ NULL, //encode_end,
+ pnm_decode_frame,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1},
+};
+#endif // CONFIG_PAM_ENCODER
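
The headers parsed by pnm_decode_header() are exactly the ones the encoders above emit, so a concrete example can be read straight off the snprintf() format strings. For a 640x480 8-bit RGB image (dimensions chosen only for illustration), the binary-PPM header written by pnm_encode_frame() and the PAM header written by pam_encode_frame() look like:

    P6
    640 480
    255
    <640*480*3 bytes of raw RGB24 samples>

    P7
    WIDTH 640
    HEIGHT 480
    DEPTH 3
    MAXVAL 255
    TUPLETYPE RGB
    ENDHDR
    <640*480*3 bytes of raw RGB24 samples>
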
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c b/contrib/ffmpeg/libavcodec/ppc/dsputil_altivec.c
index 81a32c9e3..6f48893a4 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
+++ b/contrib/ffmpeg/libavcodec/ppc/dsputil_altivec.c
@@ -3,18 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -616,61 +618,28 @@ void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
}
void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- int i;
- for(i=0; i+7<w; i++){
- dst[i+0] += src[i+0];
- dst[i+1] += src[i+1];
- dst[i+2] += src[i+2];
- dst[i+3] += src[i+3];
- dst[i+4] += src[i+4];
- dst[i+5] += src[i+5];
- dst[i+6] += src[i+6];
- dst[i+7] += src[i+7];
- }
- for(; i<w; i++)
- dst[i+0] += src[i+0];
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
register int i;
register vector unsigned char vdst, vsrc;
/* dst and src are 16 bytes-aligned (guaranteed) */
- for(i = 0 ; (i + 15) < w ; i++)
+ for(i = 0 ; (i + 15) < w ; i+=16)
{
- vdst = vec_ld(i << 4, (unsigned char*)dst);
- vsrc = vec_ld(i << 4, (unsigned char*)src);
+ vdst = vec_ld(i, (unsigned char*)dst);
+ vsrc = vec_ld(i, (unsigned char*)src);
vdst = vec_add(vsrc, vdst);
- vec_st(vdst, i << 4, (unsigned char*)dst);
+ vec_st(vdst, i, (unsigned char*)dst);
}
/* if w is not a multiple of 16 */
for (; (i < w) ; i++)
{
dst[i] = src[i];
}
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- int i;
-
-POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
-
- for(i=0; i<h; i++) {
- *((uint32_t*)(block)) = LD32(pixels);
- *((uint32_t*)(block+4)) = LD32(pixels+4);
- *((uint32_t*)(block+8)) = LD32(pixels+8);
- *((uint32_t*)(block+12)) = LD32(pixels+12);
- pixels+=line_size;
- block +=line_size;
- }
-
-POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
-
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
register vector unsigned char pixelsv1, pixelsv2;
register vector unsigned char pixelsv1B, pixelsv2B;
register vector unsigned char pixelsv1C, pixelsv2C;
@@ -700,13 +669,13 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
#else
for(i=0; i<h; i+=4) {
pixelsv1 = vec_ld(0, (unsigned char*)pixels);
- pixelsv2 = vec_ld(16, (unsigned char*)pixels);
+ pixelsv2 = vec_ld(15, (unsigned char*)pixels);
pixelsv1B = vec_ld(line_size, (unsigned char*)pixels);
- pixelsv2B = vec_ld(16 + line_size, (unsigned char*)pixels);
+ pixelsv2B = vec_ld(15 + line_size, (unsigned char*)pixels);
pixelsv1C = vec_ld(line_size_2, (unsigned char*)pixels);
- pixelsv2C = vec_ld(16 + line_size_2, (unsigned char*)pixels);
+ pixelsv2C = vec_ld(15 + line_size_2, (unsigned char*)pixels);
pixelsv1D = vec_ld(line_size_3, (unsigned char*)pixels);
- pixelsv2D = vec_ld(16 + line_size_3, (unsigned char*)pixels);
+ pixelsv2D = vec_ld(15 + line_size_3, (unsigned char*)pixels);
vec_st(vec_perm(pixelsv1, pixelsv2, perm),
0, (unsigned char*)block);
vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
@@ -720,8 +689,6 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_num, 1);
}
#endif
POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
-
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
@@ -729,23 +696,6 @@ POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_num, 1);
void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels16_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- int i;
-
-POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);
-
- for(i=0; i<h; i++) {
- op_avg(*((uint32_t*)(block)),LD32(pixels));
- op_avg(*((uint32_t*)(block+4)),LD32(pixels+4));
- op_avg(*((uint32_t*)(block+8)),LD32(pixels+8));
- op_avg(*((uint32_t*)(block+12)),LD32(pixels+12));
- pixels+=line_size;
- block +=line_size;
- }
-
-POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
-
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
register vector unsigned char perm = vec_lvsl(0, pixels);
int i;
@@ -764,37 +714,12 @@ POWERPC_PERF_START_COUNT(altivec_avg_pixels16_num, 1);
}
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels16_num, 1);
-
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- int i;
-POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
- for (i = 0; i < h; i++) {
- *((uint32_t *) (block)) =
- (((*((uint32_t *) (block))) |
- ((((const struct unaligned_32 *) (pixels))->l))) -
- ((((*((uint32_t *) (block))) ^
- ((((const struct unaligned_32 *) (pixels))->
- l))) & 0xFEFEFEFEUL) >> 1));
- *((uint32_t *) (block + 4)) =
- (((*((uint32_t *) (block + 4))) |
- ((((const struct unaligned_32 *) (pixels + 4))->l))) -
- ((((*((uint32_t *) (block + 4))) ^
- ((((const struct unaligned_32 *) (pixels +
- 4))->
- l))) & 0xFEFEFEFEUL) >> 1));
- pixels += line_size;
- block += line_size;
- }
-POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
-
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
int i;
@@ -830,52 +755,12 @@ POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1);
}
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1);
-
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 8) == 0) */
void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- int j;
-POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
- for (j = 0; j < 2; j++) {
- int i;
- const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- const uint32_t b =
- (((const struct unaligned_32 *) (pixels + 1))->l);
- uint32_t l0 =
- (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
- uint32_t h0 =
- ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- uint32_t l1, h1;
- pixels += line_size;
- for (i = 0; i < h; i += 2) {
- uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
- h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) =
- h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
- pixels += line_size;
- block += line_size;
- a = (((const struct unaligned_32 *) (pixels))->l);
- b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
- h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) =
- h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
- pixels += line_size;
- block += line_size;
- } pixels += 4 - line_size * (h + 1);
- block += 4 - line_size * h;
- }
-
-POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
-
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
register int i;
register vector unsigned char
pixelsv1, pixelsv2,
@@ -946,51 +831,12 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1);
}
POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 8) == 0) */
void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- int j;
-POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
- for (j = 0; j < 2; j++) {
- int i;
- const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- const uint32_t b =
- (((const struct unaligned_32 *) (pixels + 1))->l);
- uint32_t l0 =
- (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
- uint32_t h0 =
- ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- uint32_t l1, h1;
- pixels += line_size;
- for (i = 0; i < h; i += 2) {
- uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
- h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) =
- h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
- pixels += line_size;
- block += line_size;
- a = (((const struct unaligned_32 *) (pixels))->l);
- b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
- h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) =
- h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
- pixels += line_size;
- block += line_size;
- } pixels += 4 - line_size * (h + 1);
- block += 4 - line_size * h;
- }
-
-POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
-
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
register int i;
register vector unsigned char
pixelsv1, pixelsv2,
@@ -1062,51 +908,12 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
}
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- int j;
-POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
- for (j = 0; j < 4; j++) {
- int i;
- const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- const uint32_t b =
- (((const struct unaligned_32 *) (pixels + 1))->l);
- uint32_t l0 =
- (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
- uint32_t h0 =
- ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- uint32_t l1, h1;
- pixels += line_size;
- for (i = 0; i < h; i += 2) {
- uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
- h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) =
- h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
- pixels += line_size;
- block += line_size;
- a = (((const struct unaligned_32 *) (pixels))->l);
- b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
- h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) =
- h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
- pixels += line_size;
- block += line_size;
- } pixels += 4 - line_size * (h + 1);
- block += 4 - line_size * h;
- }
-
-POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
-
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
register int i;
register vector unsigned char
pixelsv1, pixelsv2, pixelsv3, pixelsv4;
@@ -1183,51 +990,12 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1);
}
POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* next one assumes that ((line_size % 16) == 0) */
void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- int j;
-POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
- for (j = 0; j < 4; j++) {
- int i;
- const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- const uint32_t b =
- (((const struct unaligned_32 *) (pixels + 1))->l);
- uint32_t l0 =
- (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
- uint32_t h0 =
- ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- uint32_t l1, h1;
- pixels += line_size;
- for (i = 0; i < h; i += 2) {
- uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
- h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) =
- h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
- pixels += line_size;
- block += line_size;
- a = (((const struct unaligned_32 *) (pixels))->l);
- b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
- h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) =
- h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
- pixels += line_size;
- block += line_size;
- } pixels += 4 - line_size * (h + 1);
- block += 4 - line_size * h;
- }
-
-POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
-
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
register int i;
register vector unsigned char
pixelsv1, pixelsv2, pixelsv3, pixelsv4;
@@ -1305,34 +1073,32 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
}
POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
- int sum;
- register const_vector unsigned char vzero = (const_vector unsigned char)vec_splat_u8(0);
- register vector signed short temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+ int sum;
+ register const_vector unsigned char vzero =
+ (const_vector unsigned char)vec_splat_u8(0);
+ register vector signed short temp0, temp1, temp2, temp3, temp4,
+ temp5, temp6, temp7;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
{
- register const_vector signed short vprod1 = (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
- register const_vector signed short vprod2 = (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
- register const_vector signed short vprod3 = (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
+ register const_vector signed short vprod1 =(const_vector signed short)
+ AVV( 1,-1, 1,-1, 1,-1, 1,-1);
+ register const_vector signed short vprod2 =(const_vector signed short)
+ AVV( 1, 1,-1,-1, 1, 1,-1,-1);
+ register const_vector signed short vprod3 =(const_vector signed short)
+ AVV( 1, 1, 1, 1,-1,-1,-1,-1);
register const_vector unsigned char perm1 = (const_vector unsigned char)
- AVV(0x02, 0x03, 0x00, 0x01,
- 0x06, 0x07, 0x04, 0x05,
- 0x0A, 0x0B, 0x08, 0x09,
- 0x0E, 0x0F, 0x0C, 0x0D);
+ AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
+ 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
register const_vector unsigned char perm2 = (const_vector unsigned char)
- AVV(0x04, 0x05, 0x06, 0x07,
- 0x00, 0x01, 0x02, 0x03,
- 0x0C, 0x0D, 0x0E, 0x0F,
- 0x08, 0x09, 0x0A, 0x0B);
+ AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
register const_vector unsigned char perm3 = (const_vector unsigned char)
- AVV(0x08, 0x09, 0x0A, 0x0B,
- 0x0C, 0x0D, 0x0E, 0x0F,
- 0x00, 0x01, 0x02, 0x03,
- 0x04, 0x05, 0x06, 0x07);
+ AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
#define ONEITERBUTTERFLY(i, res) \
{ \
@@ -1443,45 +1209,46 @@ POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
*/
static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
- int sum;
- register vector signed short
- temp0 REG_v(v0),
- temp1 REG_v(v1),
- temp2 REG_v(v2),
- temp3 REG_v(v3),
- temp4 REG_v(v4),
- temp5 REG_v(v5),
- temp6 REG_v(v6),
- temp7 REG_v(v7);
- register vector signed short
- temp0S REG_v(v8),
- temp1S REG_v(v9),
- temp2S REG_v(v10),
- temp3S REG_v(v11),
- temp4S REG_v(v12),
- temp5S REG_v(v13),
- temp6S REG_v(v14),
- temp7S REG_v(v15);
- register const_vector unsigned char vzero REG_v(v31)= (const_vector unsigned char)vec_splat_u8(0);
+ int sum;
+ register vector signed short
+ temp0 REG_v(v0),
+ temp1 REG_v(v1),
+ temp2 REG_v(v2),
+ temp3 REG_v(v3),
+ temp4 REG_v(v4),
+ temp5 REG_v(v5),
+ temp6 REG_v(v6),
+ temp7 REG_v(v7);
+ register vector signed short
+ temp0S REG_v(v8),
+ temp1S REG_v(v9),
+ temp2S REG_v(v10),
+ temp3S REG_v(v11),
+ temp4S REG_v(v12),
+ temp5S REG_v(v13),
+ temp6S REG_v(v14),
+ temp7S REG_v(v15);
+ register const_vector unsigned char vzero REG_v(v31)=
+ (const_vector unsigned char)vec_splat_u8(0);
{
- register const_vector signed short vprod1 REG_v(v16)= (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
- register const_vector signed short vprod2 REG_v(v17)= (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
- register const_vector signed short vprod3 REG_v(v18)= (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
- register const_vector unsigned char perm1 REG_v(v19)= (const_vector unsigned char)
- AVV(0x02, 0x03, 0x00, 0x01,
- 0x06, 0x07, 0x04, 0x05,
- 0x0A, 0x0B, 0x08, 0x09,
- 0x0E, 0x0F, 0x0C, 0x0D);
- register const_vector unsigned char perm2 REG_v(v20)= (const_vector unsigned char)
- AVV(0x04, 0x05, 0x06, 0x07,
- 0x00, 0x01, 0x02, 0x03,
- 0x0C, 0x0D, 0x0E, 0x0F,
- 0x08, 0x09, 0x0A, 0x0B);
- register const_vector unsigned char perm3 REG_v(v21)= (const_vector unsigned char)
- AVV(0x08, 0x09, 0x0A, 0x0B,
- 0x0C, 0x0D, 0x0E, 0x0F,
- 0x00, 0x01, 0x02, 0x03,
- 0x04, 0x05, 0x06, 0x07);
+ register const_vector signed short vprod1 REG_v(v16)=
+ (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
+ register const_vector signed short vprod2 REG_v(v17)=
+ (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
+ register const_vector signed short vprod3 REG_v(v18)=
+ (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
+ register const_vector unsigned char perm1 REG_v(v19)=
+ (const_vector unsigned char)
+ AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
+ 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
+ register const_vector unsigned char perm2 REG_v(v20)=
+ (const_vector unsigned char)
+ AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
+ register const_vector unsigned char perm3 REG_v(v21)=
+ (const_vector unsigned char)
+ AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
#define ONEITERBUTTERFLY(i, res1, res2) \
{ \
@@ -1642,27 +1409,27 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
- int score;
+ int score;
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
- score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
- if (h==16) {
- dst += 8*stride;
- src += 8*stride;
- score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
- }
+ score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
+ if (h==16) {
+ dst += 8*stride;
+ src += 8*stride;
+ score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
+ }
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
- return score;
+ return score;
}
int has_altivec(void)
{
#ifdef __AMIGAOS4__
- ULONG result = 0;
- extern struct ExecIFace *IExec;
+ ULONG result = 0;
+ extern struct ExecIFace *IExec;
- IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
- if (result == VECTORTYPE_ALTIVEC) return 1;
- return 0;
+ IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
+ if (result == VECTORTYPE_ALTIVEC) return 1;
+ return 0;
#else /* __AMIGAOS4__ */
#ifdef CONFIG_DARWIN
@@ -1698,112 +1465,127 @@ int has_altivec(void)
#endif /* __AMIGAOS4__ */
}
+static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
+ int blocksize)
+{
+ int i;
+ vector float m, a;
+ vector bool int t0, t1;
+ const vector unsigned int v_31 = //XXX
+ vec_add(vec_add(vec_splat_u32(15),vec_splat_u32(15)),vec_splat_u32(1));
+ for(i=0; i<blocksize; i+=4) {
+ m = vec_ld(0, mag+i);
+ a = vec_ld(0, ang+i);
+ t0 = vec_cmple(m, (vector float)vec_splat_u32(0));
+ t1 = vec_cmple(a, (vector float)vec_splat_u32(0));
+ a = vec_xor(a, (vector float) vec_sl((vector unsigned int)t0, v_31));
+ t0 = (vector bool int)vec_and(a, t1);
+ t1 = (vector bool int)vec_andc(a, t1);
+ a = vec_sub(m, (vector float)t1);
+ m = vec_add(m, (vector float)t0);
+ vec_stl(a, 0, ang+i);
+ vec_stl(m, 0, mag+i);
+ }
+}
+
/* next one assumes that ((line_size % 8) == 0) */
void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-
- int j;
-POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
- for (j = 0; j < 2; j++) {
- int i;
- const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- const uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
- uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
- uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- uint32_t l1, h1;
- pixels += line_size;
- for (i = 0; i < h; i += 2) {
- uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
- uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
- h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
- pixels += line_size;
- block += line_size;
- a = (((const struct unaligned_32 *) (pixels))->l);
- b = (((const struct unaligned_32 *) (pixels + 1))->l);
- l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
- h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
- *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
- pixels += line_size;
- block += line_size;
- } pixels += 4 - line_size * (h + 1);
- block += 4 - line_size * h;
- }
-POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
- register int i;
- register vector unsigned char
- pixelsv1, pixelsv2,
- pixelsavg;
- register vector unsigned char
- blockv, temp1, temp2, blocktemp;
- register vector unsigned short
- pixelssum1, pixelssum2, temp3;
- register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
- register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
-
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
- {
- pixelsv2 = temp2;
- }
- else
- {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
- pixelssum1 = vec_add((vector unsigned short)pixelsv1,
- (vector unsigned short)pixelsv2);
- pixelssum1 = vec_add(pixelssum1, vctwo);
+ register int i;
+ register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
+ register vector unsigned char blockv, temp1, temp2, blocktemp;
+ register vector unsigned short pixelssum1, pixelssum2, temp3;
+
+ register const_vector unsigned char vczero = (const_vector unsigned char)
+ vec_splat_u8(0);
+ register const_vector unsigned short vctwo = (const_vector unsigned short)
+ vec_splat_u16(2);
+
+ temp1 = vec_ld(0, pixels);
+ temp2 = vec_ld(16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
+ pixelsv2 = temp2;
+ } else {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
+ }
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ pixelssum1 = vec_add(pixelssum1, vctwo);
POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
- for (i = 0; i < h ; i++) {
- int rightside = ((unsigned long)block & 0x0000000F);
- blockv = vec_ld(0, block);
+ for (i = 0; i < h ; i++) {
+ int rightside = ((unsigned long)block & 0x0000000F);
+ blockv = vec_ld(0, block);
+
+ temp1 = vec_ld(line_size, pixels);
+ temp2 = vec_ld(line_size + 16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ } else {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
+ }
+
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ temp3 = vec_add(pixelssum1, pixelssum2);
+ temp3 = vec_sra(temp3, vctwo);
+ pixelssum1 = vec_add(pixelssum2, vctwo);
+ pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
+
+ if (rightside) {
+ blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
+ } else {
+ blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
+ }
+
+ blockv = vec_avg(blocktemp, blockv);
+ vec_st(blockv, 0, block);
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
- {
- pixelsv2 = temp2;
- }
- else
- {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
- pixelssum2 = vec_add((vector unsigned short)pixelsv1,
- (vector unsigned short)pixelsv2);
- temp3 = vec_add(pixelssum1, pixelssum2);
- temp3 = vec_sra(temp3, vctwo);
- pixelssum1 = vec_add(pixelssum2, vctwo);
- pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
-
- if (rightside)
- {
- blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
- }
- else
- {
- blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
- }
-
- blockv = vec_avg(blocktemp, blockv);
- vec_st(blockv, 0, block);
-
- block += line_size;
- pixels += line_size;
- }
+ block += line_size;
+ pixels += line_size;
+ }
POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
+
+void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx)
+{
+ c->pix_abs[0][1] = sad16_x2_altivec;
+ c->pix_abs[0][2] = sad16_y2_altivec;
+ c->pix_abs[0][3] = sad16_xy2_altivec;
+ c->pix_abs[0][0] = sad16_altivec;
+ c->pix_abs[1][0] = sad8_altivec;
+ c->sad[0]= sad16_altivec;
+ c->sad[1]= sad8_altivec;
+ c->pix_norm1 = pix_norm1_altivec;
+ c->sse[1]= sse8_altivec;
+ c->sse[0]= sse16_altivec;
+ c->pix_sum = pix_sum_altivec;
+ c->diff_pixels = diff_pixels_altivec;
+ c->get_pixels = get_pixels_altivec;
+ c->add_bytes= add_bytes_altivec;
+ c->put_pixels_tab[0][0] = put_pixels16_altivec;
+ /* the two functions do the same thing, so use the same code */
+ c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
+ c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
+ c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
+ c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
+ c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
+ c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
+ c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
+ c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
+
+ c->hadamard8_diff[0] = hadamard8_diff16_altivec;
+ c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
+#ifdef CONFIG_VORBIS_DECODER
+ c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
+#endif
}
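A note on the add_bytes_altivec hunk above: the vector loop now steps i by 16 and uses it directly as the byte offset passed to vec_ld/vec_st, where the old loop stepped by 1 and shifted. A minimal scalar sketch of that indexing in plain C (add_bytes_ref is an illustrative name, not part of the patch); note that the committed vector tail loop stores src[i] into dst[i] rather than accumulating, whereas the sketch accumulates throughout.

    #include <stdint.h>

    /* Scalar sketch of the block indexing used by the fixed AltiVec loop:
     * whole 16-byte blocks first, with the loop counter doubling as the byte
     * offset, then a byte-at-a-time tail when w is not a multiple of 16. */
    static void add_bytes_ref(uint8_t *dst, const uint8_t *src, int w)
    {
        int i, j;
        for (i = 0; i + 15 < w; i += 16)      /* same bound and step as the vector loop */
            for (j = 0; j < 16; j++)
                dst[i + j] += src[i + j];
        for (; i < w; i++)                    /* leftover bytes */
            dst[i] += src[i];
    }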
diff --git a/contrib/ffmpeg/libavcodec/ppc/dsputil_altivec.h b/contrib/ffmpeg/libavcodec/ppc/dsputil_altivec.h
new file mode 100644
index 000000000..560d778bb
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ppc/dsputil_altivec.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2002 Brian Foley
+ * Copyright (c) 2002 Dieter Shirley
+ * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _DSPUTIL_ALTIVEC_
+#define _DSPUTIL_ALTIVEC_
+
+#include "dsputil_ppc.h"
+
+#ifdef HAVE_ALTIVEC
+
+extern int has_altivec(void);
+
+void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
+
+void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
+
+// used to build register permutation vectors (vcprm)
+// the 's' are for words in the _s_econd vector
+#define WORD_0 0x00,0x01,0x02,0x03
+#define WORD_1 0x04,0x05,0x06,0x07
+#define WORD_2 0x08,0x09,0x0a,0x0b
+#define WORD_3 0x0c,0x0d,0x0e,0x0f
+#define WORD_s0 0x10,0x11,0x12,0x13
+#define WORD_s1 0x14,0x15,0x16,0x17
+#define WORD_s2 0x18,0x19,0x1a,0x1b
+#define WORD_s3 0x1c,0x1d,0x1e,0x1f
+
+#ifdef CONFIG_DARWIN
+#define vcprm(a,b,c,d) (const vector unsigned char)(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
+#else
+#define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}
+#endif
+
+// vcprmle is used to keep the same index as in the SSE version.
+// it is the same as vcprm, with the indexes reversed
+// ('le' is Little Endian)
+#define vcprmle(a,b,c,d) vcprm(d,c,b,a)
+
+// used to build inverse/identity vectors (vcii)
+// n is _n_egative, p is _p_ositive
+#define FLOAT_n -1.
+#define FLOAT_p 1.
+
+
+#ifdef CONFIG_DARWIN
+#define vcii(a,b,c,d) (const vector float)(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
+#else
+#define vcii(a,b,c,d) (const vector float){FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d}
+#endif
+
+// Transpose 8x8 matrix of 16-bit elements (in-place)
+#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
+do { \
+ vector signed short A1, B1, C1, D1, E1, F1, G1, H1; \
+ vector signed short A2, B2, C2, D2, E2, F2, G2, H2; \
+ \
+ A1 = vec_mergeh (a, e); \
+ B1 = vec_mergel (a, e); \
+ C1 = vec_mergeh (b, f); \
+ D1 = vec_mergel (b, f); \
+ E1 = vec_mergeh (c, g); \
+ F1 = vec_mergel (c, g); \
+ G1 = vec_mergeh (d, h); \
+ H1 = vec_mergel (d, h); \
+ \
+ A2 = vec_mergeh (A1, E1); \
+ B2 = vec_mergel (A1, E1); \
+ C2 = vec_mergeh (B1, F1); \
+ D2 = vec_mergel (B1, F1); \
+ E2 = vec_mergeh (C1, G1); \
+ F2 = vec_mergel (C1, G1); \
+ G2 = vec_mergeh (D1, H1); \
+ H2 = vec_mergel (D1, H1); \
+ \
+ a = vec_mergeh (A2, E2); \
+ b = vec_mergel (A2, E2); \
+ c = vec_mergeh (B2, F2); \
+ d = vec_mergel (B2, F2); \
+ e = vec_mergeh (C2, G2); \
+ f = vec_mergel (C2, G2); \
+ g = vec_mergeh (D2, H2); \
+ h = vec_mergel (D2, H2); \
+} while (0)
+
+#endif /* HAVE_ALTIVEC */
+
+#endif /* _DSPUTIL_ALTIVEC_ */
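A note on the vcprm macros in the new header: a selector such as vcprm(0, 1, s0, s1) builds the permute control vector that takes 32-bit words 0 and 1 from the first source vector and words 0 and 1 from the second (the "s" words). A scalar illustration in plain C of what that selection produces (function and variable names are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* What a vcprm(0, 1, s0, s1) selection expresses, word by word. */
    static void perm_0_1_s0_s1(const uint32_t a[4], const uint32_t b[4],
                               uint32_t out[4])
    {
        out[0] = a[0];   /* word 0  of the first vector  */
        out[1] = a[1];   /* word 1  of the first vector  */
        out[2] = b[0];   /* word s0 of the second vector */
        out[3] = b[1];   /* word s1 of the second vector */
    }

    int main(void)
    {
        uint32_t a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, r[4];
        perm_0_1_s0_s1(a, b, r);
        printf("%u %u %u %u\n", (unsigned)r[0], (unsigned)r[1],
               (unsigned)r[2], (unsigned)r[3]);   /* prints: 1 2 5 6 */
        return 0;
    }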
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c b/contrib/ffmpeg/libavcodec/ppc/dsputil_ppc.c
index b63c8dd84..9169eaef0 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
+++ b/contrib/ffmpeg/libavcodec/ppc/dsputil_ppc.c
@@ -3,18 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -24,22 +26,21 @@
#ifdef HAVE_ALTIVEC
#include "dsputil_altivec.h"
-#endif
extern void fdct_altivec(int16_t *block);
+extern void gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h,
+ int x16, int y16, int rounder);
extern void idct_put_altivec(uint8_t *dest, int line_size, int16_t *block);
extern void idct_add_altivec(uint8_t *dest, int line_size, int16_t *block);
-extern void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width);
-extern void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1,
- DWTELEM *b2, DWTELEM *b3,
- DWTELEM *b4, DWTELEM *b5,
- int width);
-extern void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
- uint8_t * * block, int b_w, int b_h,
- int src_x, int src_y, int src_stride,
- slice_buffer * sb, int add,
- uint8_t * dst8);
+void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx);
+
+void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx);
+void vc1dsp_init_altivec(DSPContext* c, AVCodecContext *avctx);
+void snow_init_altivec(DSPContext* c, AVCodecContext *avctx);
+void float_init_altivec(DSPContext* c, AVCodecContext *avctx);
+
+#endif
int mm_flags = 0;
@@ -100,7 +101,7 @@ void powerpc_display_perf_report(void)
{
if (perfdata[j][i][powerpc_data_num] != (unsigned long long)0)
av_log(NULL, AV_LOG_INFO,
- " Function \"%s\" (pmc%d):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n",
+ " Function \"%s\" (pmc%d):\n\tmin: %"PRIu64"\n\tmax: %"PRIu64"\n\tavg: %1.2lf (%"PRIu64")\n",
perfname[i],
j+1,
perfdata[j][i][powerpc_data_min],
@@ -174,7 +175,7 @@ POWERPC_PERF_STOP_COUNT(powerpc_clear_blocks_dcbz32, 1);
/* same as above, when dcbzl clear a whole 128B cache line
i.e. the PPC970 aka G5 */
-#ifndef NO_DCBZL
+#ifdef HAVE_DCBZL
void clear_blocks_dcbz128_ppc(DCTELEM *blocks)
{
POWERPC_PERF_DECLARE(powerpc_clear_blocks_dcbz128, 1);
@@ -204,7 +205,7 @@ void clear_blocks_dcbz128_ppc(DCTELEM *blocks)
}
#endif
-#ifndef NO_DCBZL
+#ifdef HAVE_DCBZL
/* check dcbz report how many bytes are set to 0 by dcbz */
/* update 24/06/2003 : replace dcbz by dcbzl to get
the intended effect (Apple "fixed" dcbz)
@@ -248,69 +249,43 @@ long check_dcbzl_effect(void)
}
#endif
-
-void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx);
+static void prefetch_ppc(void *mem, int stride, int h)
+{
+ register const uint8_t *p = mem;
+ do {
+ asm volatile ("dcbt 0,%0" : : "r" (p));
+ p+= stride;
+ } while(--h);
+}
void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
{
// Common optimizations whether Altivec is available or not
-
- switch (check_dcbzl_effect()) {
- case 32:
- c->clear_blocks = clear_blocks_dcbz32_ppc;
- break;
- case 128:
- c->clear_blocks = clear_blocks_dcbz128_ppc;
- break;
- default:
- break;
- }
+ c->prefetch = prefetch_ppc;
+ switch (check_dcbzl_effect()) {
+ case 32:
+ c->clear_blocks = clear_blocks_dcbz32_ppc;
+ break;
+ case 128:
+ c->clear_blocks = clear_blocks_dcbz128_ppc;
+ break;
+ default:
+ break;
+ }
#ifdef HAVE_ALTIVEC
- dsputil_h264_init_ppc(c, avctx);
+ if(ENABLE_H264_DECODER) dsputil_h264_init_ppc(c, avctx);
if (has_altivec()) {
mm_flags |= MM_ALTIVEC;
- // Altivec specific optimisations
- c->pix_abs[0][1] = sad16_x2_altivec;
- c->pix_abs[0][2] = sad16_y2_altivec;
- c->pix_abs[0][3] = sad16_xy2_altivec;
- c->pix_abs[0][0] = sad16_altivec;
- c->pix_abs[1][0] = sad8_altivec;
- c->sad[0]= sad16_altivec;
- c->sad[1]= sad8_altivec;
- c->pix_norm1 = pix_norm1_altivec;
- c->sse[1]= sse8_altivec;
- c->sse[0]= sse16_altivec;
- c->pix_sum = pix_sum_altivec;
- c->diff_pixels = diff_pixels_altivec;
- c->get_pixels = get_pixels_altivec;
-// next one disabled as it's untested.
-#if 0
- c->add_bytes= add_bytes_altivec;
-#endif /* 0 */
- c->put_pixels_tab[0][0] = put_pixels16_altivec;
- /* the two functions do the same thing, so use the same code */
- c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
- c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
- c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
- c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
- c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
- c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
- c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
- c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
-
+ dsputil_init_altivec(c, avctx);
+ if(ENABLE_SNOW_DECODER) snow_init_altivec(c, avctx);
+ if(ENABLE_VC1_DECODER || ENABLE_WMV3_DECODER)
+ vc1dsp_init_altivec(c, avctx);
+ float_init_altivec(c, avctx);
c->gmc1 = gmc1_altivec;
- c->hadamard8_diff[0] = hadamard8_diff16_altivec;
- c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
-
-
- c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
- c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
- c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
-
#ifdef CONFIG_ENCODERS
if (avctx->dct_algo == FF_DCT_AUTO ||
avctx->dct_algo == FF_DCT_ALTIVEC)
@@ -319,20 +294,16 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
}
#endif //CONFIG_ENCODERS
- if (avctx->lowres==0)
- {
+ if (avctx->lowres==0)
+ {
if ((avctx->idct_algo == FF_IDCT_AUTO) ||
(avctx->idct_algo == FF_IDCT_ALTIVEC))
{
c->idct_put = idct_put_altivec;
c->idct_add = idct_add_altivec;
-#ifndef ALTIVEC_USE_REFERENCE_C_CODE
c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
- c->idct_permutation_type = FF_NO_IDCT_PERM;
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
- }
+ }
#ifdef POWERPC_PERFORMANCE_REPORT
{
@@ -349,11 +320,6 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
}
}
#endif /* POWERPC_PERFORMANCE_REPORT */
- } else
-#endif /* HAVE_ALTIVEC */
- {
- // Non-AltiVec PPC optimisations
-
- // ... pending ...
}
+#endif /* HAVE_ALTIVEC */
}
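A note on the reworked dsputil_init_ppc above: the initialization keeps the pattern of probing the hardware once (check_dcbzl_effect, has_altivec) and then overwriting generic function pointers with accelerated variants. A stripped-down sketch of that dispatch pattern in plain C; every name below is a stand-in, not FFmpeg API:

    #include <string.h>

    typedef struct {
        void (*clear_blocks)(short *blocks);
    } MiniDSPContext;

    /* Generic fallback and two stand-ins for the dcbz32/dcbz128 paths. */
    static void clear_blocks_c(short *b)   { memset(b, 0, 6 * 64 * sizeof(*b)); }
    static void clear_blocks_32(short *b)  { memset(b, 0, 6 * 64 * sizeof(*b)); }
    static void clear_blocks_128(short *b) { memset(b, 0, 6 * 64 * sizeof(*b)); }

    /* Stand-in for check_dcbzl_effect(): report how many bytes get zeroed. */
    static long probe_cache_zero(void) { return 32; }

    static void mini_init(MiniDSPContext *c)
    {
        c->clear_blocks = clear_blocks_c;              /* safe default */
        switch (probe_cache_zero()) {                  /* one-time hardware probe */
        case 32:  c->clear_blocks = clear_blocks_32;  break;
        case 128: c->clear_blocks = clear_blocks_128; break;
        default:  break;                               /* keep the generic version */
        }
    }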
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h b/contrib/ffmpeg/libavcodec/ppc/dsputil_ppc.h
index 966ffa71a..ab2b05780 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h
+++ b/contrib/ffmpeg/libavcodec/ppc/dsputil_ppc.h
@@ -1,35 +1,26 @@
/*
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _DSPUTIL_PPC_
#define _DSPUTIL_PPC_
-#ifdef CONFIG_DARWIN
-/* The Apple assembler shipped w/ gcc-3.3 knows about DCBZL, previous assemblers don't
- We assume here that the Darwin GCC is from Apple.... */
-#if (__GNUC__ * 100 + __GNUC_MINOR__ < 303)
-#define NO_DCBZL
-#endif
-#else /* CONFIG_DARWIN */
-/* I don't think any non-Apple assembler knows about DCBZL */
-#define NO_DCBZL
-#endif /* CONFIG_DARWIN */
-
#ifdef POWERPC_PERFORMANCE_REPORT
void powerpc_display_perf_report(void);
/* the 604* have 2, the G3* have 4, the G4s have 6,
diff --git a/src/libffmpeg/libavcodec/ppc/fdct_altivec.c b/contrib/ffmpeg/libavcodec/ppc/fdct_altivec.c
index f5778c24e..2418c32bb 100644
--- a/src/libffmpeg/libavcodec/ppc/fdct_altivec.c
+++ b/contrib/ffmpeg/libavcodec/ppc/fdct_altivec.c
@@ -2,18 +2,20 @@
* AltiVec optimized library for the FFMPEG Multimedia System
* Copyright (C) 2003 James Klicman <james@klicman.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -196,12 +198,6 @@ static vector float fdctconsts[3] = {
void fdct_altivec(int16_t *block)
{
POWERPC_PERF_DECLARE(altivec_fdct, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-POWERPC_PERF_START_COUNT(altivec_fdct, 1);
- void ff_jpeg_fdct_islow(int16_t *block);
- ff_jpeg_fdct_islow(block);
-POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
vector signed short *bp;
vector float *cp;
vector float b00, b10, b20, b30, b40, b50, b60, b70;
@@ -492,7 +488,6 @@ POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
/* }}} */
POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
/* vim:set foldmethod=marker foldlevel=0: */
diff --git a/src/libffmpeg/libavcodec/ppc/fft_altivec.c b/contrib/ffmpeg/libavcodec/ppc/fft_altivec.c
index f4ea78359..384a774ff 100644
--- a/src/libffmpeg/libavcodec/ppc/fft_altivec.c
+++ b/contrib/ffmpeg/libavcodec/ppc/fft_altivec.c
@@ -4,18 +4,20 @@
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
* Based on code Copyright (c) 2002 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "../dsputil.h"
@@ -63,88 +65,7 @@
void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
{
POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- int ln = s->nbits;
- int j, np, np2;
- int nblocks, nloops;
- register FFTComplex *p, *q;
- FFTComplex *exptab = s->exptab;
- int l;
- FFTSample tmp_re, tmp_im;
-
-POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
-
- np = 1 << ln;
-
- /* pass 0 */
-
- p=&z[0];
- j=(np >> 1);
- do {
- BF(p[0].re, p[0].im, p[1].re, p[1].im,
- p[0].re, p[0].im, p[1].re, p[1].im);
- p+=2;
- } while (--j != 0);
-
- /* pass 1 */
-
-
- p=&z[0];
- j=np >> 2;
- if (s->inverse) {
- do {
- BF(p[0].re, p[0].im, p[2].re, p[2].im,
- p[0].re, p[0].im, p[2].re, p[2].im);
- BF(p[1].re, p[1].im, p[3].re, p[3].im,
- p[1].re, p[1].im, -p[3].im, p[3].re);
- p+=4;
- } while (--j != 0);
- } else {
- do {
- BF(p[0].re, p[0].im, p[2].re, p[2].im,
- p[0].re, p[0].im, p[2].re, p[2].im);
- BF(p[1].re, p[1].im, p[3].re, p[3].im,
- p[1].re, p[1].im, p[3].im, -p[3].re);
- p+=4;
- } while (--j != 0);
- }
- /* pass 2 .. ln-1 */
-
- nblocks = np >> 3;
- nloops = 1 << 2;
- np2 = np >> 1;
- do {
- p = z;
- q = z + nloops;
- for (j = 0; j < nblocks; ++j) {
- BF(p->re, p->im, q->re, q->im,
- p->re, p->im, q->re, q->im);
-
- p++;
- q++;
- for(l = nblocks; l < np2; l += nblocks) {
- CMUL(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im);
- BF(p->re, p->im, q->re, q->im,
- p->re, p->im, tmp_re, tmp_im);
- p++;
- q++;
- }
-
- p += nloops;
- q += nloops;
- }
- nblocks = nblocks >> 1;
- nloops = nloops << 1;
- } while (nblocks != 0);
-
-POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
-
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
-#ifdef CONFIG_DARWIN
- register const vector float vczero = (const vector float)(0.);
-#else
- register const vector float vczero = (const vector float){0.,0.,0.,0.};
-#endif
+ register const vector float vczero = (const vector float)vec_splat_u32(0.);
int ln = s->nbits;
int j, np, np2;
@@ -242,6 +163,4 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
} while (nblocks != 0);
POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
-
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
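A note on the fft_altivec.c hunk above: the deleted reference path was built from the BF radix-2 butterfly. A scalar sketch of that butterfly in plain C, assuming the usual definition (sum written to p, difference to q); the names are illustrative:

    #include <stdio.h>

    /* Radix-2 butterfly on one complex pair: p <- p + q, q <- p - q. */
    static void butterfly(float *pre, float *pim, float *qre, float *qim)
    {
        float ar = *pre, ai = *pim, br = *qre, bi = *qim;
        *pre = ar + br;
        *pim = ai + bi;
        *qre = ar - br;
        *qim = ai - bi;
    }

    int main(void)
    {
        float pre = 1.0f, pim = 2.0f, qre = 3.0f, qim = 4.0f;
        butterfly(&pre, &pim, &qre, &qim);
        printf("p=(%g,%g) q=(%g,%g)\n", pre, pim, qre, qim);  /* p=(4,6) q=(-2,-2) */
        return 0;
    }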
diff --git a/contrib/ffmpeg/libavcodec/ppc/float_altivec.c b/contrib/ffmpeg/libavcodec/ppc/float_altivec.c
new file mode 100644
index 000000000..c6e43dec2
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ppc/float_altivec.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+
+#include "dsputil_altivec.h"
+
+static void vector_fmul_altivec(float *dst, const float *src, int len)
+{
+ int i;
+ vector float d0, d1, s, zero = (vector float)vec_splat_u32(0);
+ for(i=0; i<len-7; i+=8) {
+ d0 = vec_ld(0, dst+i);
+ s = vec_ld(0, src+i);
+ d1 = vec_ld(16, dst+i);
+ d0 = vec_madd(d0, s, zero);
+ d1 = vec_madd(d1, vec_ld(16,src+i), zero);
+ vec_st(d0, 0, dst+i);
+ vec_st(d1, 16, dst+i);
+ }
+}
+
+static void vector_fmul_reverse_altivec(float *dst, const float *src0,
+ const float *src1, int len)
+{
+ int i;
+ vector float d, s0, s1, h0, l0,
+ s2, s3, zero = (vector float)vec_splat_u32(0);
+ src1 += len-4;
+ for(i=0; i<len-7; i+=8) {
+ s1 = vec_ld(0, src1-i); // [a,b,c,d]
+ s0 = vec_ld(0, src0+i);
+ l0 = vec_mergel(s1, s1); // [c,c,d,d]
+ s3 = vec_ld(-16, src1-i);
+ h0 = vec_mergeh(s1, s1); // [a,a,b,b]
+ s2 = vec_ld(16, src0+i);
+ s1 = vec_mergeh(vec_mergel(l0,h0), // [d,b,d,b]
+ vec_mergeh(l0,h0)); // [c,a,c,a]
+ // [d,c,b,a]
+ l0 = vec_mergel(s3, s3);
+ d = vec_madd(s0, s1, zero);
+ h0 = vec_mergeh(s3, s3);
+ vec_st(d, 0, dst+i);
+ s3 = vec_mergeh(vec_mergel(l0,h0),
+ vec_mergeh(l0,h0));
+ d = vec_madd(s2, s3, zero);
+ vec_st(d, 16, dst+i);
+ }
+}
+
+static void vector_fmul_add_add_altivec(float *dst, const float *src0,
+ const float *src1, const float *src2,
+ int src3, int len, int step)
+{
+ int i;
+ vector float d, s0, s1, s2, t0, t1, edges;
+ vector unsigned char align = vec_lvsr(0,dst),
+ mask = vec_lvsl(0, dst);
+
+ t0 = vec_ld(0, dst);
+#if 0 //FIXME: there is still something wrong
+ if (step == 2) {
+ int y;
+ vector float d0, d1, s3, t2;
+ vector unsigned int sel =
+ vec_mergeh(vec_splat_u32(-1), vec_splat_u32(0));
+ t1 = vec_ld(16, dst);
+ for (i=0,y=0; i<len-3; i+=4,y+=8) {
+
+ s0 = vec_ld(0,src0+i);
+ s1 = vec_ld(0,src1+i);
+ s2 = vec_ld(0,src2+i);
+
+// t0 = vec_ld(0, dst+y); //[x x x|a]
+// t1 = vec_ld(16, dst+y); //[b c d|e]
+ t2 = vec_ld(31, dst+y); //[f g h|x]
+
+ d = vec_madd(s0,s1,s2); // [A B C D]
+
+ // [A A B B]
+
+ // [C C D D]
+
+ d0 = vec_perm(t0, t1, mask); // [a b c d]
+
+ d0 = vec_sel(vec_mergeh(d, d), d0, sel); // [A b B d]
+
+ edges = vec_perm(t1, t0, mask);
+
+ t0 = vec_perm(edges, d0, align); // [x x x|A]
+
+ t1 = vec_perm(d0, edges, align); // [b B d|e]
+
+ vec_stl(t0, 0, dst+y);
+
+ d1 = vec_perm(t1, t2, mask); // [e f g h]
+
+ d1 = vec_sel(vec_mergel(d, d), d1, sel); // [C f D h]
+
+ edges = vec_perm(t2, t1, mask);
+
+ t1 = vec_perm(edges, d1, align); // [b B d|C]
+
+ t2 = vec_perm(d1, edges, align); // [f D h|x]
+
+ vec_stl(t1, 16, dst+y);
+
+ t0 = t1;
+
+ vec_stl(t2, 31, dst+y);
+
+ t1 = t2;
+ }
+ } else
+ #endif
+ if (step == 1 && src3 == 0)
+ for (i=0; i<len-3; i+=4) {
+ t1 = vec_ld(15, dst+i);
+ s0 = vec_ld(0, src0+i);
+ s1 = vec_ld(0, src1+i);
+ s2 = vec_ld(0, src2+i);
+ edges = vec_perm(t1 ,t0, mask);
+ d = vec_madd(s0,s1,s2);
+ t1 = vec_perm(d, edges, align);
+ t0 = vec_perm(edges, d, align);
+ vec_st(t1, 15, dst+i);
+ vec_st(t0, 0, dst+i);
+ t0 = t1;
+ }
+ else
+ ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
+}
+
+void float_to_int16_altivec(int16_t *dst, const float *src, int len)
+{
+ int i;
+ vector float s0, s1;
+ vector signed int t0, t1;
+ vector signed short d0, d1, d;
+ vector unsigned char align;
+ if(((long)dst)&15) //FIXME
+ for(i=0; i<len-7; i+=8) {
+ s0 = vec_ld(0, src+i);
+ s1 = vec_ld(16, src+i);
+ t0 = vec_cts(s0, 0);
+ d0 = vec_ld(0, dst+i);
+ t1 = vec_cts(s1, 0);
+ d1 = vec_ld(15, dst+i);
+ d = vec_packs(t0,t1);
+ d1 = vec_perm(d1, d0, vec_lvsl(0,dst+i));
+ align = vec_lvsr(0, dst+i);
+ d0 = vec_perm(d1, d, align);
+ d1 = vec_perm(d, d1, align);
+ vec_st(d0, 0, dst+i);
+ vec_st(d1,15, dst+i);
+ }
+ else
+ for(i=0; i<len-7; i+=8) {
+ s0 = vec_ld(0, src+i);
+ s1 = vec_ld(16, src+i);
+ t0 = vec_cts(s0, 0);
+ t1 = vec_cts(s1, 0);
+ d = vec_packs(t0,t1);
+ vec_st(d, 0, dst+i);
+ }
+}
+
+void float_init_altivec(DSPContext* c, AVCodecContext *avctx)
+{
+ c->vector_fmul = vector_fmul_altivec;
+ c->vector_fmul_reverse = vector_fmul_reverse_altivec;
+ c->vector_fmul_add_add = vector_fmul_add_add_altivec;
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT))
+ c->float_to_int16 = float_to_int16_altivec;
+}
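A note on the new float_altivec.c: the two simplest kernels have straightforward scalar contracts, which the AltiVec loops above implement eight floats at a time (so they assume len is a suitable multiple and suitably aligned pointers). Scalar sketches in plain C, with illustrative names:

    /* vector_fmul: in-place element-wise multiply, dst[i] *= src[i]. */
    static void vector_fmul_ref(float *dst, const float *src, int len)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i] *= src[i];
    }

    /* vector_fmul_reverse: multiply src0 by src1 read back to front,
     * dst[i] = src0[i] * src1[len - 1 - i]. */
    static void vector_fmul_reverse_ref(float *dst, const float *src0,
                                        const float *src1, int len)
    {
        int i;
        for (i = 0; i < len; i++)
            dst[i] = src0[i] * src1[len - 1 - i];
    }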
diff --git a/src/libffmpeg/libavcodec/ppc/gcc_fixes.h b/contrib/ffmpeg/libavcodec/ppc/gcc_fixes.h
index 943905bc5..5a4a55188 100644
--- a/src/libffmpeg/libavcodec/ppc/gcc_fixes.h
+++ b/contrib/ffmpeg/libavcodec/ppc/gcc_fixes.h
@@ -2,6 +2,22 @@
* gcc fixes for altivec.
* Used to workaround broken gcc (FSF gcc-3 pre gcc-3.3)
* and to stay somewhat compatible with Darwin.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _GCC_FIXES_
diff --git a/src/libffmpeg/libavcodec/ppc/gmc_altivec.c b/contrib/ffmpeg/libavcodec/ppc/gmc_altivec.c
index 04978d825..42c936bb3 100644
--- a/src/libffmpeg/libavcodec/ppc/gmc_altivec.c
+++ b/contrib/ffmpeg/libavcodec/ppc/gmc_altivec.c
@@ -3,18 +3,20 @@
* AltiVec-enabled
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -32,32 +34,6 @@
void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder)
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- const int A=(16-x16)*(16-y16);
- const int B=( x16)*(16-y16);
- const int C=(16-x16)*( y16);
- const int D=( x16)*( y16);
- int i;
-
-POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
-
- for(i=0; i<h; i++)
- {
- dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8;
- dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8;
- dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8;
- dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8;
- dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8;
- dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8;
- dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8;
- dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8;
- dst+= stride;
- src+= stride;
- }
-
-POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
-
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
const unsigned short __attribute__ ((aligned(16))) rounder_a[8] =
{rounder, rounder, rounder, rounder,
rounder, rounder, rounder, rounder};
@@ -167,6 +143,4 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
}
POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND);
-
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
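A note on the gmc_altivec.c hunk above: the deleted reference loop spells out the bilinear interpolation that gmc1 performs with 1/16-pel fractional offsets. A scalar sketch reconstructed from those removed lines (plain C, illustrative name):

    #include <stdint.h>

    /* Four weights from the fractional offsets, applied to the 2x2
     * neighbourhood of each source pixel, then rounded and scaled down by 256. */
    static void gmc1_ref(uint8_t *dst, const uint8_t *src, int stride, int h,
                         int x16, int y16, int rounder)
    {
        const int A = (16 - x16) * (16 - y16);
        const int B = x16        * (16 - y16);
        const int C = (16 - x16) * y16;
        const int D = x16        * y16;
        int i, x;

        for (i = 0; i < h; i++) {
            for (x = 0; x < 8; x++)
                dst[x] = (A * src[x]          + B * src[x + 1] +
                          C * src[stride + x] + D * src[stride + x + 1] +
                          rounder) >> 8;
            dst += stride;
            src += stride;
        }
    }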
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c b/contrib/ffmpeg/libavcodec/ppc/h264_altivec.c
index 14391e60c..4aa366f97 100755..100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c
+++ b/contrib/ffmpeg/libavcodec/ppc/h264_altivec.c
@@ -1,18 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -34,7 +36,7 @@
#define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num
-#include "dsputil_h264_template_altivec.c"
+#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
@@ -54,7 +56,7 @@
#define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num
-#include "dsputil_h264_template_altivec.c"
+#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
@@ -71,8 +73,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uin
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/8]);\
- uint8_t * const half= (uint8_t*)temp;\
+ DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
@@ -82,15 +83,13 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/8]);\
- uint8_t * const half= (uint8_t*)temp;\
+ DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/8]);\
- uint8_t * const half= (uint8_t*)temp;\
+ DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
@@ -100,94 +99,213 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/8]);\
- uint8_t * const half= (uint8_t*)temp;\
+ DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/4]);\
- uint8_t * const halfH= (uint8_t*)temp;\
- uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/4]);\
- uint8_t * const halfH= (uint8_t*)temp;\
- uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/4]);\
- uint8_t * const halfH= (uint8_t*)temp;\
- uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/4]);\
- uint8_t * const halfH= (uint8_t*)temp;\
- uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4]);\
- int16_t * const tmp= (int16_t*)temp;\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4]);\
- uint8_t * const halfH= (uint8_t*)temp;\
- uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
- int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4]);\
- uint8_t * const halfH= (uint8_t*)temp;\
- uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
- int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4]);\
- uint8_t * const halfV= (uint8_t*)temp;\
- uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
- int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4]);\
- uint8_t * const halfV= (uint8_t*)temp;\
- uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
- int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
+/* this code assumes that stride % 16 == 0 */
+void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
+ signed int ABCD[4] __attribute__((aligned(16))) =
+ {((8 - x) * (8 - y)),
+ ((x) * (8 - y)),
+ ((8 - x) * (y)),
+ ((x) * (y))};
+ register int i;
+ vector unsigned char fperm;
+ const vector signed int vABCD = vec_ld(0, ABCD);
+ const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
+ const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
+ const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
+ const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector signed short v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
+ const vector unsigned short v6us = vec_splat_u16(6);
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+ vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
+ vector unsigned char vsrc0uc, vsrc1uc;
+ vector signed short vsrc0ssH, vsrc1ssH;
+ vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc;
+ vector signed short vsrc2ssH, vsrc3ssH, psum;
+ vector unsigned char vdst, ppsum, vfdst, fsum;
+
+ if (((unsigned long)dst) % 16 == 0) {
+ fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17,
+ 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F);
+ } else {
+ fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F);
+ }
+
+ vsrcAuc = vec_ld(0, src);
+
+ if (loadSecond)
+ vsrcBuc = vec_ld(16, src);
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+
+ vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc1uc = vsrcBuc;
+ else
+ vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+
+ vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc0uc);
+ vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc1uc);
+
+ if (!loadSecond) {// -> !reallyBadAlign
+ for (i = 0 ; i < h ; i++) {
+
+
+ vsrcCuc = vec_ld(stride + 0, src);
+
+ vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+ vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+ vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc2uc);
+ vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc3uc);
+
+ psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+ psum = vec_mladd(vB, vsrc1ssH, psum);
+ psum = vec_mladd(vC, vsrc2ssH, psum);
+ psum = vec_mladd(vD, vsrc3ssH, psum);
+ psum = vec_add(v28ss, psum);
+ psum = vec_sra(psum, v6us);
+
+ vdst = vec_ld(0, dst);
+ ppsum = (vector unsigned char)vec_packsu(psum, psum);
+ fsum = vec_perm(vdst, ppsum, fperm);
+
+ vec_st(fsum, 0, dst);
+
+ vsrc0ssH = vsrc2ssH;
+ vsrc1ssH = vsrc3ssH;
+
+ dst += stride;
+ src += stride;
+ }
+ } else {
+ vector unsigned char vsrcDuc;
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(stride + 0, src);
+ vsrcDuc = vec_ld(stride + 16, src);
+
+ vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc3uc = vsrcDuc;
+ else
+ vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+ vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc2uc);
+ vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc3uc);
+
+ psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+ psum = vec_mladd(vB, vsrc1ssH, psum);
+ psum = vec_mladd(vC, vsrc2ssH, psum);
+ psum = vec_mladd(vD, vsrc3ssH, psum);
+ psum = vec_add(v28ss, psum);
+ psum = vec_sr(psum, v6us);
+
+ vdst = vec_ld(0, dst);
+ ppsum = (vector unsigned char)vec_pack(psum, psum);
+ fsum = vec_perm(vdst, ppsum, fperm);
+
+ vec_st(fsum, 0, dst);
+
+ vsrc0ssH = vsrc2ssH;
+ vsrc1ssH = vsrc3ssH;
+
+ dst += stride;
+ src += stride;
+ }
+ }
+}
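For reference, the routine above is the H.264 8x8 chroma bilinear interpolation with the "no rounding" bias: v28ss evaluates to (1<<5) - 4 = 28, which replaces the usual +32 before the >>6. A scalar sketch of the same arithmetic (hypothetical helper, illustrative only, not part of the patch; assumes <stdint.h> for uint8_t):

static void put_no_rnd_chroma_mc8_c(uint8_t *dst, uint8_t *src,
                                    int stride, int h, int x, int y)
{
    /* bilinear weights from the 1/8-pel fractional offsets x, y */
    const int A = (8 - x) * (8 - y), B = x * (8 - y);
    const int C = (8 - x) * y,       D = x * y;
    int i, j;

    for (i = 0; i < h; i++) {
        for (j = 0; j < 8; j++)
            dst[j] = (A * src[j]          + B * src[j + 1] +
                      C * src[j + stride] + D * src[j + stride + 1] + 28) >> 6;
        dst += stride;
        src += stride;
    }
}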
+
static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
const uint8_t * src2, int dst_stride,
int src_stride1, int h)
@@ -220,8 +338,8 @@ static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
align = vec_lvsr(0, dst);
- tmp1 = vec_perm(edges, d, align);
tmp2 = vec_perm(d, edges, align);
+ tmp1 = vec_perm(edges, d, align);
vec_st(tmp2, 15, dst);
vec_st(tmp1, 0 , dst);
@@ -262,8 +380,8 @@ static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
align = vec_lvsr(0, dst);
- tmp1 = vec_perm(edges, d, align);
tmp2 = vec_perm(d, edges, align);
+ tmp1 = vec_perm(edges, d, align);
vec_st(tmp2, 15, dst);
vec_st(tmp1, 0 , dst);
@@ -285,6 +403,7 @@ void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
#ifdef HAVE_ALTIVEC
if (has_altivec()) {
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
+ c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
#define dspfunc(PFX, IDX, NUM) \
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c b/contrib/ffmpeg/libavcodec/ppc/h264_template_altivec.c
index 37f4de58f..e8ad67f2f 100755..100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c
+++ b/contrib/ffmpeg/libavcodec/ppc/h264_template_altivec.c
@@ -1,18 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/ppc/idct_altivec.c b/contrib/ffmpeg/libavcodec/ppc/idct_altivec.c
index 93d63cfd3..cee46fc25 100644
--- a/src/libffmpeg/libavcodec/ppc/idct_altivec.c
+++ b/contrib/ffmpeg/libavcodec/ppc/idct_altivec.c
@@ -1,18 +1,20 @@
/*
* Copyright (c) 2001 Michel Lespinasse
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -167,12 +169,6 @@ static const_vector_s16_t constants[5] = {
void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block)
{
POWERPC_PERF_DECLARE(altivec_idct_put_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
- void simple_idct_put(uint8_t *dest, int line_size, int16_t *block);
- simple_idct_put(dest, stride, (int16_t*)block);
-POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
vector_u8_t tmp;
#ifdef POWERPC_PERFORMANCE_REPORT
@@ -195,18 +191,11 @@ POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
COPY (dest, vx7)
POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
void idct_add_altivec(uint8_t* dest, int stride, vector_s16_t* block)
{
POWERPC_PERF_DECLARE(altivec_idct_add_num, 1);
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
- void simple_idct_add(uint8_t *dest, int line_size, int16_t *block);
- simple_idct_add(dest, stride, (int16_t*)block);
-POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
vector_u8_t tmp;
vector_s16_t tmp2, tmp3;
vector_u8_t perm0;
@@ -244,6 +233,5 @@ POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
ADD (dest, vx7, perm1)
POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
diff --git a/contrib/ffmpeg/libavcodec/ppc/mathops.h b/contrib/ffmpeg/libavcodec/ppc/mathops.h
new file mode 100644
index 000000000..6af23f246
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ppc/mathops.h
@@ -0,0 +1,33 @@
+/*
+ * simple math operations
+ * Copyright (c) 2001, 2002 Fabrice Bellard.
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#if defined(ARCH_POWERPC_405)
+/* signed 16x16 -> 32 multiply add accumulate */
+# define MAC16(rt, ra, rb) \
+ asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
+
+/* signed 16x16 -> 32 multiply */
+# define MUL16(ra, rb) \
+ ({ int __rt; \
+ asm ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
+ __rt; })
+#endif
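On a PowerPC 405 build these expand to single maclhw/mullhw instructions; elsewhere the generic C fallbacks apply. A hypothetical usage sketch (illustrative only, not part of the patch):

/* 16-bit dot product using the 16x16 -> 32 helpers above */
static inline int dot16(const int16_t *a, const int16_t *b, int n)
{
    int i, sum = MUL16(a[0], b[0]);   /* signed 16x16 -> 32 multiply */
    for (i = 1; i < n; i++)
        MAC16(sum, a[i], b[i]);       /* sum += a[i] * b[i] */
    return sum;
}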
diff --git a/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c b/contrib/ffmpeg/libavcodec/ppc/mpegvideo_altivec.c
index 7a771a8ec..3822cb20e 100644
--- a/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c
+++ b/contrib/ffmpeg/libavcodec/ppc/mpegvideo_altivec.c
@@ -4,18 +4,20 @@
* dct_unquantize_h263_altivec:
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -50,39 +52,6 @@ do { \
d = vec_mergel(_trans_acl, _trans_bdl); \
} while (0)
-#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
-do { \
- __typeof__(a) _A1, _B1, _C1, _D1, _E1, _F1, _G1, _H1; \
- __typeof__(a) _A2, _B2, _C2, _D2, _E2, _F2, _G2, _H2; \
- \
- _A1 = vec_mergeh (a, e); \
- _B1 = vec_mergel (a, e); \
- _C1 = vec_mergeh (b, f); \
- _D1 = vec_mergel (b, f); \
- _E1 = vec_mergeh (c, g); \
- _F1 = vec_mergel (c, g); \
- _G1 = vec_mergeh (d, h); \
- _H1 = vec_mergel (d, h); \
- \
- _A2 = vec_mergeh (_A1, _E1); \
- _B2 = vec_mergel (_A1, _E1); \
- _C2 = vec_mergeh (_B1, _F1); \
- _D2 = vec_mergel (_B1, _F1); \
- _E2 = vec_mergeh (_C1, _G1); \
- _F2 = vec_mergel (_C1, _G1); \
- _G2 = vec_mergeh (_D1, _H1); \
- _H2 = vec_mergel (_D1, _H1); \
- \
- a = vec_mergeh (_A2, _E2); \
- b = vec_mergel (_A2, _E2); \
- c = vec_mergeh (_B2, _F2); \
- d = vec_mergel (_B2, _F2); \
- e = vec_mergeh (_C2, _G2); \
- f = vec_mergel (_C2, _G2); \
- g = vec_mergeh (_D2, _H2); \
- h = vec_mergel (_D2, _H2); \
-} while (0)
-
// Loads a four-byte value (int or float) from the target address
// into every element in the target vector. Only works if the
@@ -552,19 +521,6 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1);
nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
}
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
- for(;i<=nCoeffs;i++) {
- level = block[i];
- if (level) {
- if (level < 0) {
- level = level * qmul - qadd;
- } else {
- level = level * qmul + qadd;
- }
- block[i] = level;
- }
- }
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
{
register const_vector signed short vczero = (const_vector signed short)vec_splat_s16(0);
short __attribute__ ((aligned(16))) qmul8[] =
@@ -643,7 +599,5 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1);
block[0] = backup_0;
}
}
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-
POWERPC_PERF_STOP_COUNT(altivec_dct_unquantize_h263_num, nCoeffs == 63);
}
diff --git a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c b/contrib/ffmpeg/libavcodec/ppc/mpegvideo_ppc.c
index b391b4294..c5e822f77 100644
--- a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c
+++ b/contrib/ffmpeg/libavcodec/ppc/mpegvideo_ppc.c
@@ -1,18 +1,20 @@
/*
* Copyright (c) 2002 Dieter Shirley
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -46,11 +48,7 @@ void MPV_common_init_ppc(MpegEncContext *s)
{
s->dsp.idct_put = idct_put_altivec;
s->dsp.idct_add = idct_add_altivec;
-#ifndef ALTIVEC_USE_REFERENCE_C_CODE
s->dsp.idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
-#else /* ALTIVEC_USE_REFERENCE_C_CODE */
- s->dsp.idct_permutation_type = FF_NO_IDCT_PERM;
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
}
diff --git a/contrib/ffmpeg/libavcodec/ppc/snow_altivec.c b/contrib/ffmpeg/libavcodec/ppc/snow_altivec.c
new file mode 100644
index 000000000..b15672ffe
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ppc/snow_altivec.c
@@ -0,0 +1,788 @@
+/*
+ * Altivec optimized snow DSP utils
+ * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+#include "dsputil_altivec.h"
+#include "../snow.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+
+
+//FIXME remove this replication
+#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num)))
+
+static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line)
+{
+ int offset;
+ DWTELEM * buffer;
+
+// av_log(NULL, AV_LOG_DEBUG, "Cache hit: %d\n", line);
+
+ assert(buf->data_stack_top >= 0);
+// assert(!buf->line[line]);
+ if (buf->line[line])
+ return buf->line[line];
+
+ offset = buf->line_width * line;
+ buffer = buf->data_stack[buf->data_stack_top];
+ buf->data_stack_top--;
+ buf->line[line] = buffer;
+
+// av_log(NULL, AV_LOG_DEBUG, "slice_buffer_load_line: line: %d remaining: %d\n", line, buf->data_stack_top + 1);
+
+ return buffer;
+}
+
+
+//altivec code
+
+void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width)
+{
+ const int w2= (width+1)>>1;
+ DECLARE_ALIGNED_16(DWTELEM, temp[(width>>1)]);
+ const int w_l= (width>>1);
+ const int w_r= w2 - 1;
+ int i;
+ vector signed int t1, t2, x, y, tmp1, tmp2;
+ vector signed int *vbuf, *vtmp;
+ vector unsigned char align;
+
+
+
+ { // Lift 0
+ DWTELEM * const ref = b + w2 - 1;
+ DWTELEM b_0 = b[0];
+ vbuf = (vector signed int *)b;
+
+ tmp1 = vec_ld (0, ref);
+ align = vec_lvsl (0, ref);
+ tmp2 = vec_ld (15, ref);
+ t1= vec_perm(tmp1, tmp2, align);
+
+ i = 0;
+
+ for (i=0; i<w_l-15; i+=16) {
+#if 0
+ b[i+0] = b[i+0] - ((3 * (ref[i+0] + ref[i+1]) + 4) >> 3);
+ b[i+1] = b[i+1] - ((3 * (ref[i+1] + ref[i+2]) + 4) >> 3);
+ b[i+2] = b[i+2] - ((3 * (ref[i+2] + ref[i+3]) + 4) >> 3);
+ b[i+3] = b[i+3] - ((3 * (ref[i+3] + ref[i+4]) + 4) >> 3);
+#else
+
+ tmp1 = vec_ld (0, ref+4+i);
+ tmp2 = vec_ld (15, ref+4+i);
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_add(vec_add(y,y),y);
+
+ tmp1 = vec_ld (0, ref+8+i);
+
+ y = vec_add(y, vec_splat_s32(4));
+ y = vec_sra(y, vec_splat_u32(3));
+
+ tmp2 = vec_ld (15, ref+8+i);
+
+ *vbuf = vec_sub(*vbuf, y);
+
+ t1=t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_add(vec_add(y,y),y);
+
+ tmp1 = vec_ld (0, ref+12+i);
+
+ y = vec_add(y, vec_splat_s32(4));
+ y = vec_sra(y, vec_splat_u32(3));
+
+ tmp2 = vec_ld (15, ref+12+i);
+
+ *vbuf = vec_sub(*vbuf, y);
+
+ t1=t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_add(vec_add(y,y),y);
+
+ tmp1 = vec_ld (0, ref+16+i);
+
+ y = vec_add(y, vec_splat_s32(4));
+ y = vec_sra(y, vec_splat_u32(3));
+
+ tmp2 = vec_ld (15, ref+16+i);
+
+ *vbuf = vec_sub(*vbuf, y);
+
+ t1=t2;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_add(vec_add(y,y),y);
+
+ vbuf++;
+
+ y = vec_add(y, vec_splat_s32(4));
+ y = vec_sra(y, vec_splat_u32(3));
+ *vbuf = vec_sub(*vbuf, y);
+
+ t1=t2;
+
+ vbuf++;
+
+#endif
+ }
+
+ snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
+ b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
+ }
+
+ { // Lift 1
+ DWTELEM * const dst = b+w2;
+
+ i = 0;
+ for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
+ dst[i] = dst[i] - (b[i] + b[i + 1]);
+ }
+
+ align = vec_lvsl(0, b+i);
+ tmp1 = vec_ld(0, b+i);
+ vbuf = (vector signed int*) (dst + i);
+ tmp2 = vec_ld(15, b+i);
+
+ t1 = vec_perm(tmp1, tmp2, align);
+
+ for (; i<w_r-3; i+=4) {
+
+#if 0
+ dst[i] = dst[i] - (b[i] + b[i + 1]);
+ dst[i+1] = dst[i+1] - (b[i+1] + b[i + 2]);
+ dst[i+2] = dst[i+2] - (b[i+2] + b[i + 3]);
+ dst[i+3] = dst[i+3] - (b[i+3] + b[i + 4]);
+#else
+
+ tmp1 = vec_ld(0, b+4+i);
+ tmp2 = vec_ld(15, b+4+i);
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1, vec_sld(t1,t2,4));
+ *vbuf = vec_sub (*vbuf, y);
+
+ vbuf++;
+
+ t1 = t2;
+
+#endif
+
+ }
+
+ snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
+ }
+
+ { // Lift 2
+ DWTELEM * const ref = b+w2 - 1;
+ DWTELEM b_0 = b[0];
+ vbuf= (vector signed int *) b;
+
+ tmp1 = vec_ld (0, ref);
+ align = vec_lvsl (0, ref);
+ tmp2 = vec_ld (15, ref);
+ t1= vec_perm(tmp1, tmp2, align);
+
+ i = 0;
+ for (; i<w_l-15; i+=16) {
+#if 0
+ b[i] = b[i] - (((8 -(ref[i] + ref[i+1])) - (b[i] <<2)) >> 4);
+ b[i+1] = b[i+1] - (((8 -(ref[i+1] + ref[i+2])) - (b[i+1]<<2)) >> 4);
+ b[i+2] = b[i+2] - (((8 -(ref[i+2] + ref[i+3])) - (b[i+2]<<2)) >> 4);
+ b[i+3] = b[i+3] - (((8 -(ref[i+3] + ref[i+4])) - (b[i+3]<<2)) >> 4);
+#else
+ tmp1 = vec_ld (0, ref+4+i);
+ tmp2 = vec_ld (15, ref+4+i);
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_sub(vec_splat_s32(8),y);
+
+ tmp1 = vec_ld (0, ref+8+i);
+
+ x = vec_sl(*vbuf,vec_splat_u32(2));
+ y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+
+ tmp2 = vec_ld (15, ref+8+i);
+
+ *vbuf = vec_sub( *vbuf, y);
+
+ t1 = t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_sub(vec_splat_s32(8),y);
+
+ tmp1 = vec_ld (0, ref+12+i);
+
+ x = vec_sl(*vbuf,vec_splat_u32(2));
+ y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+
+ tmp2 = vec_ld (15, ref+12+i);
+
+ *vbuf = vec_sub( *vbuf, y);
+
+ t1 = t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_sub(vec_splat_s32(8),y);
+
+ tmp1 = vec_ld (0, ref+16+i);
+
+ x = vec_sl(*vbuf,vec_splat_u32(2));
+ y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+
+ tmp2 = vec_ld (15, ref+16+i);
+
+ *vbuf = vec_sub( *vbuf, y);
+
+ t1 = t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_sub(vec_splat_s32(8),y);
+
+ t1 = t2;
+
+ x = vec_sl(*vbuf,vec_splat_u32(2));
+ y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+ *vbuf = vec_sub( *vbuf, y);
+
+ vbuf++;
+
+#endif
+ }
+
+ snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
+ b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
+ }
+
+ { // Lift 3
+ DWTELEM * const src = b+w2;
+
+ vbuf = (vector signed int *)b;
+ vtmp = (vector signed int *)temp;
+
+ i = 0;
+ align = vec_lvsl(0, src);
+
+ for (; i<w_r-3; i+=4) {
+#if 0
+ temp[i] = src[i] - ((-3*(b[i] + b[i+1]))>>1);
+ temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1);
+ temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1);
+ temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1);
+#else
+ tmp1 = vec_ld(0,src+i);
+ t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4));
+ tmp2 = vec_ld(15,src+i);
+ t1 = vec_sub(vec_splat_s32(0),t1); //bad!
+ t1 = vec_add(t1,vec_add(t1,t1));
+ t2 = vec_perm(tmp1 ,tmp2 ,align);
+ t1 = vec_sra(t1,vec_splat_u32(1));
+ vbuf++;
+ *vtmp = vec_sub(t2,t1);
+ vtmp++;
+
+#endif
+
+ }
+
+ snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1);
+ }
+
+ {
+ //Interleave
+ int a;
+ vector signed int *t = (vector signed int *)temp,
+ *v = (vector signed int *)b;
+
+ snow_interleave_line_header(&i, width, b, temp);
+
+ for (; (i & 0xE) != 0xE; i-=2){
+ b[i+1] = temp[i>>1];
+ b[i] = b[i>>1];
+ }
+ for (i-=14; i>=0; i-=16){
+ a=i/4;
+
+ v[a+3]=vec_mergel(v[(a>>1)+1],t[(a>>1)+1]);
+ v[a+2]=vec_mergeh(v[(a>>1)+1],t[(a>>1)+1]);
+ v[a+1]=vec_mergel(v[a>>1],t[a>>1]);
+ v[a]=vec_mergeh(v[a>>1],t[a>>1]);
+
+ }
+
+ }
+}
+
+void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width)
+{
+ int i, w4 = width/4;
+ vector signed int *v0, *v1,*v2,*v3,*v4,*v5;
+ vector signed int t1, t2;
+
+ v0=(vector signed int *)b0;
+ v1=(vector signed int *)b1;
+ v2=(vector signed int *)b2;
+ v3=(vector signed int *)b3;
+ v4=(vector signed int *)b4;
+ v5=(vector signed int *)b5;
+
+ for (i=0; i< w4;i++)
+ {
+
+ #if 0
+ b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
+ b3[i] -= ((b2[i] + b4[i]));
+ b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4;
+ b1[i] += (3*(b0[i] + b2[i]))>>1;
+ #else
+ t1 = vec_add(v3[i], v5[i]);
+ t2 = vec_add(t1, vec_add(t1,t1));
+ t1 = vec_add(t2, vec_splat_s32(4));
+ v4[i] = vec_sub(v4[i], vec_sra(t1,vec_splat_u32(3)));
+
+ v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i]));
+
+ t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i]));
+ t2 = vec_sl(v2[i], vec_splat_u32(2));
+ v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2),vec_splat_u32(4)));
+ t1 = vec_add(v0[i], v2[i]);
+ t2 = vec_add(t1, vec_add(t1,t1));
+ v1[i] = vec_add(v1[i], vec_sra(t2,vec_splat_u32(1)));
+
+ #endif
+ }
+
+ for(i*=4; i < width; i++)
+ {
+ b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
+ b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
+ b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
+ b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
+ }
+}
+
+#define LOAD_BLOCKS \
+ tmp1 = vec_ld(0, &block[3][y*src_stride]);\
+ align = vec_lvsl(0, &block[3][y*src_stride]);\
+ tmp2 = vec_ld(15, &block[3][y*src_stride]);\
+\
+ b3 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, &block[2][y*src_stride]);\
+ align = vec_lvsl(0, &block[2][y*src_stride]);\
+ tmp2 = vec_ld(15, &block[2][y*src_stride]);\
+\
+ b2 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, &block[1][y*src_stride]);\
+ align = vec_lvsl(0, &block[1][y*src_stride]);\
+ tmp2 = vec_ld(15, &block[1][y*src_stride]);\
+\
+ b1 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, &block[0][y*src_stride]);\
+ align = vec_lvsl(0, &block[0][y*src_stride]);\
+ tmp2 = vec_ld(15, &block[0][y*src_stride]);\
+\
+ b0 = vec_perm(tmp1,tmp2,align);
+
+#define LOAD_OBMCS \
+ tmp1 = vec_ld(0, obmc1);\
+ align = vec_lvsl(0, obmc1);\
+ tmp2 = vec_ld(15, obmc1);\
+\
+ ob1 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, obmc2);\
+ align = vec_lvsl(0, obmc2);\
+ tmp2 = vec_ld(15, obmc2);\
+\
+ ob2 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, obmc3);\
+ align = vec_lvsl(0, obmc3);\
+ tmp2 = vec_ld(15, obmc3);\
+\
+ ob3 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, obmc4);\
+ align = vec_lvsl(0, obmc4);\
+ tmp2 = vec_ld(15, obmc4);\
+\
+ ob4 = vec_perm(tmp1,tmp2,align);
+
+/* interleave logic
+ * h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ]
+ * h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ]
+ * h <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ]
+ */
+
+#define STEPS_0_1\
+ h1 = (vector unsigned short)\
+ vec_mergeh(ob1, ob2);\
+\
+ h2 = (vector unsigned short)\
+ vec_mergeh(ob3, ob4);\
+\
+ ih = (vector unsigned char)\
+ vec_mergeh(h1,h2);\
+\
+ l1 = (vector unsigned short) vec_mergeh(b3, b2);\
+\
+ ih1 = (vector unsigned char) vec_mergel(h1, h2);\
+\
+ l2 = (vector unsigned short) vec_mergeh(b1, b0);\
+\
+ il = (vector unsigned char) vec_mergeh(l1, l2);\
+\
+ v[0] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
+\
+ il1 = (vector unsigned char) vec_mergel(l1, l2);\
+\
+ v[1] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
+
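Per 32-bit lane, the vec_msum of the interleaved OBMC weights and block bytes reduces to the scalar accumulation below (reference sketch for one row y, not part of the patch):

/* vbuf[x] for x = 0..7 is the OBMC-weighted sum of the four source blocks */
for (x = 0; x < 8; x++)
    vbuf[x] = obmc1[x] * block[3][y*src_stride + x]
            + obmc2[x] * block[2][y*src_stride + x]
            + obmc3[x] * block[1][y*src_stride + x]
            + obmc4[x] * block[0][y*src_stride + x];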
+#define FINAL_STEP_SCALAR\
+ for(x=0; x<b_w; x++)\
+ if(add){\
+ vbuf[x] += dst[x + src_x];\
+ vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;\
+ if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);\
+ dst8[x + y*src_stride] = vbuf[x];\
+ }else{\
+ dst[x + src_x] -= vbuf[x];\
+ }
+
+static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
+ const int obmc_stride,
+ uint8_t * * block, int b_w,
+ int b_h, int src_x, int src_y,
+ int src_stride, slice_buffer * sb,
+ int add, uint8_t * dst8)
+{
+ int y, x;
+ DWTELEM * dst;
+ vector unsigned short h1, h2, l1, l2;
+ vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+ vector unsigned char b0,b1,b2,b3;
+ vector unsigned char ob1,ob2,ob3,ob4;
+
+ DECLARE_ALIGNED_16(int, vbuf[16]);
+ vector signed int *v = (vector signed int *)vbuf, *d;
+
+ for(y=0; y<b_h; y++){
+ //FIXME ugly misuse of obmc_stride
+
+ uint8_t *obmc1= obmc + y*obmc_stride;
+ uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+ uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+ uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+ dst = slice_buffer_get_line(sb, src_y + y);
+ d = (vector signed int *)(dst + src_x);
+
+//FIXME I could avoid some loads!
+
+ // load blocks
+ LOAD_BLOCKS
+
+ // load obmcs
+ LOAD_OBMCS
+
+ // steps 0 1
+ STEPS_0_1
+
+ FINAL_STEP_SCALAR
+
+ }
+
+}
+
+#define STEPS_2_3\
+ h1 = (vector unsigned short) vec_mergel(ob1, ob2);\
+\
+ h2 = (vector unsigned short) vec_mergel(ob3, ob4);\
+\
+ ih = (vector unsigned char) vec_mergeh(h1,h2);\
+\
+ l1 = (vector unsigned short) vec_mergel(b3, b2);\
+\
+ l2 = (vector unsigned short) vec_mergel(b1, b0);\
+\
+ ih1 = (vector unsigned char) vec_mergel(h1,h2);\
+\
+ il = (vector unsigned char) vec_mergeh(l1,l2);\
+\
+ v[2] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
+\
+ il1 = (vector unsigned char) vec_mergel(l1,l2);\
+\
+ v[3] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
+
+
+static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
+ const int obmc_stride,
+ uint8_t * * block, int b_w,
+ int b_h, int src_x, int src_y,
+ int src_stride, slice_buffer * sb,
+ int add, uint8_t * dst8)
+{
+ int y, x;
+ DWTELEM * dst;
+ vector unsigned short h1, h2, l1, l2;
+ vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+ vector unsigned char b0,b1,b2,b3;
+ vector unsigned char ob1,ob2,ob3,ob4;
+ DECLARE_ALIGNED_16(int, vbuf[b_w]);
+ vector signed int *v = (vector signed int *)vbuf, *d;
+
+ for(y=0; y<b_h; y++){
+ //FIXME ugly misuse of obmc_stride
+
+ uint8_t *obmc1= obmc + y*obmc_stride;
+ uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+ uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+ uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+ dst = slice_buffer_get_line(sb, src_y + y);
+ d = (vector signed int *)(dst + src_x);
+
+ // load blocks
+ LOAD_BLOCKS
+
+ // load obmcs
+ LOAD_OBMCS
+
+ // steps 0 1 2 3
+ STEPS_0_1
+
+ STEPS_2_3
+
+ FINAL_STEP_SCALAR
+
+ }
+}
+
+#define FINAL_STEP_VEC \
+\
+ if(add)\
+ {\
+ for(x=0; x<b_w/4; x++)\
+ {\
+ v[x] = vec_add(v[x], d[x]);\
+ v[x] = vec_sra(vec_add(v[x],\
+ vec_sl( vec_splat_s32(1),\
+ vec_splat_u32(7))),\
+ vec_splat_u32(8));\
+\
+ mask = (vector bool int) vec_sl((vector signed int)\
+ vec_cmpeq(v[x],v[x]),vec_splat_u32(8));\
+ mask = (vector bool int) vec_and(v[x],vec_nor(mask,mask));\
+\
+ mask = (vector bool int)\
+ vec_cmpeq((vector signed int)mask,\
+ (vector signed int)vec_splat_u32(0));\
+\
+ vs = vec_sra(v[x],vec_splat_u32(8));\
+ vs = vec_sra(v[x],vec_splat_u32(8));\
+ vs = vec_sra(v[x],vec_splat_u32(15));\
+\
+ vs = vec_nor(vs,vs);\
+\
+ v[x]= vec_sel(v[x],vs,mask);\
+ }\
+\
+ for(x=0; x<b_w; x++)\
+ dst8[x + y*src_stride] = vbuf[x];\
+\
+ }\
+ else\
+ for(x=0; x<b_w/4; x++)\
+ d[x] = vec_sub(d[x], v[x]);
+
+static void inner_add_yblock_a_bw_8_obmc_16_altivec(uint8_t *obmc,
+ const int obmc_stride,
+ uint8_t * * block, int b_w,
+ int b_h, int src_x, int src_y,
+ int src_stride, slice_buffer * sb,
+ int add, uint8_t * dst8)
+{
+ int y, x;
+ DWTELEM * dst;
+ vector bool int mask;
+ vector signed int vs;
+ vector unsigned short h1, h2, l1, l2;
+ vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+ vector unsigned char b0,b1,b2,b3;
+ vector unsigned char ob1,ob2,ob3,ob4;
+
+ DECLARE_ALIGNED_16(int, vbuf[16]);
+ vector signed int *v = (vector signed int *)vbuf, *d;
+
+ for(y=0; y<b_h; y++){
+ //FIXME ugly misuse of obmc_stride
+
+ uint8_t *obmc1= obmc + y*obmc_stride;
+ uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+ uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+ uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+ dst = slice_buffer_get_line(sb, src_y + y);
+ d = (vector signed int *)(dst + src_x);
+
+//FIXME I could avoid some loads!
+
+ // load blocks
+ LOAD_BLOCKS
+
+ // load obmcs
+ LOAD_OBMCS
+
+ // steps 0 1
+ STEPS_0_1
+
+ FINAL_STEP_VEC
+
+ }
+
+}
+
+static void inner_add_yblock_a_bw_16_obmc_32_altivec(uint8_t *obmc,
+ const int obmc_stride,
+ uint8_t * * block, int b_w,
+ int b_h, int src_x, int src_y,
+ int src_stride, slice_buffer * sb,
+ int add, uint8_t * dst8)
+{
+ int y, x;
+ DWTELEM * dst;
+ vector bool int mask;
+ vector signed int vs;
+ vector unsigned short h1, h2, l1, l2;
+ vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+ vector unsigned char b0,b1,b2,b3;
+ vector unsigned char ob1,ob2,ob3,ob4;
+ DECLARE_ALIGNED_16(int, vbuf[b_w]);
+ vector signed int *v = (vector signed int *)vbuf, *d;
+
+ for(y=0; y<b_h; y++){
+ //FIXME ugly misuse of obmc_stride
+
+ uint8_t *obmc1= obmc + y*obmc_stride;
+ uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+ uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+ uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+ dst = slice_buffer_get_line(sb, src_y + y);
+ d = (vector signed int *)(dst + src_x);
+
+ // load blocks
+ LOAD_BLOCKS
+
+ // load obmcs
+ LOAD_OBMCS
+
+ // steps 0 1 2 3
+ STEPS_0_1
+
+ STEPS_2_3
+
+ FINAL_STEP_VEC
+
+ }
+}
+
+
+void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
+ uint8_t * * block, int b_w, int b_h,
+ int src_x, int src_y, int src_stride,
+ slice_buffer * sb, int add,
+ uint8_t * dst8)
+{
+ if (src_x&15) {
+ if (b_w == 16)
+ inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
+ b_w, b_h, src_x, src_y,
+ src_stride, sb, add, dst8);
+ else if (b_w == 8)
+ inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
+ b_w, b_h, src_x, src_y,
+ src_stride, sb, add, dst8);
+ else
+ ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
+ src_y, src_stride, sb, add, dst8);
+ } else {
+ if (b_w == 16)
+ inner_add_yblock_a_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
+ b_w, b_h, src_x, src_y,
+ src_stride, sb, add, dst8);
+ else if (b_w == 8)
+ inner_add_yblock_a_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
+ b_w, b_h, src_x, src_y,
+ src_stride, sb, add, dst8);
+ else
+ ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
+ src_y, src_stride, sb, add, dst8);
+ }
+}
+
+
+void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
+{
+ c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
+ c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
+ c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
+}
diff --git a/contrib/ffmpeg/libavcodec/ppc/types_altivec.h b/contrib/ffmpeg/libavcodec/ppc/types_altivec.h
new file mode 100644
index 000000000..f29026e04
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ppc/types_altivec.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2006 Guillaume Poirier <gpoirier@mplayerhq.hu>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/***********************************************************************
+ * Vector types
+ **********************************************************************/
+#define vec_u8_t vector unsigned char
+#define vec_s8_t vector signed char
+#define vec_u16_t vector unsigned short
+#define vec_s16_t vector signed short
+#define vec_u32_t vector unsigned int
+#define vec_s32_t vector signed int
+
+/***********************************************************************
+ * Null vector
+ **********************************************************************/
+#define LOAD_ZERO const vec_u8_t zerov = vec_splat_u8( 0 )
+
+#define zero_u8v (vec_u8_t) zerov
+#define zero_s8v (vec_s8_t) zerov
+#define zero_u16v (vec_u16_t) zerov
+#define zero_s16v (vec_s16_t) zerov
+#define zero_u32v (vec_u32_t) zerov
+#define zero_s32v (vec_s32_t) zerov
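A hypothetical usage sketch of these shorthands (illustrative only, not part of the patch):

/* horizontal sum of the eight 16-bit elements of a vector */
static inline int sum_s16_elements(vec_s16_t v)
{
    LOAD_ZERO;                                    /* declares zerov once */
    int out[4] __attribute__((aligned(16)));
    vec_s32_t s = vec_sums(vec_sum4s(v, zero_s32v), zero_s32v);
    vec_st(s, 0, out);                            /* total lands in out[3] */
    return out[3];
}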
diff --git a/contrib/ffmpeg/libavcodec/ppc/vc1dsp_altivec.c b/contrib/ffmpeg/libavcodec/ppc/vc1dsp_altivec.c
new file mode 100644
index 000000000..114c9d41f
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ppc/vc1dsp_altivec.c
@@ -0,0 +1,338 @@
+/*
+ * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+
+#include "dsputil_altivec.h"
+
+// main steps of 8x8 transform
+#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
+do { \
+ t0 = vec_sl(vec_add(s0, s4), vec_2); \
+ t0 = vec_add(vec_sl(t0, vec_1), t0); \
+ t0 = vec_add(t0, vec_rnd); \
+ t1 = vec_sl(vec_sub(s0, s4), vec_2); \
+ t1 = vec_add(vec_sl(t1, vec_1), t1); \
+ t1 = vec_add(t1, vec_rnd); \
+ t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
+ t2 = vec_add(t2, vec_sl(s2, vec_4)); \
+ t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
+ t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
+ t4 = vec_add(t0, t2); \
+ t5 = vec_add(t1, t3); \
+ t6 = vec_sub(t1, t3); \
+ t7 = vec_sub(t0, t2); \
+\
+ t0 = vec_sl(vec_add(s1, s3), vec_4); \
+ t0 = vec_add(t0, vec_sl(s5, vec_3)); \
+ t0 = vec_add(t0, vec_sl(s7, vec_2)); \
+ t0 = vec_add(t0, vec_sub(s5, s3)); \
+\
+ t1 = vec_sl(vec_sub(s1, s5), vec_4); \
+ t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
+ t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
+ t1 = vec_sub(t1, vec_add(s1, s7)); \
+\
+ t2 = vec_sl(vec_sub(s7, s3), vec_4); \
+ t2 = vec_add(t2, vec_sl(s1, vec_3)); \
+ t2 = vec_add(t2, vec_sl(s5, vec_2)); \
+ t2 = vec_add(t2, vec_sub(s1, s7)); \
+\
+ t3 = vec_sl(vec_sub(s5, s7), vec_4); \
+ t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
+ t3 = vec_add(t3, vec_sl(s1, vec_2)); \
+ t3 = vec_sub(t3, vec_add(s3, s5)); \
+\
+ s0 = vec_add(t4, t0); \
+ s1 = vec_add(t5, t1); \
+ s2 = vec_add(t6, t2); \
+ s3 = vec_add(t7, t3); \
+ s4 = vec_sub(t7, t3); \
+ s5 = vec_sub(t6, t2); \
+ s6 = vec_sub(t5, t1); \
+ s7 = vec_sub(t4, t0); \
+}while(0)
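Expanding the shift/add chains, STEP8 is the standard VC-1 8-point butterfly with integer weights {12, 16, 6} in the even part and {16, 15, 9, 4} in the odd part; scalar reference (illustrative only, not part of the patch):

/* even part */
t0 = 12*(s0 + s4) + rnd;   t1 = 12*(s0 - s4) + rnd;
t2 = 16*s2 +  6*s6;        t3 =  6*s2 - 16*s6;
t4 = t0 + t2;  t5 = t1 + t3;  t6 = t1 - t3;  t7 = t0 - t2;
/* odd part */
o0 = 16*s1 + 15*s3 +  9*s5 +  4*s7;
o1 = 15*s1 -  4*s3 - 16*s5 -  9*s7;
o2 =  9*s1 - 16*s3 +  4*s5 + 15*s7;
o3 =  4*s1 -  9*s3 + 15*s5 - 16*s7;
/* recombine */
s0 = t4 + o0;  s1 = t5 + o1;  s2 = t6 + o2;  s3 = t7 + o3;
s4 = t7 - o3;  s5 = t6 - o2;  s6 = t5 - o1;  s7 = t4 - o0;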
+
+#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
+do { \
+ s0 = vec_sra(s0, vec_3); \
+ s1 = vec_sra(s1, vec_3); \
+ s2 = vec_sra(s2, vec_3); \
+ s3 = vec_sra(s3, vec_3); \
+ s4 = vec_sra(s4, vec_3); \
+ s5 = vec_sra(s5, vec_3); \
+ s6 = vec_sra(s6, vec_3); \
+ s7 = vec_sra(s7, vec_3); \
+}while(0)
+
+#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
+do { \
+ s0 = vec_sra(s0, vec_7); \
+ s1 = vec_sra(s1, vec_7); \
+ s2 = vec_sra(s2, vec_7); \
+ s3 = vec_sra(s3, vec_7); \
+ s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
+ s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
+ s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
+ s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
+}while(0)
+
+/* main steps of 4x4 transform */
+#define STEP4(s0, s1, s2, s3, vec_rnd) \
+do { \
+ t1 = vec_add(vec_sl(s0, vec_4), s0); \
+ t1 = vec_add(t1, vec_rnd); \
+ t2 = vec_add(vec_sl(s2, vec_4), s2); \
+ t0 = vec_add(t1, t2); \
+ t1 = vec_sub(t1, t2); \
+ t3 = vec_sl(vec_sub(s3, s1), vec_1); \
+ t3 = vec_add(t3, vec_sl(t3, vec_2)); \
+ t2 = vec_add(t3, vec_sl(s1, vec_5)); \
+ t3 = vec_add(t3, vec_sl(s3, vec_3)); \
+ t3 = vec_add(t3, vec_sl(s3, vec_2)); \
+ s0 = vec_add(t0, t2); \
+ s1 = vec_sub(t1, t3); \
+ s2 = vec_add(t1, t3); \
+ s3 = vec_sub(t0, t2); \
+}while (0)
+
+#define SHIFT_HOR4(s0, s1, s2, s3) \
+ s0 = vec_sra(s0, vec_3); \
+ s1 = vec_sra(s1, vec_3); \
+ s2 = vec_sra(s2, vec_3); \
+ s3 = vec_sra(s3, vec_3);
+
+#define SHIFT_VERT4(s0, s1, s2, s3) \
+ s0 = vec_sra(s0, vec_7); \
+ s1 = vec_sra(s1, vec_7); \
+ s2 = vec_sra(s2, vec_7); \
+ s3 = vec_sra(s3, vec_7);
+
+/** Do inverse transform on 8x8 block
+*/
+static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
+{
+ vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
+ vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
+ vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
+ vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
+ const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
+ const vector unsigned int vec_7 = vec_splat_u32(7);
+ const vector unsigned int vec_5 = vec_splat_u32(5);
+ const vector unsigned int vec_4 = vec_splat_u32(4);
+ const vector signed int vec_4s = vec_splat_s32(4);
+ const vector unsigned int vec_3 = vec_splat_u32(3);
+ const vector unsigned int vec_2 = vec_splat_u32(2);
+ const vector signed int vec_1s = vec_splat_s32(1);
+ const vector unsigned int vec_1 = vec_splat_u32(1);
+
+
+ src0 = vec_ld( 0, block);
+ src1 = vec_ld( 16, block);
+ src2 = vec_ld( 32, block);
+ src3 = vec_ld( 48, block);
+ src4 = vec_ld( 64, block);
+ src5 = vec_ld( 80, block);
+ src6 = vec_ld( 96, block);
+ src7 = vec_ld(112, block);
+
+ TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
+ s0 = vec_unpackl(src0);
+ s1 = vec_unpackl(src1);
+ s2 = vec_unpackl(src2);
+ s3 = vec_unpackl(src3);
+ s4 = vec_unpackl(src4);
+ s5 = vec_unpackl(src5);
+ s6 = vec_unpackl(src6);
+ s7 = vec_unpackl(src7);
+ s8 = vec_unpackh(src0);
+ s9 = vec_unpackh(src1);
+ sA = vec_unpackh(src2);
+ sB = vec_unpackh(src3);
+ sC = vec_unpackh(src4);
+ sD = vec_unpackh(src5);
+ sE = vec_unpackh(src6);
+ sF = vec_unpackh(src7);
+ STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
+ SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
+ STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
+ SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
+ src0 = vec_pack(s8, s0);
+ src1 = vec_pack(s9, s1);
+ src2 = vec_pack(sA, s2);
+ src3 = vec_pack(sB, s3);
+ src4 = vec_pack(sC, s4);
+ src5 = vec_pack(sD, s5);
+ src6 = vec_pack(sE, s6);
+ src7 = vec_pack(sF, s7);
+ TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
+
+ s0 = vec_unpackl(src0);
+ s1 = vec_unpackl(src1);
+ s2 = vec_unpackl(src2);
+ s3 = vec_unpackl(src3);
+ s4 = vec_unpackl(src4);
+ s5 = vec_unpackl(src5);
+ s6 = vec_unpackl(src6);
+ s7 = vec_unpackl(src7);
+ s8 = vec_unpackh(src0);
+ s9 = vec_unpackh(src1);
+ sA = vec_unpackh(src2);
+ sB = vec_unpackh(src3);
+ sC = vec_unpackh(src4);
+ sD = vec_unpackh(src5);
+ sE = vec_unpackh(src6);
+ sF = vec_unpackh(src7);
+ STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
+ SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
+ STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
+ SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
+ src0 = vec_pack(s8, s0);
+ src1 = vec_pack(s9, s1);
+ src2 = vec_pack(sA, s2);
+ src3 = vec_pack(sB, s3);
+ src4 = vec_pack(sC, s4);
+ src5 = vec_pack(sD, s5);
+ src6 = vec_pack(sE, s6);
+ src7 = vec_pack(sF, s7);
+
+ vec_st(src0, 0, block);
+ vec_st(src1, 16, block);
+ vec_st(src2, 32, block);
+ vec_st(src3, 48, block);
+ vec_st(src4, 64, block);
+ vec_st(src5, 80, block);
+ vec_st(src6, 96, block);
+ vec_st(src7,112, block);
+}
+
+/** Do inverse transform on 8x4 part of block
+*/
+static void vc1_inv_trans_8x4_altivec(DCTELEM block[64], int n)
+{
+ vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
+ vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
+ vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
+ vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
+ const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
+ const vector unsigned int vec_7 = vec_splat_u32(7);
+ const vector unsigned int vec_5 = vec_splat_u32(5);
+ const vector unsigned int vec_4 = vec_splat_u32(4);
+ const vector signed int vec_4s = vec_splat_s32(4);
+ const vector unsigned int vec_3 = vec_splat_u32(3);
+ const vector unsigned int vec_2 = vec_splat_u32(2);
+ const vector unsigned int vec_1 = vec_splat_u32(1);
+
+ src0 = vec_ld( 0, block);
+ src1 = vec_ld( 16, block);
+ src2 = vec_ld( 32, block);
+ src3 = vec_ld( 48, block);
+ src4 = vec_ld( 64, block);
+ src5 = vec_ld( 80, block);
+ src6 = vec_ld( 96, block);
+ src7 = vec_ld(112, block);
+
+ TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
+ s0 = vec_unpackl(src0);
+ s1 = vec_unpackl(src1);
+ s2 = vec_unpackl(src2);
+ s3 = vec_unpackl(src3);
+ s4 = vec_unpackl(src4);
+ s5 = vec_unpackl(src5);
+ s6 = vec_unpackl(src6);
+ s7 = vec_unpackl(src7);
+ s8 = vec_unpackh(src0);
+ s9 = vec_unpackh(src1);
+ sA = vec_unpackh(src2);
+ sB = vec_unpackh(src3);
+ sC = vec_unpackh(src4);
+ sD = vec_unpackh(src5);
+ sE = vec_unpackh(src6);
+ sF = vec_unpackh(src7);
+ STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
+ SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
+ STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
+ SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
+ src0 = vec_pack(s8, s0);
+ src1 = vec_pack(s9, s1);
+ src2 = vec_pack(sA, s2);
+ src3 = vec_pack(sB, s3);
+ src4 = vec_pack(sC, s4);
+ src5 = vec_pack(sD, s5);
+ src6 = vec_pack(sE, s6);
+ src7 = vec_pack(sF, s7);
+ TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
+
+ if(!n){ // upper half of block
+ s0 = vec_unpackh(src0);
+ s1 = vec_unpackh(src1);
+ s2 = vec_unpackh(src2);
+ s3 = vec_unpackh(src3);
+ s8 = vec_unpackl(src0);
+ s9 = vec_unpackl(src1);
+ sA = vec_unpackl(src2);
+ sB = vec_unpackl(src3);
+ STEP4(s0, s1, s2, s3, vec_64);
+ SHIFT_VERT4(s0, s1, s2, s3);
+ STEP4(s8, s9, sA, sB, vec_64);
+ SHIFT_VERT4(s8, s9, sA, sB);
+ src0 = vec_pack(s0, s8);
+ src1 = vec_pack(s1, s9);
+ src2 = vec_pack(s2, sA);
+ src3 = vec_pack(s3, sB);
+
+ vec_st(src0, 0, block);
+ vec_st(src1, 16, block);
+ vec_st(src2, 32, block);
+ vec_st(src3, 48, block);
+ } else { //lower half of block
+ s0 = vec_unpackh(src4);
+ s1 = vec_unpackh(src5);
+ s2 = vec_unpackh(src6);
+ s3 = vec_unpackh(src7);
+ s8 = vec_unpackl(src4);
+ s9 = vec_unpackl(src5);
+ sA = vec_unpackl(src6);
+ sB = vec_unpackl(src7);
+ STEP4(s0, s1, s2, s3, vec_64);
+ SHIFT_VERT4(s0, s1, s2, s3);
+ STEP4(s8, s9, sA, sB, vec_64);
+ SHIFT_VERT4(s8, s9, sA, sB);
+ src4 = vec_pack(s0, s8);
+ src5 = vec_pack(s1, s9);
+ src6 = vec_pack(s2, sA);
+ src7 = vec_pack(s3, sB);
+
+ vec_st(src4, 64, block);
+ vec_st(src5, 80, block);
+ vec_st(src6, 96, block);
+ vec_st(src7,112, block);
+ }
+}
+
+
+void vc1dsp_init_altivec(DSPContext* dsp, AVCodecContext *avctx) {
+ dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
+ dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
+}
diff --git a/contrib/ffmpeg/libavcodec/ps2/dsputil_mmi.c b/contrib/ffmpeg/libavcodec/ps2/dsputil_mmi.c
new file mode 100644
index 000000000..0d72ae88c
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ps2/dsputil_mmi.c
@@ -0,0 +1,163 @@
+/*
+ * MMI optimized DSP utils
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * MMI optimization by Leon van Stuivenberg
+ * clear_blocks_mmi() by BroadQ
+ */
+
+#include "../dsputil.h"
+#include "mmi.h"
+
+void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block);
+void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block);
+void ff_mmi_idct(DCTELEM *block);
+
+static void clear_blocks_mmi(DCTELEM * blocks)
+{
+ asm volatile(
+ ".set noreorder \n"
+ "addiu $9, %0, 768 \n"
+ "nop \n"
+ "1: \n"
+ "sq $0, 0(%0) \n"
+ "move $8, %0 \n"
+ "addi %0, %0, 64 \n"
+ "sq $0, 16($8) \n"
+ "slt $10, %0, $9 \n"
+ "sq $0, 32($8) \n"
+ "bnez $10, 1b \n"
+ "sq $0, 48($8) \n"
+ ".set reorder \n"
+ : "+r" (blocks) :: "$8", "$9", "memory" );
+}
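The loop above zeroes 768 bytes, i.e. six 8x8 blocks of 16-bit coefficients, 64 bytes per iteration via four 128-bit sq stores. Portable reference (illustrative only, not part of the patch):

static void clear_blocks_ref(DCTELEM *blocks)
{
    int i;
    for (i = 0; i < 6 * 64; i++)   /* 6 * 64 * sizeof(int16_t) = 768 bytes */
        blocks[i] = 0;
}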
+
+
+static void get_pixels_mmi(DCTELEM *block, const uint8_t *pixels, int line_size)
+{
+ asm volatile(
+ ".set push \n\t"
+ ".set mips3 \n\t"
+ "ld $8, 0(%0) \n\t"
+ "add %0, %0, %2 \n\t"
+ "ld $9, 0(%0) \n\t"
+ "add %0, %0, %2 \n\t"
+ "ld $10, 0(%0) \n\t"
+ "pextlb $8, $0, $8 \n\t"
+ "sq $8, 0(%1) \n\t"
+ "add %0, %0, %2 \n\t"
+ "ld $8, 0(%0) \n\t"
+ "pextlb $9, $0, $9 \n\t"
+ "sq $9, 16(%1) \n\t"
+ "add %0, %0, %2 \n\t"
+ "ld $9, 0(%0) \n\t"
+ "pextlb $10, $0, $10 \n\t"
+ "sq $10, 32(%1) \n\t"
+ "add %0, %0, %2 \n\t"
+ "ld $10, 0(%0) \n\t"
+ "pextlb $8, $0, $8 \n\t"
+ "sq $8, 48(%1) \n\t"
+ "add %0, %0, %2 \n\t"
+ "ld $8, 0(%0) \n\t"
+ "pextlb $9, $0, $9 \n\t"
+ "sq $9, 64(%1) \n\t"
+ "add %0, %0, %2 \n\t"
+ "ld $9, 0(%0) \n\t"
+ "pextlb $10, $0, $10 \n\t"
+ "sq $10, 80(%1) \n\t"
+ "pextlb $8, $0, $8 \n\t"
+ "sq $8, 96(%1) \n\t"
+ "pextlb $9, $0, $9 \n\t"
+ "sq $9, 112(%1) \n\t"
+ ".set pop \n\t"
+ : "+r" (pixels) : "r" (block), "r" (line_size) : "$8", "$9", "$10", "memory" );
+}
+
+
+static void put_pixels8_mmi(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ asm volatile(
+ ".set push \n\t"
+ ".set mips3 \n\t"
+ "1: \n\t"
+ "ldr $8, 0(%1) \n\t"
+ "addiu %2, %2, -1 \n\t"
+ "ldl $8, 7(%1) \n\t"
+ "add %1, %1, %3 \n\t"
+ "sd $8, 0(%0) \n\t"
+ "add %0, %0, %3 \n\t"
+ "bgtz %2, 1b \n\t"
+ ".set pop \n\t"
+ : "+r" (block), "+r" (pixels), "+r" (h) : "r" (line_size)
+ : "$8", "memory" );
+}
+
+
+static void put_pixels16_mmi(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+ asm volatile (
+ ".set push \n\t"
+ ".set mips3 \n\t"
+ "1: \n\t"
+ "ldr $8, 0(%1) \n\t"
+ "add $11, %1, %3 \n\t"
+ "ldl $8, 7(%1) \n\t"
+ "add $10, %0, %3 \n\t"
+ "ldr $9, 8(%1) \n\t"
+ "ldl $9, 15(%1) \n\t"
+ "ldr $12, 0($11) \n\t"
+ "add %1, $11, %3 \n\t"
+ "ldl $12, 7($11) \n\t"
+ "pcpyld $8, $9, $8 \n\t"
+ "sq $8, 0(%0) \n\t"
+ "ldr $13, 8($11) \n\t"
+ "addiu %2, %2, -2 \n\t"
+ "ldl $13, 15($11) \n\t"
+ "add %0, $10, %3 \n\t"
+ "pcpyld $12, $13, $12 \n\t"
+ "sq $12, 0($10) \n\t"
+ "bgtz %2, 1b \n\t"
+ ".set pop \n\t"
+ : "+r" (block), "+r" (pixels), "+r" (h) : "r" (line_size)
+ : "$8", "$9", "$10", "$11", "$12", "$13", "memory" );
+}
+
+
+void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx)
+{
+ const int idct_algo= avctx->idct_algo;
+
+ c->clear_blocks = clear_blocks_mmi;
+
+ c->put_pixels_tab[1][0] = put_pixels8_mmi;
+ c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmi;
+
+ c->put_pixels_tab[0][0] = put_pixels16_mmi;
+ c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmi;
+
+ c->get_pixels = get_pixels_mmi;
+
+ if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_PS2){
+ c->idct_put= ff_mmi_idct_put;
+ c->idct_add= ff_mmi_idct_add;
+ c->idct = ff_mmi_idct;
+ c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
+ }
+}
+
diff --git a/contrib/ffmpeg/libavcodec/ps2/idct_mmi.c b/contrib/ffmpeg/libavcodec/ps2/idct_mmi.c
new file mode 100644
index 000000000..dfe9b3726
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ps2/idct_mmi.c
@@ -0,0 +1,363 @@
+/*
+ * Originally provided by Intel at Application Note AP-922.
+ *
+ * Column code adapted from Peter Gubanov.
+ * Copyright (c) 2000-2001 Peter Gubanov <peter@elecard.net.ru>
+ * http://www.elecard.com/peter/idct.shtml
+ * rounding trick copyright (c) 2000 Michel Lespinasse <walken@zoy.org>
+ *
+ * MMI port and (c) 2002 by Leon van Stuivenberg
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+*/
+
+#include "../common.h"
+#include "../dsputil.h"
+#include "mmi.h"
+
+#define BITS_INV_ACC 5 // 4 or 5 for IEEE
+#define SHIFT_INV_ROW (16 - BITS_INV_ACC)
+#define SHIFT_INV_COL (1 + BITS_INV_ACC)
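+
+/* With BITS_INV_ACC == 5 this gives SHIFT_INV_ROW == 11 and SHIFT_INV_COL == 6;
+ * the rounder rows of consttable below (0x3ff == (1 << (SHIFT_INV_ROW - 1)) - 1)
+ * are built on that assumption, as their comment notes. */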
+
+#define TG1 6518
+#define TG2 13573
+#define TG3 21895
+#define CS4 23170
+
+#define ROUNDER_0 0
+#define ROUNDER_1 16
+
+#define TAB_i_04 (32+0)
+#define TAB_i_17 (32+64)
+#define TAB_i_26 (32+128)
+#define TAB_i_35 (32+192)
+
+#define TG_1_16 (32+256+0)
+#define TG_2_16 (32+256+16)
+#define TG_3_16 (32+256+32)
+#define COS_4_16 (32+256+48)
+
+#define CLIPMAX (32+256+64+0)
+
+static short consttable[] align16 = {
+/* rounder 0*/ // assume SHIFT_INV_ROW == 11
+ 0x3ff, 1, 0x3ff, 1, 0x3ff, 1, 0x3ff, 1,
+/* rounder 1*/
+ 0x3ff, 0, 0x3ff, 0, 0x3ff, 0, 0x3ff, 0,
+/* row 0/4*/
+ 16384, 21407, -16384, -21407, 22725, 19266, -22725, -12873,
+ 8867, 16384, 8867, 16384, 4520, 12873, -4520, 19266,
+ 16384, -8867, 16384, -8867, 12873, -22725, 19266, -22725,
+ 21407, -16384, -21407, 16384, 19266, 4520, -12873, 4520,
+/* row 1/7*/
+ 22725, 29692, -22725, -29692, 31521, 26722, -31521, -17855,
+ 12299, 22725, 12299, 22725, 6270, 17855, -6270, 26722,
+ 22725, -12299, 22725, -12299, 17855, -31521, 26722, -31521,
+ 29692, -22725, -29692, 22725, 26722, 6270, -17855, 6270,
+/* row 2/6*/
+ 21407, 27969, -21407, -27969, 29692, 25172, -29692, -16819,
+ 11585, 21407, 11585, 21407, 5906, 16819, -5906, 25172,
+ 21407, -11585, 21407, -11585, 16819, -29692, 25172, -29692,
+ 27969, -21407, -27969, 21407, 25172, 5906, -16819, 5906,
+/*row 3/5*/
+ 19266, 25172, -19266, -25172, 26722, 22654, -26722, -15137,
+ 10426, 19266, 10426, 19266, 5315, 15137, -5315, 22654,
+ 19266, -10426, 19266, -10426, 15137, -26722, 22654, -26722,
+ 25172, -19266, -25172, 19266, 22654, 5315, -15137, 5315,
+/*column constants*/
+ TG1, TG1, TG1, TG1, TG1, TG1, TG1, TG1,
+ TG2, TG2, TG2, TG2, TG2, TG2, TG2, TG2,
+ TG3, TG3, TG3, TG3, TG3, TG3, TG3, TG3,
+ CS4, CS4, CS4, CS4, CS4, CS4, CS4, CS4,
+/* clamp */
+ 255, 255, 255, 255, 255, 255, 255, 255
+};
+
+
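+/* Row pass of the AP-922 style IDCT: the row is multiplied against four
+ * quadwords of the per-row constant table with phmadh (packed multiply-add),
+ * the rounder is added, the result is shifted right by SHIFT_INV_ROW and then
+ * repacked so the halves end up in natural y0..y7 order for the column pass. */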
+#define DCT_8_INV_ROW1(blk, rowoff, taboff, rnd, outreg) { \
+ lq(blk, rowoff, $16); /* r16 = x7 x5 x3 x1 x6 x4 x2 x0 */ \
+ /*slot*/ \
+ lq($24, 0+taboff, $17); /* r17 = w */ \
+ /*delay slot $16*/ \
+ lq($24, 16+taboff, $18);/* r18 = w */ \
+ prevh($16, $2); /* r2 = x1 x3 x5 x7 x0 x2 x4 x6 */ \
+ lq($24, 32+taboff, $19);/* r19 = w */ \
+ phmadh($17, $16, $17); /* r17 = b1"b0'a1"a0' */ \
+ lq($24, 48+taboff, $20);/* r20 = w */ \
+ phmadh($18, $2, $18); /* r18 = b1'b0"a1'a0" */ \
+ phmadh($19, $16, $19); /* r19 = b3"b2'a3"a2' */ \
+ phmadh($20, $2, $20); /* r20 = b3'b2"a3'a2" */ \
+ paddw($17, $18, $17); /* r17 = (b1)(b0)(a1)(a0) */ \
+ paddw($19, $20, $19); /* r19 = (b3)(b2)(a3)(a2) */ \
+ pcpyld($19, $17, $18); /* r18 = (a3)(a2)(a1)(a0) */ \
+ pcpyud($17, $19, $20); /* r20 = (b3)(b2)(b1)(b0) */ \
+ paddw($18, rnd, $18); /* r18 = (a3)(a2)(a1)(a0) */\
+ paddw($18, $20, $17); /* r17 = ()()()(a0+b0) */ \
+ psubw($18, $20, $20); /* r20 = ()()()(a0-b0) */ \
+ psraw($17, SHIFT_INV_ROW, $17); /* r17 = (y3 y2 y1 y0) */ \
+ psraw($20, SHIFT_INV_ROW, $20); /* r20 = (y4 y5 y6 y7) */ \
+ ppach($20, $17, outreg);/* out = y4 y5 y6 y7 y3 y2 y1 y0 Note order */ \
+\
+ prevh(outreg, $2); \
+ pcpyud($2, $2, $2); \
+ pcpyld($2, outreg, outreg); \
+}
+
+
+#define DCT_8_INV_COL8() \
+\
+ lq($24, TG_3_16, $2); /* r2 = tn3 */ \
+\
+ pmulth($11, $2, $17); /* r17 = x3 * tn3 (6420) */ \
+ psraw($17, 15, $17); \
+ pmfhl_uw($3); /* r3 = 7531 */ \
+ psraw($3, 15, $3); \
+ pinteh($3, $17, $17); /* r17 = x3 * tn3 */ \
+ psubh($17, $13, $17); /* r17 = tm35 */ \
+\
+ pmulth($13, $2, $18); /* r18 = x5 * tn3 (6420) */ \
+ psraw($18, 15, $18); \
+ pmfhl_uw($3); /* r3 = 7531 */ \
+ psraw($3, 15, $3); \
+ pinteh($3, $18, $18); /* r18 = x5 * tn3 */ \
+ paddh($18, $11, $18); /* r18 = tp35 */ \
+\
+ lq($24, TG_1_16, $2); /* r2 = tn1 */ \
+\
+ pmulth($15, $2, $19); /* r19 = x7 * tn1 (6420) */ \
+ psraw($19, 15, $19); \
+ pmfhl_uw($3); /* r3 = 7531 */ \
+ psraw($3, 15, $3); \
+ pinteh($3, $19, $19); /* r19 = x7 * tn1 */ \
+ paddh($19, $9, $19); /* r19 = tp17 */ \
+\
+ pmulth($9, $2, $20); /* r20 = x1 * tn1 (6420) */ \
+ psraw($20, 15, $20); \
+ pmfhl_uw($3); /* r3 = 7531 */ \
+ psraw($3, 15, $3); \
+ pinteh($3, $20, $20); /* r20 = x1 * tn1 */ \
+ psubh($20, $15, $20); /* r20 = tm17 */ \
+\
+ psubh($19, $18, $3); /* r3 = t1 */ \
+ paddh($20, $17, $16); /* r16 = t2 */ \
+ psubh($20, $17, $23); /* r23 = b3 */ \
+ paddh($19, $18, $20); /* r20 = b0 */ \
+\
+ lq($24, COS_4_16, $2); /* r2 = cs4 */ \
+\
+ paddh($3, $16, $21); /* r21 = t1+t2 */ \
+ psubh($3, $16, $22); /* r22 = t1-t2 */ \
+\
+ pmulth($21, $2, $21); /* r21 = cs4 * (t1+t2) 6420 */ \
+ psraw($21, 15, $21); \
+ pmfhl_uw($3); /* r3 = 7531 */ \
+ psraw($3, 15, $3); \
+ pinteh($3, $21, $21); /* r21 = b1 */ \
+\
+ pmulth($22, $2, $22); /* r22 = cs4 * (t1-t2) 6420 */ \
+ psraw($22, 15, $22); \
+ pmfhl_uw($3); /* r3 = 7531 */ \
+ psraw($3, 15, $3); \
+ pinteh($3, $22, $22); /* r22 = b2 */ \
+\
+ lq($24, TG_2_16, $2); /* r2 = tn2 */ \
+\
+ pmulth($10, $2, $17); /* r17 = x2 * tn2 (6420) */ \
+ psraw($17, 15, $17); \
+ pmfhl_uw($3); /* r3 = 7531 */ \
+ psraw($3, 15, $3); \
+ pinteh($3, $17, $17); /* r17 = x2 * tn2 */ \
+ psubh($17, $14, $17); /* r17 = tm26 */ \
+\
+ pmulth($14, $2, $18); /* r18 = x6 * tn2 (6420) */ \
+ psraw($18, 15, $18); \
+ pmfhl_uw($3); /* r3 = 7531 */ \
+ psraw($3, 15, $3); \
+ pinteh($3, $18, $18); /* r18 = x6 * tn2 */ \
+ paddh($18, $10, $18); /* r18 = tp26 */ \
+\
+ paddh($8, $12, $2); /* r2 = tp04 */ \
+ psubh($8, $12, $3); /* r3 = tm04 */ \
+\
+ paddh($2, $18, $16); /* r16 = a0 */ \
+ psubh($2, $18, $19); /* r19 = a3 */ \
+ psubh($3, $17, $18); /* r18 = a2 */ \
+ paddh($3, $17, $17); /* r17 = a1 */
+
+
+#define DCT_8_INV_COL8_STORE(blk) \
+\
+ paddh($16, $20, $2); /* y0 a0+b0 */ \
+ psubh($16, $20, $16); /* y7 a0-b0 */ \
+ psrah($2, SHIFT_INV_COL, $2); \
+ psrah($16, SHIFT_INV_COL, $16); \
+ sq($2, 0, blk); \
+ sq($16, 112, blk); \
+\
+ paddh($17, $21, $3); /* y1 a1+b1 */ \
+ psubh($17, $21, $17); /* y6 a1-b1 */ \
+ psrah($3, SHIFT_INV_COL, $3); \
+ psrah($17, SHIFT_INV_COL, $17); \
+ sq($3, 16, blk); \
+ sq($17, 96, blk); \
+\
+ paddh($18, $22, $2); /* y2 a2+b2 */ \
+ psubh($18, $22, $18); /* y5 a2-b2 */ \
+ psrah($2, SHIFT_INV_COL, $2); \
+ psrah($18, SHIFT_INV_COL, $18); \
+ sq($2, 32, blk); \
+ sq($18, 80, blk); \
+\
+ paddh($19, $23, $3); /* y3 a3+b3 */ \
+ psubh($19, $23, $19); /* y4 a3-b3 */ \
+ psrah($3, SHIFT_INV_COL, $3); \
+ psrah($19, SHIFT_INV_COL, $19); \
+ sq($3, 48, blk); \
+ sq($19, 64, blk);
+
+
+
+#define DCT_8_INV_COL8_PMS() \
+ paddh($16, $20, $2); /* y0 a0+b0 */ \
+ psubh($16, $20, $20); /* y7 a0-b0 */ \
+ psrah($2, SHIFT_INV_COL, $16); \
+ psrah($20, SHIFT_INV_COL, $20); \
+\
+ paddh($17, $21, $3); /* y1 a1+b1 */ \
+ psubh($17, $21, $21); /* y6 a1-b1 */ \
+ psrah($3, SHIFT_INV_COL, $17); \
+ psrah($21, SHIFT_INV_COL, $21); \
+\
+ paddh($18, $22, $2); /* y2 a2+b2 */ \
+ psubh($18, $22, $22); /* y5 a2-b2 */ \
+ psrah($2, SHIFT_INV_COL, $18); \
+ psrah($22, SHIFT_INV_COL, $22); \
+\
+ paddh($19, $23, $3); /* y3 a3+b3 */ \
+ psubh($19, $23, $23); /* y4 a3-b3 */ \
+ psrah($3, SHIFT_INV_COL, $19); \
+ psrah($23, SHIFT_INV_COL, $23);
+
+#define PUT(rs) \
+ pminh(rs, $11, $2); \
+ pmaxh($2, $0, $2); \
+ ppacb($0, $2, $2); \
+ sd3(2, 0, 4); \
+ __asm__ __volatile__ ("add $4, $5, $4");
+
+#define DCT_8_INV_COL8_PUT() \
+ PUT($16); \
+ PUT($17); \
+ PUT($18); \
+ PUT($19); \
+ PUT($23); \
+ PUT($22); \
+ PUT($21); \
+ PUT($20);
+
+#define ADD(rs) \
+ ld3(4, 0, 2); \
+ pextlb($0, $2, $2); \
+ paddh($2, rs, $2); \
+ pminh($2, $11, $2); \
+ pmaxh($2, $0, $2); \
+ ppacb($0, $2, $2); \
+ sd3(2, 0, 4); \
+ __asm__ __volatile__ ("add $4, $5, $4");
+
+/*fixme: schedule*/
+#define DCT_8_INV_COL8_ADD() \
+ ADD($16); \
+ ADD($17); \
+ ADD($18); \
+ ADD($19); \
+ ADD($23); \
+ ADD($22); \
+ ADD($21); \
+ ADD($20);
+
+
+void ff_mmi_idct(int16_t * block)
+{
+ /* $4 = block */
+ __asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
+ lq($24, ROUNDER_0, $8);
+ lq($24, ROUNDER_1, $7);
+ DCT_8_INV_ROW1($4, 0, TAB_i_04, $8, $8);
+ DCT_8_INV_ROW1($4, 16, TAB_i_17, $7, $9);
+ DCT_8_INV_ROW1($4, 32, TAB_i_26, $7, $10);
+ DCT_8_INV_ROW1($4, 48, TAB_i_35, $7, $11);
+ DCT_8_INV_ROW1($4, 64, TAB_i_04, $7, $12);
+ DCT_8_INV_ROW1($4, 80, TAB_i_35, $7, $13);
+ DCT_8_INV_ROW1($4, 96, TAB_i_26, $7, $14);
+ DCT_8_INV_ROW1($4, 112, TAB_i_17, $7, $15);
+ DCT_8_INV_COL8();
+ DCT_8_INV_COL8_STORE($4);
+
+ // declare the saved-temp registers clobbered so the compiler saves and restores them
+ __asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
+}
+
+
+void ff_mmi_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ /* $4 = dest, $5 = line_size, $6 = block */
+ __asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
+ lq($24, ROUNDER_0, $8);
+ lq($24, ROUNDER_1, $7);
+ DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
+ DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9);
+ DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10);
+ DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11);
+ DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12);
+ DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13);
+ DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14);
+ DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15);
+ DCT_8_INV_COL8();
+ lq($24, CLIPMAX, $11);
+ DCT_8_INV_COL8_PMS();
+ DCT_8_INV_COL8_PUT();
+
+ // declare the saved-temp registers clobbered so the compiler saves and restores them
+ __asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
+}
+
+
+void ff_mmi_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ /* $4 = dest, $5 = line_size, $6 = block */
+ __asm__ __volatile__("la $24, %0"::"m"(consttable[0]));
+ lq($24, ROUNDER_0, $8);
+ lq($24, ROUNDER_1, $7);
+ DCT_8_INV_ROW1($6, 0, TAB_i_04, $8, $8);
+ DCT_8_INV_ROW1($6, 16, TAB_i_17, $7, $9);
+ DCT_8_INV_ROW1($6, 32, TAB_i_26, $7, $10);
+ DCT_8_INV_ROW1($6, 48, TAB_i_35, $7, $11);
+ DCT_8_INV_ROW1($6, 64, TAB_i_04, $7, $12);
+ DCT_8_INV_ROW1($6, 80, TAB_i_35, $7, $13);
+ DCT_8_INV_ROW1($6, 96, TAB_i_26, $7, $14);
+ DCT_8_INV_ROW1($6, 112, TAB_i_17, $7, $15);
+ DCT_8_INV_COL8();
+ lq($24, CLIPMAX, $11);
+ DCT_8_INV_COL8_PMS();
+ DCT_8_INV_COL8_ADD();
+
+ // declare the saved-temp registers clobbered so the compiler saves and restores them
+ __asm__ __volatile__(" ":::"$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23");
+}
+
diff --git a/contrib/ffmpeg/libavcodec/ps2/mmi.h b/contrib/ffmpeg/libavcodec/ps2/mmi.h
new file mode 100644
index 000000000..e2e49a86c
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ps2/mmi.h
@@ -0,0 +1,172 @@
+/*
+ * copyright (c) 2002 Leon van Stuivenberg
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __mmi_H
+#define __mmi_H
+
+#define align16 __attribute__ ((aligned (16)))
+
+/*
+#define r0 $zero
+#define r1 $at //assembler!
+#define r2 $v0 //return
+#define r3 $v1 //return
+#define r4 $a0 //arg
+#define r5 $a1 //arg
+#define r6 $a2 //arg
+#define r7 $a3 //arg
+#define r8 $t0 //temp
+#define r9 $t1 //temp
+#define r10 $t2 //temp
+#define r11 $t3 //temp
+#define r12 $t4 //temp
+#define r13 $t5 //temp
+#define r14 $t6 //temp
+#define r15 $t7 //temp
+#define r16 $s0 //saved temp
+#define r17 $s1 //saved temp
+#define r18 $s2 //saved temp
+#define r19 $s3 //saved temp
+#define r20 $s4 //saved temp
+#define r21 $s5 //saved temp
+#define r22 $s6 //saved temp
+#define r23 $s7 //saved temp
+#define r24 $t8 //temp
+#define r25 $t9 //temp
+#define r26 $k0 //kernel
+#define r27 $k1 //kernel
+#define r28 $gp //global ptr
+#define r29 $sp //stack ptr
+#define r30 $fp //frame ptr
+#define r31 $ra //return addr
+*/
+
+
+#define lq(base, off, reg) \
+ __asm__ __volatile__ ("lq " #reg ", %0("#base ")" : : "i" (off) )
+
+#define lq2(mem, reg) \
+ __asm__ __volatile__ ("lq " #reg ", %0" : : "r" (mem))
+
+#define sq(reg, off, base) \
+ __asm__ __volatile__ ("sq " #reg ", %0("#base ")" : : "i" (off) )
+
+/*
+#define ld(base, off, reg) \
+ __asm__ __volatile__ ("ld " #reg ", " #off "("#base ")")
+*/
+
+#define ld3(base, off, reg) \
+ __asm__ __volatile__ (".word %0" : : "i" ( 0xdc000000 | (base<<21) | (reg<<16) | (off)))
+
+#define ldr3(base, off, reg) \
+ __asm__ __volatile__ (".word %0" : : "i" ( 0x6c000000 | (base<<21) | (reg<<16) | (off)))
+
+#define ldl3(base, off, reg) \
+ __asm__ __volatile__ (".word %0" : : "i" ( 0x68000000 | (base<<21) | (reg<<16) | (off)))
+
+/*
+#define sd(reg, off, base) \
+ __asm__ __volatile__ ("sd " #reg ", " #off "("#base ")")
+*/
+// the assembler seems to mis-encode the 'sd' mnemonic, so emit the instruction word by hand
+#define sd3(reg, off, base) \
+ __asm__ __volatile__ (".word %0" : : "i" ( 0xfc000000 | (base<<21) | (reg<<16) | (off)))
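+
+/* Example of the hand encoding: sd3(2, 0, 4) emits .word 0xfc820000, i.e.
+ * "sd $2, 0($4)" (opcode 0xfc000000, base $4 in bits 25-21, rt $2 in bits
+ * 20-16, 16-bit offset 0); ld3/ldr3/ldl3 follow the same layout. */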
+
+#define sw(reg, off, base) \
+ __asm__ __volatile__ ("sw " #reg ", " #off "("#base ")")
+
+#define sq2(reg, mem) \
+ __asm__ __volatile__ ("sq " #reg ", %0" : : "m" (*(mem)))
+
+#define pinth(rs, rt, rd) \
+ __asm__ __volatile__ ("pinth " #rd ", " #rs ", " #rt )
+
+#define phmadh(rs, rt, rd) \
+ __asm__ __volatile__ ("phmadh " #rd ", " #rs ", " #rt )
+
+#define pcpyud(rs, rt, rd) \
+ __asm__ __volatile__ ("pcpyud " #rd ", " #rs ", " #rt )
+
+#define pcpyld(rs, rt, rd) \
+ __asm__ __volatile__ ("pcpyld " #rd ", " #rs ", " #rt )
+
+#define pcpyh(rt, rd) \
+ __asm__ __volatile__ ("pcpyh " #rd ", " #rt )
+
+#define paddw(rs, rt, rd) \
+ __asm__ __volatile__ ("paddw " #rd ", " #rs ", " #rt )
+
+#define pextlw(rs, rt, rd) \
+ __asm__ __volatile__ ("pextlw " #rd ", " #rs ", " #rt )
+
+#define pextuw(rs, rt, rd) \
+ __asm__ __volatile__ ("pextuw " #rd ", " #rs ", " #rt )
+
+#define pextlh(rs, rt, rd) \
+ __asm__ __volatile__ ("pextlh " #rd ", " #rs ", " #rt )
+
+#define pextuh(rs, rt, rd) \
+ __asm__ __volatile__ ("pextuh " #rd ", " #rs ", " #rt )
+
+#define psubw(rs, rt, rd) \
+ __asm__ __volatile__ ("psubw " #rd ", " #rs ", " #rt )
+
+#define psraw(rt, sa, rd) \
+ __asm__ __volatile__ ("psraw " #rd ", " #rt ", %0" : : "i"(sa) )
+
+#define ppach(rs, rt, rd) \
+ __asm__ __volatile__ ("ppach " #rd ", " #rs ", " #rt )
+
+#define ppacb(rs, rt, rd) \
+ __asm__ __volatile__ ("ppacb " #rd ", " #rs ", " #rt )
+
+#define prevh(rt, rd) \
+ __asm__ __volatile__ ("prevh " #rd ", " #rt )
+
+#define pmulth(rs, rt, rd) \
+ __asm__ __volatile__ ("pmulth " #rd ", " #rs ", " #rt )
+
+#define pmaxh(rs, rt, rd) \
+ __asm__ __volatile__ ("pmaxh " #rd ", " #rs ", " #rt )
+
+#define pminh(rs, rt, rd) \
+ __asm__ __volatile__ ("pminh " #rd ", " #rs ", " #rt )
+
+#define pinteh(rs, rt, rd) \
+ __asm__ __volatile__ ("pinteh " #rd ", " #rs ", " #rt )
+
+#define paddh(rs, rt, rd) \
+ __asm__ __volatile__ ("paddh " #rd ", " #rs ", " #rt )
+
+#define psubh(rs, rt, rd) \
+ __asm__ __volatile__ ("psubh " #rd ", " #rs ", " #rt )
+
+#define psrah(rt, sa, rd) \
+ __asm__ __volatile__ ("psrah " #rd ", " #rt ", %0" : : "i"(sa) )
+
+#define pmfhl_uw(rd) \
+ __asm__ __volatile__ ("pmfhl.uw " #rd)
+
+#define pextlb(rs, rt, rd) \
+ __asm__ __volatile__ ("pextlb " #rd ", " #rs ", " #rt )
+
+#endif
+
diff --git a/contrib/ffmpeg/libavcodec/ps2/mpegvideo_mmi.c b/contrib/ffmpeg/libavcodec/ps2/mpegvideo_mmi.c
new file mode 100644
index 000000000..1e5f08aae
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ps2/mpegvideo_mmi.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2000,2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * MMI optimization by Leon van Stuivenberg
+ */
+
+#include "../dsputil.h"
+#include "../mpegvideo.h"
+#include "../avcodec.h"
+
+static void dct_unquantize_h263_mmi(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int level=0, qmul, qadd;
+ int nCoeffs;
+
+ assert(s->block_last_index[n]>=0);
+
+ qadd = (qscale - 1) | 1;
+ qmul = qscale << 1;
+
+ if (s->mb_intra) {
+ if (!s->h263_aic) {
+ if (n < 4)
+ level = block[0] * s->y_dc_scale;
+ else
+ level = block[0] * s->c_dc_scale;
+ }else {
+ qadd = 0;
+ level = block[0];
+ }
+ nCoeffs= 63; //does not always use the zigzag table
+ } else {
+ nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
+ }
+
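+ /* The SIMD loop handles eight coefficients per iteration: it computes
+ level*qmul, adds qadd with the sign of level (negated through the
+ add/xor trick with the sign mask), and zeroes lanes where level == 0. */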
+ asm volatile(
+ "add $14, $0, %3 \n\t"
+ "pcpyld $8, %0, %0 \n\t"
+ "pcpyh $8, $8 \n\t" //r8 = qmul
+ "pcpyld $9, %1, %1 \n\t"
+ "pcpyh $9, $9 \n\t" //r9 = qadd
+ ".p2align 2 \n\t"
+ "1: \n\t"
+ "lq $10, 0($14) \n\t" //r10 = level
+ "addi $14, $14, 16 \n\t" //block+=8
+ "addi %2, %2, -8 \n\t"
+ "pcgth $11, $0, $10 \n\t" //r11 = level < 0 ? -1 : 0
+ "pcgth $12, $10, $0 \n\t" //r12 = level > 0 ? -1 : 0
+ "por $12, $11, $12 \n\t"
+ "pmulth $10, $10, $8 \n\t"
+ "paddh $13, $9, $11 \n\t"
+ "pxor $13, $13, $11 \n\t" //r13 = level < 0 ? -qadd : qadd
+ "pmfhl.uw $11 \n\t"
+ "pinteh $10, $11, $10 \n\t" //r10 = level * qmul
+ "paddh $10, $10, $13 \n\t"
+ "pand $10, $10, $12 \n\t"
+ "sq $10, -16($14) \n\t"
+ "bgez %2, 1b \n\t"
+ :: "r"(qmul), "r" (qadd), "r" (nCoeffs), "r" (block) : "$8", "$9", "$10", "$11", "$12", "$13", "$14", "memory" );
+
+ if(s->mb_intra)
+ block[0]= level;
+}
+
+
+void MPV_common_init_mmi(MpegEncContext *s)
+{
+ s->dct_unquantize_h263_intra =
+ s->dct_unquantize_h263_inter = dct_unquantize_h263_mmi;
+}
+
+
diff --git a/contrib/ffmpeg/libavcodec/pthread.c b/contrib/ffmpeg/libavcodec/pthread.c
new file mode 100644
index 000000000..4737211cb
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/pthread.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2004 Roman Shaposhnik.
+ *
+ * Many thanks to Steven M. Schultz for providing clever ideas and
+ * to Michael Niedermayer <michaelni@gmx.at> for writing initial
+ * implementation.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include <pthread.h>
+
+#include "avcodec.h"
+#include "common.h"
+
+typedef int (action_t)(AVCodecContext *c, void *arg);
+
+typedef struct ThreadContext {
+ pthread_t *workers;
+ action_t *func;
+ void **args;
+ int *rets;
+ int rets_count;
+ int job_count;
+
+ pthread_cond_t last_job_cond;
+ pthread_cond_t current_job_cond;
+ pthread_mutex_t current_job_lock;
+ int current_job;
+ int done;
+} ThreadContext;
+
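+/*
+ * Job dispatch works through current_job, which doubles as a ticket counter:
+ * each worker records a self_id at startup, avcodec_thread_execute() resets
+ * current_job to thread_count and broadcasts, and a worker first runs job
+ * self_id (if it is below job_count) and then keeps taking current_job++
+ * until the index passes the end. The counter reaches
+ * thread_count + job_count exactly when the last job has finished, and the
+ * worker that sees that signals last_job_cond to release the parked caller.
+ */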
+static void* worker(void *v)
+{
+ AVCodecContext *avctx = v;
+ ThreadContext *c = avctx->thread_opaque;
+ int our_job = c->job_count;
+ int thread_count = avctx->thread_count;
+ int self_id;
+
+ pthread_mutex_lock(&c->current_job_lock);
+ self_id = c->current_job++;
+ for (;;){
+ while (our_job >= c->job_count) {
+ if (c->current_job == thread_count + c->job_count)
+ pthread_cond_signal(&c->last_job_cond);
+
+ pthread_cond_wait(&c->current_job_cond, &c->current_job_lock);
+ our_job = self_id;
+
+ if (c->done) {
+ pthread_mutex_unlock(&c->current_job_lock);
+ return NULL;
+ }
+ }
+ pthread_mutex_unlock(&c->current_job_lock);
+
+ c->rets[our_job%c->rets_count] = c->func(avctx, c->args[our_job]);
+
+ pthread_mutex_lock(&c->current_job_lock);
+ our_job = c->current_job++;
+ }
+}
+
+static always_inline void avcodec_thread_park_workers(ThreadContext *c, int thread_count)
+{
+ pthread_cond_wait(&c->last_job_cond, &c->current_job_lock);
+ pthread_mutex_unlock(&c->current_job_lock);
+}
+
+void avcodec_thread_free(AVCodecContext *avctx)
+{
+ ThreadContext *c = avctx->thread_opaque;
+ int i;
+
+ pthread_mutex_lock(&c->current_job_lock);
+ c->done = 1;
+ pthread_cond_broadcast(&c->current_job_cond);
+ pthread_mutex_unlock(&c->current_job_lock);
+
+ for (i=0; i<avctx->thread_count; i++)
+ pthread_join(c->workers[i], NULL);
+
+ pthread_mutex_destroy(&c->current_job_lock);
+ pthread_cond_destroy(&c->current_job_cond);
+ pthread_cond_destroy(&c->last_job_cond);
+ av_free(c->workers);
+ av_free(c);
+}
+
+int avcodec_thread_execute(AVCodecContext *avctx, action_t* func, void **arg, int *ret, int job_count)
+{
+ ThreadContext *c= avctx->thread_opaque;
+ int dummy_ret;
+
+ if (job_count <= 0)
+ return 0;
+
+ pthread_mutex_lock(&c->current_job_lock);
+
+ c->current_job = avctx->thread_count;
+ c->job_count = job_count;
+ c->args = arg;
+ c->func = func;
+ if (ret) {
+ c->rets = ret;
+ c->rets_count = job_count;
+ } else {
+ c->rets = &dummy_ret;
+ c->rets_count = 1;
+ }
+ pthread_cond_broadcast(&c->current_job_cond);
+
+ avcodec_thread_park_workers(c, avctx->thread_count);
+
+ return 0;
+}
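+
+/* A minimal usage sketch (not part of this file): a codec that has split its
+ * work into per-slice jobs hands them to the pool through avctx->execute.
+ * decode_slice, args and rets below are hypothetical names.
+ *
+ *     static int decode_slice(AVCodecContext *avctx, void *arg);
+ *     ...
+ *     void *args[4]; int rets[4];
+ *     avctx->execute(avctx, decode_slice, args, rets, 4);
+ */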
+
+int avcodec_thread_init(AVCodecContext *avctx, int thread_count)
+{
+ int i;
+ ThreadContext *c;
+
+ c = av_mallocz(sizeof(ThreadContext));
+ if (!c)
+ return -1;
+
+ c->workers = av_mallocz(sizeof(pthread_t)*thread_count);
+ if (!c->workers) {
+ av_free(c);
+ return -1;
+ }
+
+ avctx->thread_opaque = c;
+ avctx->thread_count = thread_count;
+ c->current_job = 0;
+ c->job_count = 0;
+ c->done = 0;
+ pthread_cond_init(&c->current_job_cond, NULL);
+ pthread_cond_init(&c->last_job_cond, NULL);
+ pthread_mutex_init(&c->current_job_lock, NULL);
+ pthread_mutex_lock(&c->current_job_lock);
+ for (i=0; i<thread_count; i++) {
+ if(pthread_create(&c->workers[i], NULL, worker, avctx)) {
+ avctx->thread_count = i;
+ pthread_mutex_unlock(&c->current_job_lock);
+ avcodec_thread_free(avctx);
+ return -1;
+ }
+ }
+
+ avcodec_thread_park_workers(c, thread_count);
+
+ avctx->execute = avcodec_thread_execute;
+ return 0;
+}
diff --git a/src/libffmpeg/libavcodec/qdm2.c b/contrib/ffmpeg/libavcodec/qdm2.c
index 81d548386..b9462f3cb 100644
--- a/src/libffmpeg/libavcodec/qdm2.c
+++ b/contrib/ffmpeg/libavcodec/qdm2.c
@@ -5,18 +5,20 @@
* Copyright (c) 2005 Alex Beregszaszi
* Copyright (c) 2005 Roberto Togni
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/qdm2data.h b/contrib/ffmpeg/libavcodec/qdm2data.h
index dafd4f490..6d7d07463 100644
--- a/src/libffmpeg/libavcodec/qdm2data.h
+++ b/contrib/ffmpeg/libavcodec/qdm2data.h
@@ -5,18 +5,20 @@
* Copyright (c) 2005 Alex Beregszaszi
* Copyright (c) 2005 Roberto Togni
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/qdrw.c b/contrib/ffmpeg/libavcodec/qdrw.c
index 846365917..8ebb43c4a 100644
--- a/src/libffmpeg/libavcodec/qdrw.c
+++ b/contrib/ffmpeg/libavcodec/qdrw.c
@@ -2,18 +2,20 @@
* QuickDraw (qdrw) codec
* Copyright (c) 2004 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -136,7 +138,7 @@ static int decode_frame(AVCodecContext *avctx,
static int decode_init(AVCodecContext *avctx){
// QdrawContext * const a = avctx->priv_data;
- if (avcodec_check_dimensions(avctx, avctx->height, avctx->width) < 0) {
+ if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
return 1;
}
diff --git a/src/libffmpeg/libavcodec/qpeg.c b/contrib/ffmpeg/libavcodec/qpeg.c
index f7323a871..3c597e8df 100644
--- a/src/libffmpeg/libavcodec/qpeg.c
+++ b/contrib/ffmpeg/libavcodec/qpeg.c
@@ -2,18 +2,20 @@
* QPEG codec
* Copyright (c) 2004 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/qtrle.c b/contrib/ffmpeg/libavcodec/qtrle.c
index 0db003146..d4b314d03 100644
--- a/src/libffmpeg/libavcodec/qtrle.c
+++ b/contrib/ffmpeg/libavcodec/qtrle.c
@@ -2,18 +2,20 @@
* Quicktime Animation (RLE) Video Decoder
* Copyright (C) 2004 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/ra144.c b/contrib/ffmpeg/libavcodec/ra144.c
index 059236dfe..c4f4b813b 100644
--- a/src/libffmpeg/libavcodec/ra144.c
+++ b/contrib/ffmpeg/libavcodec/ra144.c
@@ -2,18 +2,20 @@
* Real Audio 1.0 (14.4K)
* Copyright (c) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/ra144.h b/contrib/ffmpeg/libavcodec/ra144.h
index 4ce2df867..6d477b2f8 100644
--- a/src/libffmpeg/libavcodec/ra144.h
+++ b/contrib/ffmpeg/libavcodec/ra144.h
@@ -2,18 +2,20 @@
* Real Audio 1.0 (14.4K)
* Copyright (c) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/ra288.c b/contrib/ffmpeg/libavcodec/ra288.c
index e2425974e..9ba5209ab 100644
--- a/src/libffmpeg/libavcodec/ra288.c
+++ b/contrib/ffmpeg/libavcodec/ra288.c
@@ -2,18 +2,20 @@
* RealAudio 2.0 (28.8K)
* Copyright (c) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/ra288.h b/contrib/ffmpeg/libavcodec/ra288.h
index 0d67d52bb..8cc290397 100644
--- a/src/libffmpeg/libavcodec/ra288.h
+++ b/contrib/ffmpeg/libavcodec/ra288.h
@@ -2,18 +2,20 @@
* RealAudio 2.0 (28.8K)
* Copyright (c) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/rangecoder.c b/contrib/ffmpeg/libavcodec/rangecoder.c
index 4266cf1b3..1f35d0852 100644
--- a/src/libffmpeg/libavcodec/rangecoder.c
+++ b/contrib/ffmpeg/libavcodec/rangecoder.c
@@ -2,18 +2,20 @@
* Range coder
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/rangecoder.h b/contrib/ffmpeg/libavcodec/rangecoder.h
index 0f56fad59..68bd3b60e 100644
--- a/src/libffmpeg/libavcodec/rangecoder.h
+++ b/contrib/ffmpeg/libavcodec/rangecoder.h
@@ -2,18 +2,20 @@
* Range coder
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/ratecontrol.c b/contrib/ffmpeg/libavcodec/ratecontrol.c
index f4f433add..d96c837e6 100644
--- a/src/libffmpeg/libavcodec/ratecontrol.c
+++ b/contrib/ffmpeg/libavcodec/ratecontrol.c
@@ -3,18 +3,20 @@
*
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,7 +27,9 @@
#include "avcodec.h"
#include "dsputil.h"
+#include "ratecontrol.h"
#include "mpegvideo.h"
+#include "eval.h"
#undef NDEBUG // allways check asserts, the speed effect is far too small to disable them
#include <assert.h>
@@ -44,12 +48,70 @@ void ff_write_pass1_stats(MpegEncContext *s){
s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits);
}
+static inline double qp2bits(RateControlEntry *rce, double qp){
+ if(qp<=0.0){
+ av_log(NULL, AV_LOG_ERROR, "qp<=0.0\n");
+ }
+ return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ qp;
+}
+
+static inline double bits2qp(RateControlEntry *rce, double bits){
+ if(bits<0.9){
+ av_log(NULL, AV_LOG_ERROR, "bits<0.9\n");
+ }
+ return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ bits;
+}
+
int ff_rate_control_init(MpegEncContext *s)
{
RateControlContext *rcc= &s->rc_context;
int i;
+ char *error = NULL;
+ static const char *const_names[]={
+ "PI",
+ "E",
+ "iTex",
+ "pTex",
+ "tex",
+ "mv",
+ "fCode",
+ "iCount",
+ "mcVar",
+ "var",
+ "isI",
+ "isP",
+ "isB",
+ "avgQP",
+ "qComp",
+/* "lastIQP",
+ "lastPQP",
+ "lastBQP",
+ "nextNonBQP",*/
+ "avgIITex",
+ "avgPITex",
+ "avgPPTex",
+ "avgBPTex",
+ "avgTex",
+ NULL
+ };
+ static double (*func1[])(void *, double)={
+ (void *)bits2qp,
+ (void *)qp2bits,
+ NULL
+ };
+ static const char *func1_names[]={
+ "bits2qp",
+ "qp2bits",
+ NULL
+ };
emms_c();
+ rcc->rc_eq_eval = ff_parse(s->avctx->rc_eq, const_names, func1, func1_names, NULL, NULL, &error);
+ if (!rcc->rc_eq_eval) {
+ av_log(s->avctx, AV_LOG_ERROR, "Error parsing rc_eq \"%s\": %s\n", s->avctx->rc_eq, error? error : "");
+ return -1;
+ }
+
for(i=0; i<5; i++){
rcc->pred[i].coeff= FF_QP2LAMBDA * 7.0;
rcc->pred[i].count= 1.0;
@@ -191,6 +253,7 @@ void ff_rate_control_uninit(MpegEncContext *s)
RateControlContext *rcc= &s->rc_context;
emms_c();
+ ff_eval_free(rcc->rc_eq_eval);
av_freep(&rcc->entry);
#ifdef CONFIG_XVID
@@ -199,20 +262,6 @@ void ff_rate_control_uninit(MpegEncContext *s)
#endif
}
-static inline double qp2bits(RateControlEntry *rce, double qp){
- if(qp<=0.0){
- av_log(NULL, AV_LOG_ERROR, "qp<=0.0\n");
- }
- return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ qp;
-}
-
-static inline double bits2qp(RateControlEntry *rce, double bits){
- if(bits<0.9){
- av_log(NULL, AV_LOG_ERROR, "bits<0.9\n");
- }
- return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ bits;
-}
-
int ff_vbv_update(MpegEncContext *s, int frame_size){
RateControlContext *rcc= &s->rc_context;
const double fps= 1/av_q2d(s->avctx->time_base);
@@ -287,45 +336,12 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
(rcc->i_cplx_sum[pict_type] + rcc->p_cplx_sum[pict_type]) / (double)rcc->frame_count[pict_type],
0
};
- static const char *const_names[]={
- "PI",
- "E",
- "iTex",
- "pTex",
- "tex",
- "mv",
- "fCode",
- "iCount",
- "mcVar",
- "var",
- "isI",
- "isP",
- "isB",
- "avgQP",
- "qComp",
-/* "lastIQP",
- "lastPQP",
- "lastBQP",
- "nextNonBQP",*/
- "avgIITex",
- "avgPITex",
- "avgPPTex",
- "avgBPTex",
- "avgTex",
- NULL
- };
- static double (*func1[])(void *, double)={
- (void *)bits2qp,
- (void *)qp2bits,
- NULL
- };
- static const char *func1_names[]={
- "bits2qp",
- "qp2bits",
- NULL
- };
- bits= ff_eval(s->avctx->rc_eq, const_values, const_names, func1, func1_names, NULL, NULL, rce);
+ bits= ff_parse_eval(rcc->rc_eq_eval, const_values, rce);
+ if (isnan(bits)) {
+ av_log(s->avctx, AV_LOG_ERROR, "Error evaluating rc_eq \"%s\"\n", s->avctx->rc_eq);
+ return -1;
+ }
rcc->pass1_rc_eq_output_sum+= bits;
bits*=rate_factor;
@@ -363,7 +379,7 @@ static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl
const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type];
if (pict_type==I_TYPE && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==P_TYPE))
- q= last_p_q *ABS(a->i_quant_factor) + a->i_quant_offset;
+ q= last_p_q *FFABS(a->i_quant_factor) + a->i_quant_offset;
else if(pict_type==B_TYPE && a->b_quant_factor>0.0)
q= last_non_b_q* a->b_quant_factor + a->b_quant_offset;
@@ -394,11 +410,11 @@ static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pic
assert(qmin <= qmax);
if(pict_type==B_TYPE){
- qmin= (int)(qmin*ABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
- qmax= (int)(qmax*ABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
+ qmin= (int)(qmin*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
+ qmax= (int)(qmax*FFABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5);
}else if(pict_type==I_TYPE){
- qmin= (int)(qmin*ABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
- qmax= (int)(qmax*ABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
+ qmin= (int)(qmin*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
+ qmax= (int)(qmax*FFABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5);
}
qmin= clip(qmin, 1, FF_LAMBDA_MAX);
@@ -726,6 +742,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
rate_factor= rcc->pass1_wanted_bits/rcc->pass1_rc_eq_output_sum * br_compensation;
q= get_qscale(s, rce, rate_factor, picture_number);
+ if (q < 0)
+ return -1;
assert(q>0.0);
//printf("%f ", q);
@@ -790,12 +808,10 @@ static int init_pass2(MpegEncContext *s)
{
RateControlContext *rcc= &s->rc_context;
AVCodecContext *a= s->avctx;
- int i;
+ int i, toobig;
double fps= 1/av_q2d(s->avctx->time_base);
double complexity[5]={0,0,0,0,0}; // aproximate bits at quant=1
- double avg_quantizer[5];
uint64_t const_bits[5]={0,0,0,0,0}; // quantizer idependant bits
- uint64_t available_bits[5];
uint64_t all_const_bits;
uint64_t all_available_bits= (uint64_t)(s->bit_rate*(double)rcc->num_entries/fps);
double rate_factor=0;
@@ -803,7 +819,7 @@ static int init_pass2(MpegEncContext *s)
//int last_i_frame=-10000000;
const int filter_size= (int)(a->qblur*4) | 1;
double expected_bits;
- double *qscale, *blured_qscale;
+ double *qscale, *blured_qscale, qscale_sum;
/* find complexity & const_bits & decide the pict_types */
for(i=0; i<rcc->num_entries; i++){
@@ -821,37 +837,13 @@ static int init_pass2(MpegEncContext *s)
all_const_bits= const_bits[I_TYPE] + const_bits[P_TYPE] + const_bits[B_TYPE];
if(all_available_bits < all_const_bits){
- av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is to low\n");
+ av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");
return -1;
}
- /* find average quantizers */
- avg_quantizer[P_TYPE]=0;
- for(step=256*256; step>0.0000001; step*=0.5){
- double expected_bits=0;
- avg_quantizer[P_TYPE]+= step;
-
- avg_quantizer[I_TYPE]= avg_quantizer[P_TYPE]*ABS(s->avctx->i_quant_factor) + s->avctx->i_quant_offset;
- avg_quantizer[B_TYPE]= avg_quantizer[P_TYPE]*ABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
-
- expected_bits=
- + all_const_bits
- + complexity[I_TYPE]/avg_quantizer[I_TYPE]
- + complexity[P_TYPE]/avg_quantizer[P_TYPE]
- + complexity[B_TYPE]/avg_quantizer[B_TYPE];
-
- if(expected_bits < all_available_bits) avg_quantizer[P_TYPE]-= step;
-//printf("%f %lld %f\n", expected_bits, all_available_bits, avg_quantizer[P_TYPE]);
- }
-//printf("qp_i:%f, qp_p:%f, qp_b:%f\n", avg_quantizer[I_TYPE],avg_quantizer[P_TYPE],avg_quantizer[B_TYPE]);
-
- for(i=0; i<5; i++){
- available_bits[i]= const_bits[i] + complexity[i]/avg_quantizer[i];
- }
-//printf("%lld %lld %lld %lld\n", available_bits[I_TYPE], available_bits[P_TYPE], available_bits[B_TYPE], all_available_bits);
-
qscale= av_malloc(sizeof(double)*rcc->num_entries);
blured_qscale= av_malloc(sizeof(double)*rcc->num_entries);
+ toobig = 0;
for(step=256*256; step>0.0000001; step*=0.5){
expected_bits=0;
@@ -905,14 +897,46 @@ static int init_pass2(MpegEncContext *s)
expected_bits += bits;
}
-// printf("%f %d %f\n", expected_bits, (int)all_available_bits, rate_factor);
- if(expected_bits > all_available_bits) rate_factor-= step;
+ /*
+ av_log(s->avctx, AV_LOG_INFO,
+ "expected_bits: %f all_available_bits: %d rate_factor: %f\n",
+ expected_bits, (int)all_available_bits, rate_factor);
+ */
+ if(expected_bits > all_available_bits) {
+ rate_factor-= step;
+ ++toobig;
+ }
}
av_free(qscale);
av_free(blured_qscale);
- if(fabs(expected_bits/all_available_bits - 1.0) > 0.01 ){
- av_log(s->avctx, AV_LOG_ERROR, "Error: 2pass curve failed to converge\n");
+ /* check bitrate calculations and print info */
+ qscale_sum = 0.0;
+ for(i=0; i<rcc->num_entries; i++){
+ /* av_log(s->avctx, AV_LOG_DEBUG, "[lavc rc] entry[%d].new_qscale = %.3f qp = %.3f\n",
+ i, rcc->entry[i].new_qscale, rcc->entry[i].new_qscale / FF_QP2LAMBDA); */
+ qscale_sum += clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA, s->avctx->qmin, s->avctx->qmax);
+ }
+ assert(toobig <= 40);
+ av_log(s->avctx, AV_LOG_DEBUG,
+ "[lavc rc] requested bitrate: %d bps expected bitrate: %d bps\n",
+ s->bit_rate,
+ (int)(expected_bits / ((double)all_available_bits/s->bit_rate)));
+ av_log(s->avctx, AV_LOG_DEBUG,
+ "[lavc rc] estimated target average qp: %.3f\n",
+ (float)qscale_sum / rcc->num_entries);
+ if (toobig == 0) {
+ av_log(s->avctx, AV_LOG_INFO,
+ "[lavc rc] Using all of requested bitrate is not "
+ "necessary for this video with these parameters.\n");
+ } else if (toobig == 40) {
+ av_log(s->avctx, AV_LOG_ERROR,
+ "[lavc rc] Error: bitrate too low for this video "
+ "with these parameters.\n");
+ return -1;
+ } else if (fabs(expected_bits/all_available_bits - 1.0) > 0.01) {
+ av_log(s->avctx, AV_LOG_ERROR,
+ "[lavc rc] Error: 2pass curve failed to converge\n");
return -1;
}
diff --git a/contrib/ffmpeg/libavcodec/ratecontrol.h b/contrib/ffmpeg/libavcodec/ratecontrol.h
new file mode 100644
index 000000000..c428923a5
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/ratecontrol.h
@@ -0,0 +1,103 @@
+/*
+ * Ratecontrol
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ * Copyright (c) 2002-2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_RATECONTROL_H
+#define AVCODEC_RATECONTROL_H
+
+/**
+ * @file ratecontrol.h
+ * ratecontrol header.
+ */
+
+#include "eval.h"
+
+typedef struct Predictor{
+ double coeff;
+ double count;
+ double decay;
+} Predictor;
+
+typedef struct RateControlEntry{
+ int pict_type;
+ float qscale;
+ int mv_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int misc_bits;
+ int header_bits;
+ uint64_t expected_bits;
+ int new_pict_type;
+ float new_qscale;
+ int mc_mb_var_sum;
+ int mb_var_sum;
+ int i_count;
+ int skip_count;
+ int f_code;
+ int b_code;
+}RateControlEntry;
+
+/**
+ * rate control context.
+ */
+typedef struct RateControlContext{
+ FILE *stats_file;
+ int num_entries; ///< number of RateControlEntries
+ RateControlEntry *entry;
+ double buffer_index; ///< amount of bits in the video/audio buffer
+ Predictor pred[5];
+ double short_term_qsum; ///< sum of recent qscales
+ double short_term_qcount; ///< count of recent qscales
+ double pass1_rc_eq_output_sum;///< sum of the output of the rc equation, this is used for normalization
+ double pass1_wanted_bits; ///< bits which should have been output by the pass1 code (including complexity init)
+ double last_qscale;
+ double last_qscale_for[5]; ///< last qscale for a specific pict type, used for max_diff & ipb factor stuff
+ int last_mc_mb_var_sum;
+ int last_mb_var_sum;
+ uint64_t i_cplx_sum[5];
+ uint64_t p_cplx_sum[5];
+ uint64_t mv_bits_sum[5];
+ uint64_t qscale_sum[5];
+ int frame_count[5];
+ int last_non_b_pict_type;
+
+ void *non_lavc_opaque; ///< context for non lavc rc code (for example xvid)
+ float dry_run_qscale; ///< for xvid rc
+ int last_picture_number; ///< for xvid rc
+ AVEvalExpr * rc_eq_eval;
+}RateControlContext;
+
+struct MpegEncContext;
+
+/* rate control */
+int ff_rate_control_init(struct MpegEncContext *s);
+float ff_rate_estimate_qscale(struct MpegEncContext *s, int dry_run);
+void ff_write_pass1_stats(struct MpegEncContext *s);
+void ff_rate_control_uninit(struct MpegEncContext *s);
+int ff_vbv_update(struct MpegEncContext *s, int frame_size);
+void ff_get_2pass_fcode(struct MpegEncContext *s);
+
+int ff_xvid_rate_control_init(struct MpegEncContext *s);
+void ff_xvid_rate_control_uninit(struct MpegEncContext *s);
+float ff_xvid_rate_estimate_qscale(struct MpegEncContext *s, int dry_run);
+
+#endif /* AVCODEC_RATECONTROL_H */
+
diff --git a/src/libffmpeg/libavcodec/raw.c b/contrib/ffmpeg/libavcodec/raw.c
index e777397fe..f4fddf73c 100644
--- a/src/libffmpeg/libavcodec/raw.c
+++ b/contrib/ffmpeg/libavcodec/raw.c
@@ -2,18 +2,20 @@
* Raw Video Codec
* Copyright (c) 2001 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -35,9 +37,10 @@ typedef struct PixelFormatTag {
unsigned int fourcc;
} PixelFormatTag;
-const PixelFormatTag pixelFormatTags[] = {
+static const PixelFormatTag pixelFormatTags[] = {
{ PIX_FMT_YUV420P, MKTAG('I', '4', '2', '0') }, /* Planar formats */
{ PIX_FMT_YUV420P, MKTAG('I', 'Y', 'U', 'V') },
+ { PIX_FMT_YUV420P, MKTAG('Y', 'V', '1', '2') },
{ PIX_FMT_YUV410P, MKTAG('Y', 'U', 'V', '9') },
{ PIX_FMT_YUV411P, MKTAG('Y', '4', '1', 'B') },
{ PIX_FMT_YUV422P, MKTAG('Y', '4', '2', 'B') },
@@ -49,6 +52,10 @@ const PixelFormatTag pixelFormatTags[] = {
{ PIX_FMT_YUV422, MKTAG('Y', '4', '2', '2') },
{ PIX_FMT_UYVY422, MKTAG('U', 'Y', 'V', 'Y') },
{ PIX_FMT_GRAY8, MKTAG('G', 'R', 'E', 'Y') },
+ { PIX_FMT_RGB555, MKTAG('R', 'G', 'B', 15) },
+ { PIX_FMT_BGR555, MKTAG('B', 'G', 'R', 15) },
+ { PIX_FMT_RGB565, MKTAG('R', 'G', 'B', 16) },
+ { PIX_FMT_BGR565, MKTAG('B', 'G', 'R', 16) },
/* quicktime */
{ PIX_FMT_UYVY422, MKTAG('2', 'v', 'u', 'y') },
@@ -90,7 +97,7 @@ static int raw_init_decoder(AVCodecContext *avctx)
switch(avctx->bits_per_sample){
case 8: avctx->pix_fmt= PIX_FMT_PAL8 ; break;
case 15: avctx->pix_fmt= PIX_FMT_RGB555; break;
- case 16: avctx->pix_fmt= PIX_FMT_RGB565; break;
+ case 16: avctx->pix_fmt= PIX_FMT_RGB555; break;
case 24: avctx->pix_fmt= PIX_FMT_BGR24 ; break;
case 32: avctx->pix_fmt= PIX_FMT_RGBA32; break;
}
@@ -141,6 +148,15 @@ static int raw_decode(AVCodecContext *avctx,
}
flip(avctx, picture);
+
+ if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2'))
+ {
+ // YV12 stores V before U, so swap the chroma plane pointers
+ unsigned char *tmp = picture->data[1];
+ picture->data[1] = picture->data[2];
+ picture->data[2] = tmp;
+ }
+
*data_size = sizeof(AVPicture);
return buf_size;
}
@@ -154,7 +170,7 @@ static int raw_close_decoder(AVCodecContext *avctx)
}
/* RAW Encoder Implementation */
-
+#ifdef CONFIG_RAWVIDEO_ENCODER
static int raw_init_encoder(AVCodecContext *avctx)
{
avctx->coded_frame = (AVFrame *)avctx->priv_data;
@@ -172,7 +188,6 @@ static int raw_encode(AVCodecContext *avctx,
avctx->height, frame, buf_size);
}
-#ifdef CONFIG_RAWVIDEO_ENCODER
AVCodec rawvideo_encoder = {
"rawvideo",
CODEC_TYPE_VIDEO,
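For orientation on the YV12 handling added above: YV12 stores its chroma planes as V then U, whereas PIX_FMT_YUV420P expects U then V, which is why the decoder swaps picture->data[1] and picture->data[2] when the codec tag is 'YV12'. A minimal standalone sketch of the same idea, outside the patch, assuming MKTAG packs a FourCC into a little-endian 32-bit tag as in FFmpeg:

    #include <stdint.h>

    /* assumed to match FFmpeg's MKTAG: pack four characters into one 32-bit tag */
    #define MKTAG(a,b,c,d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

    /* swap the U and V plane pointers so YV12 data can be treated as YUV420P */
    static void yv12_to_yuv420p(uint8_t *data[3], uint32_t codec_tag)
    {
        if (codec_tag == MKTAG('Y', 'V', '1', '2')) {
            uint8_t *tmp = data[1];
            data[1] = data[2];
            data[2] = tmp;
        }
    }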
diff --git a/contrib/ffmpeg/libavcodec/resample.c b/contrib/ffmpeg/libavcodec/resample.c
new file mode 100644
index 000000000..043e812c8
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/resample.c
@@ -0,0 +1,249 @@
+/*
+ * Sample rate conversion for both audio and video
+ * Copyright (c) 2000 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file resample.c
+ * Sample rate conversion for both audio and video.
+ */
+
+#include "avcodec.h"
+
+struct AVResampleContext;
+
+struct ReSampleContext {
+ struct AVResampleContext *resample_context;
+ short *temp[2];
+ int temp_len;
+ float ratio;
+ /* channel convert */
+ int input_channels, output_channels, filter_channels;
+};
+
+/* n1: number of samples */
+static void stereo_to_mono(short *output, short *input, int n1)
+{
+ short *p, *q;
+ int n = n1;
+
+ p = input;
+ q = output;
+ while (n >= 4) {
+ q[0] = (p[0] + p[1]) >> 1;
+ q[1] = (p[2] + p[3]) >> 1;
+ q[2] = (p[4] + p[5]) >> 1;
+ q[3] = (p[6] + p[7]) >> 1;
+ q += 4;
+ p += 8;
+ n -= 4;
+ }
+ while (n > 0) {
+ q[0] = (p[0] + p[1]) >> 1;
+ q++;
+ p += 2;
+ n--;
+ }
+}
+
+/* n1: number of samples */
+static void mono_to_stereo(short *output, short *input, int n1)
+{
+ short *p, *q;
+ int n = n1;
+ int v;
+
+ p = input;
+ q = output;
+ while (n >= 4) {
+ v = p[0]; q[0] = v; q[1] = v;
+ v = p[1]; q[2] = v; q[3] = v;
+ v = p[2]; q[4] = v; q[5] = v;
+ v = p[3]; q[6] = v; q[7] = v;
+ q += 8;
+ p += 4;
+ n -= 4;
+ }
+ while (n > 0) {
+ v = p[0]; q[0] = v; q[1] = v;
+ q += 2;
+ p += 1;
+ n--;
+ }
+}
+
+/* XXX: should use more abstract 'N' channels system */
+static void stereo_split(short *output1, short *output2, short *input, int n)
+{
+ int i;
+
+ for(i=0;i<n;i++) {
+ *output1++ = *input++;
+ *output2++ = *input++;
+ }
+}
+
+static void stereo_mux(short *output, short *input1, short *input2, int n)
+{
+ int i;
+
+ for(i=0;i<n;i++) {
+ *output++ = *input1++;
+ *output++ = *input2++;
+ }
+}
+
+static void ac3_5p1_mux(short *output, short *input1, short *input2, int n)
+{
+ int i;
+ short l,r;
+
+ for(i=0;i<n;i++) {
+ l=*input1++;
+ r=*input2++;
+ *output++ = l; /* left */
+ *output++ = (l/2)+(r/2); /* center */
+ *output++ = r; /* right */
+ *output++ = 0; /* left surround */
+ *output++ = 0; /* right surround */
+ *output++ = 0; /* low freq */
+ }
+}
+
+ReSampleContext *audio_resample_init(int output_channels, int input_channels,
+ int output_rate, int input_rate)
+{
+ ReSampleContext *s;
+
+ if ( input_channels > 2)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Resampling with input channels greater than 2 unsupported.");
+ return NULL;
+ }
+
+ s = av_mallocz(sizeof(ReSampleContext));
+ if (!s)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for resample context.");
+ return NULL;
+ }
+
+ s->ratio = (float)output_rate / (float)input_rate;
+
+ s->input_channels = input_channels;
+ s->output_channels = output_channels;
+
+ s->filter_channels = s->input_channels;
+ if (s->output_channels < s->filter_channels)
+ s->filter_channels = s->output_channels;
+
+/*
+ * ac3 output is the only case where filter_channels could be greater than 2.
+ * input channels can't be greater than 2, so resample the 2 channels and then
+ * expand to 6 channels after the resampling.
+ */
+ if(s->filter_channels>2)
+ s->filter_channels = 2;
+
+ s->resample_context= av_resample_init(output_rate, input_rate, 16, 10, 0, 1.0);
+
+ return s;
+}
+
+/* resample audio. 'nb_samples' is the number of input samples */
+/* XXX: optimize it ! */
+int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples)
+{
+ int i, nb_samples1;
+ short *bufin[2];
+ short *bufout[2];
+ short *buftmp2[2], *buftmp3[2];
+ int lenout;
+
+ if (s->input_channels == s->output_channels && s->ratio == 1.0 && 0) {
+ /* nothing to do */
+ memcpy(output, input, nb_samples * s->input_channels * sizeof(short));
+ return nb_samples;
+ }
+
+ /* XXX: move those malloc to resample init code */
+ for(i=0; i<s->filter_channels; i++){
+ bufin[i]= (short*) av_malloc( (nb_samples + s->temp_len) * sizeof(short) );
+ memcpy(bufin[i], s->temp[i], s->temp_len * sizeof(short));
+ buftmp2[i] = bufin[i] + s->temp_len;
+ }
+
+ /* allow some extra output room to avoid rounding problems */
+ lenout= (int)(nb_samples * s->ratio) + 16;
+ bufout[0]= (short*) av_malloc( lenout * sizeof(short) );
+ bufout[1]= (short*) av_malloc( lenout * sizeof(short) );
+
+ if (s->input_channels == 2 &&
+ s->output_channels == 1) {
+ buftmp3[0] = output;
+ stereo_to_mono(buftmp2[0], input, nb_samples);
+ } else if (s->output_channels >= 2 && s->input_channels == 1) {
+ buftmp3[0] = bufout[0];
+ memcpy(buftmp2[0], input, nb_samples*sizeof(short));
+ } else if (s->output_channels >= 2) {
+ buftmp3[0] = bufout[0];
+ buftmp3[1] = bufout[1];
+ stereo_split(buftmp2[0], buftmp2[1], input, nb_samples);
+ } else {
+ buftmp3[0] = output;
+ memcpy(buftmp2[0], input, nb_samples*sizeof(short));
+ }
+
+ nb_samples += s->temp_len;
+
+ /* resample each channel */
+ nb_samples1 = 0; /* avoid warning */
+ for(i=0;i<s->filter_channels;i++) {
+ int consumed;
+ int is_last= i+1 == s->filter_channels;
+
+ nb_samples1 = av_resample(s->resample_context, buftmp3[i], bufin[i], &consumed, nb_samples, lenout, is_last);
+ s->temp_len= nb_samples - consumed;
+ s->temp[i]= av_realloc(s->temp[i], s->temp_len*sizeof(short));
+ memcpy(s->temp[i], bufin[i] + consumed, s->temp_len*sizeof(short));
+ }
+
+ if (s->output_channels == 2 && s->input_channels == 1) {
+ mono_to_stereo(output, buftmp3[0], nb_samples1);
+ } else if (s->output_channels == 2) {
+ stereo_mux(output, buftmp3[0], buftmp3[1], nb_samples1);
+ } else if (s->output_channels == 6) {
+ ac3_5p1_mux(output, buftmp3[0], buftmp3[1], nb_samples1);
+ }
+
+ for(i=0; i<s->filter_channels; i++)
+ av_free(bufin[i]);
+
+ av_free(bufout[0]);
+ av_free(bufout[1]);
+ return nb_samples1;
+}
+
+void audio_resample_close(ReSampleContext *s)
+{
+ av_resample_close(s->resample_context);
+ av_freep(&s->temp[0]);
+ av_freep(&s->temp[1]);
+ av_free(s);
+}
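A minimal usage sketch of the resampling API this file implements (audio_resample_init, audio_resample, audio_resample_close); the rates, channel counts and buffer sizing below are illustrative assumptions, not part of the patch:

    #include "avcodec.h"   /* assumed to provide ReSampleContext and the prototypes above */

    /* resample mono 44100 Hz input to 48000 Hz; returns the number of output samples or -1 */
    static int resample_example(short *out, short *in, int nb_in)
    {
        ReSampleContext *rs = audio_resample_init(1, 1, 48000, 44100);
        int nb_out;

        if (!rs)
            return -1;
        /* 'out' must hold roughly nb_in * 48000 / 44100 samples plus a small rounding margin */
        nb_out = audio_resample(rs, out, in, nb_in);
        audio_resample_close(rs);
        return nb_out;
    }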
diff --git a/src/libffmpeg/libavcodec/resample2.c b/contrib/ffmpeg/libavcodec/resample2.c
index 11da57651..3ae0ba855 100644
--- a/src/libffmpeg/libavcodec/resample2.c
+++ b/contrib/ffmpeg/libavcodec/resample2.c
@@ -2,18 +2,20 @@
* audio resampling
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -213,7 +215,7 @@ int av_resample(AVResampleContext *c, short *dst, short *src, int *consumed, int
if(sample_index < 0){
for(i=0; i<c->filter_length; i++)
- val += src[ABS(sample_index + i) % src_size] * filter[i];
+ val += src[FFABS(sample_index + i) % src_size] * filter[i];
}else if(sample_index + c->filter_length > src_size){
break;
}else if(c->linear){
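The ABS to FFABS change above only switches to the macro name used throughout FFmpeg; the surrounding logic still mirrors samples off the start of the buffer when sample_index is negative. A tiny standalone check of that mirroring, with FFABS written out under its usual definition (an assumption here):

    #include <assert.h>

    #define FFABS(a) ((a) >= 0 ? (a) : (-(a)))   /* assumed to match FFmpeg's definition */

    int main(void)
    {
        int src_size = 8;
        /* with sample_index = -3, the first taps read mirrored samples 3, 2, 1, 0, 1, ... */
        assert(FFABS(-3 + 0) % src_size == 3);
        assert(FFABS(-3 + 2) % src_size == 1);
        assert(FFABS(-3 + 5) % src_size == 2);
        return 0;
    }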
diff --git a/src/libffmpeg/libavcodec/roqvideo.c b/contrib/ffmpeg/libavcodec/roqvideo.c
index 462a4cf72..4595b047c 100644
--- a/src/libffmpeg/libavcodec/roqvideo.c
+++ b/contrib/ffmpeg/libavcodec/roqvideo.c
@@ -1,18 +1,20 @@
/*
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/rpza.c b/contrib/ffmpeg/libavcodec/rpza.c
index 8c0766273..9a996da37 100644
--- a/src/libffmpeg/libavcodec/rpza.c
+++ b/contrib/ffmpeg/libavcodec/rpza.c
@@ -2,18 +2,20 @@
* Quicktime Video (RPZA) Video Decoder
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/rtjpeg.c b/contrib/ffmpeg/libavcodec/rtjpeg.c
index ebf10886b..dbc6cfd88 100644
--- a/src/libffmpeg/libavcodec/rtjpeg.c
+++ b/contrib/ffmpeg/libavcodec/rtjpeg.c
@@ -2,18 +2,20 @@
* RTJpeg decoding functions
* Copyright (c) 2006 Reimar Doeffinger
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "common.h"
@@ -96,31 +98,32 @@ static inline int get_block(GetBitContext *gb, DCTELEM *block, uint8_t *scan,
*/
int rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f,
uint8_t *buf, int buf_size) {
+ DECLARE_ALIGNED_16(DCTELEM, block[64]);
GetBitContext gb;
int w = c->w / 16, h = c->h / 16;
int x, y;
- void *y1 = f->data[0], *y2 = f->data[0] + 8 * f->linesize[0];
- void *u = f->data[1], *v = f->data[2];
+ uint8_t *y1 = f->data[0], *y2 = f->data[0] + 8 * f->linesize[0];
+ uint8_t *u = f->data[1], *v = f->data[2];
init_get_bits(&gb, buf, buf_size * 8);
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++) {
- if (get_block(&gb, c->block, c->scan, c->lquant))
- c->dsp->idct_put(y1, f->linesize[0], c->block);
+ if (get_block(&gb, block, c->scan, c->lquant))
+ c->dsp->idct_put(y1, f->linesize[0], block);
y1 += 8;
- if (get_block(&gb, c->block, c->scan, c->lquant))
- c->dsp->idct_put(y1, f->linesize[0], c->block);
+ if (get_block(&gb, block, c->scan, c->lquant))
+ c->dsp->idct_put(y1, f->linesize[0], block);
y1 += 8;
- if (get_block(&gb, c->block, c->scan, c->lquant))
- c->dsp->idct_put(y2, f->linesize[0], c->block);
+ if (get_block(&gb, block, c->scan, c->lquant))
+ c->dsp->idct_put(y2, f->linesize[0], block);
y2 += 8;
- if (get_block(&gb, c->block, c->scan, c->lquant))
- c->dsp->idct_put(y2, f->linesize[0], c->block);
+ if (get_block(&gb, block, c->scan, c->lquant))
+ c->dsp->idct_put(y2, f->linesize[0], block);
y2 += 8;
- if (get_block(&gb, c->block, c->scan, c->cquant))
- c->dsp->idct_put(u, f->linesize[1], c->block);
+ if (get_block(&gb, block, c->scan, c->cquant))
+ c->dsp->idct_put(u, f->linesize[1], block);
u += 8;
- if (get_block(&gb, c->block, c->scan, c->cquant))
- c->dsp->idct_put(v, f->linesize[2], c->block);
+ if (get_block(&gb, block, c->scan, c->cquant))
+ c->dsp->idct_put(v, f->linesize[2], block);
v += 8;
}
y1 += 2 * 8 * (f->linesize[0] - w);
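The rtjpeg.c hunk above moves the IDCT scratch block out of RTJpegContext onto a 16-byte-aligned stack array so the idct_put() calls can rely on aligned loads. The exact DECLARE_ALIGNED_16 macro lives in FFmpeg's headers; on GCC-style compilers it typically reduces to an alignment attribute, roughly as sketched here (an assumption, not the patch's definition):

    /* assumed expansion of DECLARE_ALIGNED_16 on GCC-style compilers */
    #define DECLARE_ALIGNED_16_SKETCH(t, v) t v __attribute__((aligned(16)))

    static void example(void)
    {
        DECLARE_ALIGNED_16_SKETCH(short, block[64]);   /* DCTELEM is assumed to be a short here */
        block[0] = 0;                                  /* 'block' now starts on a 16-byte boundary */
    }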
diff --git a/contrib/ffmpeg/libavcodec/rtjpeg.h b/contrib/ffmpeg/libavcodec/rtjpeg.h
new file mode 100644
index 000000000..daecc8a75
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/rtjpeg.h
@@ -0,0 +1,39 @@
+/*
+ * RTJpeg decoding functions
+ * copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef RTJPEG_H
+#define RTJPEG_H
+
+typedef struct {
+ int w, h;
+ DSPContext *dsp;
+ uint8_t scan[64];
+ uint32_t lquant[64];
+ uint32_t cquant[64];
+} RTJpegContext;
+
+void rtjpeg_decode_init(RTJpegContext *c, DSPContext *dsp,
+ int width, int height,
+ uint32_t *lquant, uint32_t *cquant);
+
+int rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f,
+ uint8_t *buf, int buf_size);
+#endif
diff --git a/src/libffmpeg/libavcodec/rv10.c b/contrib/ffmpeg/libavcodec/rv10.c
index daec2b85b..4b50609c1 100644
--- a/src/libffmpeg/libavcodec/rv10.c
+++ b/contrib/ffmpeg/libavcodec/rv10.c
@@ -3,18 +3,20 @@
* Copyright (c) 2000,2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -383,8 +385,9 @@ static int rv20_decode_picture_header(MpegEncContext *s)
av_log(s->avctx, AV_LOG_DEBUG, "\n");
#endif
#if 0
+ av_log(s->avctx, AV_LOG_DEBUG, "%3dx%03d/%02Xx%02X ", s->width, s->height, s->width/4, s->height/4);
for(i=0; i<s->avctx->extradata_size; i++){
- av_log(s->avctx, AV_LOG_DEBUG, "%2X ", ((uint8_t*)s->avctx->extradata)[i]);
+ av_log(s->avctx, AV_LOG_DEBUG, "%02X ", ((uint8_t*)s->avctx->extradata)[i]);
if(i%4==3) av_log(s->avctx, AV_LOG_DEBUG, " ");
}
av_log(s->avctx, AV_LOG_DEBUG, "\n");
@@ -431,17 +434,32 @@ static int rv20_decode_picture_header(MpegEncContext *s)
}
if(s->avctx->has_b_frames){
- int f=9;
- int v= s->avctx->extradata_size >= 4 ? ((uint8_t*)s->avctx->extradata)[1] : 0;
+ int f, new_w, new_h;
+ int v= s->avctx->extradata_size >= 4 ? 7&((uint8_t*)s->avctx->extradata)[1] : 0;
if (get_bits(&s->gb, 1)){
av_log(s->avctx, AV_LOG_ERROR, "unknown bit3 set\n");
// return -1;
}
- seq= get_bits(&s->gb, 14)<<1;
+ seq= get_bits(&s->gb, 13)<<2;
+
+ f= get_bits(&s->gb, av_log2(v)+1);
- if(v)
- f= get_bits(&s->gb, av_log2(v));
+ if(f){
+ new_w= 4*((uint8_t*)s->avctx->extradata)[6+2*f];
+ new_h= 4*((uint8_t*)s->avctx->extradata)[7+2*f];
+ }else{
+ new_w= s->width; //FIXME wrong, the original dimensions should of course be saved in the context
+ new_h= s->height;
+ }
+ if(new_w != s->width || new_h != s->height){
+ av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h);
+ MPV_common_end(s);
+ s->width = s->avctx->width = new_w;
+ s->height = s->avctx->height= new_h;
+ if (MPV_common_init(s) < 0)
+ return -1;
+ }
if(s->avctx->debug & FF_DEBUG_PICT_INFO){
av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, v);
@@ -473,6 +491,7 @@ static int rv20_decode_picture_header(MpegEncContext *s)
av_log(s->avctx, AV_LOG_DEBUG, "messed up order, possible from seeking? skipping current b frame\n");
return FRAME_SKIPPED;
}
+ ff_mpeg4_init_direct_mv(s);
}
}
// printf("%d %d %d %d %d\n", seq, (int)s->time, (int)s->last_non_b_time, s->pp_time, s->pb_time);
@@ -515,26 +534,25 @@ static int rv10_decode_init(AVCodecContext *avctx)
s->width = avctx->width;
s->height = avctx->height;
+ s->h263_long_vectors= ((uint8_t*)avctx->extradata)[3] & 1;
+ avctx->sub_id= BE_32((uint8_t*)avctx->extradata + 4);
+
switch(avctx->sub_id){
case 0x10000000:
s->rv10_version= 0;
- s->h263_long_vectors=0;
s->low_delay=1;
break;
case 0x10002000:
s->rv10_version= 3;
- s->h263_long_vectors=1;
s->low_delay=1;
s->obmc=1;
break;
case 0x10003000:
s->rv10_version= 3;
- s->h263_long_vectors=1;
s->low_delay=1;
break;
case 0x10003001:
s->rv10_version= 3;
- s->h263_long_vectors=0;
s->low_delay=1;
break;
case 0x20001000: /* real rv20 decoder fail on this id */
@@ -594,7 +612,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,
uint8_t *buf, int buf_size)
{
MpegEncContext *s = avctx->priv_data;
- int mb_count, mb_pos, left;
+ int mb_count, mb_pos, left, start_mb_x;
init_get_bits(&s->gb, buf, buf_size*8);
if(s->codec_id ==CODEC_ID_RV10)
@@ -639,8 +657,9 @@ static int rv10_decode_packet(AVCodecContext *avctx,
if(s->mb_y==0) s->first_slice_line=1;
}else{
s->first_slice_line=1;
+ s->resync_mb_x= s->mb_x;
}
- s->resync_mb_x= s->mb_x;
+ start_mb_x= s->mb_x;
s->resync_mb_y= s->mb_y;
if(s->h263_aic){
s->y_dc_scale_table=
@@ -699,7 +718,7 @@ static int rv10_decode_packet(AVCodecContext *avctx,
if(ret == SLICE_END) break;
}
- ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
+ ff_er_add_slice(s, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);
return buf_size;
}
diff --git a/contrib/ffmpeg/libavcodec/sh4/dsputil_align.c b/contrib/ffmpeg/libavcodec/sh4/dsputil_align.c
new file mode 100644
index 000000000..7e7e3304b
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/sh4/dsputil_align.c
@@ -0,0 +1,430 @@
+/*
+ * aligned/packed access motion
+ *
+ * Copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "../avcodec.h"
+#include "../dsputil.h"
+
+
+#define LP(p) *(uint32_t*)(p)
+
+
+#define UNPACK(ph,pl,tt0,tt1) do { \
+ uint32_t t0,t1; t0=tt0;t1=tt1; \
+ ph = ( (t0 & ~BYTE_VEC32(0x03))>>2) + ( (t1 & ~BYTE_VEC32(0x03))>>2); \
+ pl = (t0 & BYTE_VEC32(0x03)) + (t1 & BYTE_VEC32(0x03)); } while(0)
+
+#define rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x02))>>2) & BYTE_VEC32(0x03))
+#define no_rnd_PACK(ph,pl,nph,npl) ph + nph + (((pl + npl + BYTE_VEC32(0x01))>>2) & BYTE_VEC32(0x03))
+
+/* little endian */
+#define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)>>(8*ofs))|((b)<<(32-8*ofs)) )
+#define MERGE2(a,b,ofs) (ofs==3)?b:( ((a)>>(8*(ofs+1)))|((b)<<(32-8*(ofs+1))) )
+/* big
+#define MERGE1(a,b,ofs) (ofs==0)?a:( ((a)<<(8*ofs))|((b)>>(32-8*ofs)) )
+#define MERGE2(a,b,ofs) (ofs==3)?b:( ((a)<<(8+8*ofs))|((b)>>(32-8-8*ofs)) )
+*/
+
+
+#define put(d,s) d = s
+#define avg(d,s) d = rnd_avg32(s,d)
+
+#define OP_C4(ofs) \
+ ref-=ofs; \
+ do { \
+ OP(LP(dest),MERGE1(LP(ref),LP(ref+4),ofs)); \
+ ref+=stride; \
+ dest+=stride; \
+ } while(--height)
+
+#define OP_C40() \
+ do { \
+ OP(LP(dest),LP(ref)); \
+ ref+=stride; \
+ dest+=stride; \
+ } while(--height)
+
+
+#define OP put
+
+static void put_pixels4_c(uint8_t *dest,const uint8_t *ref, const int stride,int height)
+{
+ switch((int)ref&3){
+ case 0: OP_C40(); return;
+ case 1: OP_C4(1); return;
+ case 2: OP_C4(2); return;
+ case 3: OP_C4(3); return;
+ }
+}
+
+#undef OP
+#define OP avg
+
+static void avg_pixels4_c(uint8_t *dest,const uint8_t *ref, const int stride,int height)
+{
+ switch((int)ref&3){
+ case 0: OP_C40(); return;
+ case 1: OP_C4(1); return;
+ case 2: OP_C4(2); return;
+ case 3: OP_C4(3); return;
+ }
+}
+
+#undef OP
+
+#define OP_C(ofs,sz,avg2) \
+{ \
+ ref-=ofs; \
+ do { \
+ uint32_t t0,t1; \
+ t0 = LP(ref+0); \
+ t1 = LP(ref+4); \
+ OP(LP(dest+0), MERGE1(t0,t1,ofs)); \
+ t0 = LP(ref+8); \
+ OP(LP(dest+4), MERGE1(t1,t0,ofs)); \
+if (sz==16) { \
+ t1 = LP(ref+12); \
+ OP(LP(dest+8), MERGE1(t0,t1,ofs)); \
+ t0 = LP(ref+16); \
+ OP(LP(dest+12), MERGE1(t1,t0,ofs)); \
+} \
+ ref+=stride; \
+ dest+= stride; \
+ } while(--height); \
+}
+
+/* aligned */
+#define OP_C0(sz,avg2) \
+{ \
+ do { \
+ OP(LP(dest+0), LP(ref+0)); \
+ OP(LP(dest+4), LP(ref+4)); \
+if (sz==16) { \
+ OP(LP(dest+8), LP(ref+8)); \
+ OP(LP(dest+12), LP(ref+12)); \
+} \
+ ref+=stride; \
+ dest+= stride; \
+ } while(--height); \
+}
+
+#define OP_X(ofs,sz,avg2) \
+{ \
+ ref-=ofs; \
+ do { \
+ uint32_t t0,t1; \
+ t0 = LP(ref+0); \
+ t1 = LP(ref+4); \
+ OP(LP(dest+0), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
+ t0 = LP(ref+8); \
+ OP(LP(dest+4), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
+if (sz==16) { \
+ t1 = LP(ref+12); \
+ OP(LP(dest+8), avg2(MERGE1(t0,t1,ofs),MERGE2(t0,t1,ofs))); \
+ t0 = LP(ref+16); \
+ OP(LP(dest+12), avg2(MERGE1(t1,t0,ofs),MERGE2(t1,t0,ofs))); \
+} \
+ ref+=stride; \
+ dest+= stride; \
+ } while(--height); \
+}
+
+/* aligned */
+#define OP_Y0(sz,avg2) \
+{ \
+ uint32_t t0,t1,t2,t3,t; \
+\
+ t0 = LP(ref+0); \
+ t1 = LP(ref+4); \
+if (sz==16) { \
+ t2 = LP(ref+8); \
+ t3 = LP(ref+12); \
+} \
+ do { \
+ ref += stride; \
+\
+ t = LP(ref+0); \
+ OP(LP(dest+0), avg2(t0,t)); t0 = t; \
+ t = LP(ref+4); \
+ OP(LP(dest+4), avg2(t1,t)); t1 = t; \
+if (sz==16) { \
+ t = LP(ref+8); \
+ OP(LP(dest+8), avg2(t2,t)); t2 = t; \
+ t = LP(ref+12); \
+ OP(LP(dest+12), avg2(t3,t)); t3 = t; \
+} \
+ dest+= stride; \
+ } while(--height); \
+}
+
+#define OP_Y(ofs,sz,avg2) \
+{ \
+ uint32_t t0,t1,t2,t3,t,w0,w1; \
+\
+ ref-=ofs; \
+ w0 = LP(ref+0); \
+ w1 = LP(ref+4); \
+ t0 = MERGE1(w0,w1,ofs); \
+ w0 = LP(ref+8); \
+ t1 = MERGE1(w1,w0,ofs); \
+if (sz==16) { \
+ w1 = LP(ref+12); \
+ t2 = MERGE1(w0,w1,ofs); \
+ w0 = LP(ref+16); \
+ t3 = MERGE1(w1,w0,ofs); \
+} \
+ do { \
+ ref += stride; \
+\
+ w0 = LP(ref+0); \
+ w1 = LP(ref+4); \
+ t = MERGE1(w0,w1,ofs); \
+ OP(LP(dest+0), avg2(t0,t)); t0 = t; \
+ w0 = LP(ref+8); \
+ t = MERGE1(w1,w0,ofs); \
+ OP(LP(dest+4), avg2(t1,t)); t1 = t; \
+if (sz==16) { \
+ w1 = LP(ref+12); \
+ t = MERGE1(w0,w1,ofs); \
+ OP(LP(dest+8), avg2(t2,t)); t2 = t; \
+ w0 = LP(ref+16); \
+ t = MERGE1(w1,w0,ofs); \
+ OP(LP(dest+12), avg2(t3,t)); t3 = t; \
+} \
+ dest+=stride; \
+ } while(--height); \
+}
+
+#define OP_X0(sz,avg2) OP_X(0,sz,avg2)
+#define OP_XY0(sz,PACK) OP_XY(0,sz,PACK)
+#define OP_XY(ofs,sz,PACK) \
+{ \
+ uint32_t t2,t3,w0,w1; \
+ uint32_t a0,a1,a2,a3,a4,a5,a6,a7; \
+\
+ ref -= ofs; \
+ w0 = LP(ref+0); \
+ w1 = LP(ref+4); \
+ UNPACK(a0,a1,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
+ w0 = LP(ref+8); \
+ UNPACK(a2,a3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
+if (sz==16) { \
+ w1 = LP(ref+12); \
+ UNPACK(a4,a5,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
+ w0 = LP(ref+16); \
+ UNPACK(a6,a7,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
+} \
+ do { \
+ ref+=stride; \
+ w0 = LP(ref+0); \
+ w1 = LP(ref+4); \
+ UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
+ OP(LP(dest+0),PACK(a0,a1,t2,t3)); \
+ a0 = t2; a1 = t3; \
+ w0 = LP(ref+8); \
+ UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
+ OP(LP(dest+4),PACK(a2,a3,t2,t3)); \
+ a2 = t2; a3 = t3; \
+if (sz==16) { \
+ w1 = LP(ref+12); \
+ UNPACK(t2,t3,MERGE1(w0,w1,ofs),MERGE2(w0,w1,ofs)); \
+ OP(LP(dest+8),PACK(a4,a5,t2,t3)); \
+ a4 = t2; a5 = t3; \
+ w0 = LP(ref+16); \
+ UNPACK(t2,t3,MERGE1(w1,w0,ofs),MERGE2(w1,w0,ofs)); \
+ OP(LP(dest+12),PACK(a6,a7,t2,t3)); \
+ a6 = t2; a7 = t3; \
+} \
+ dest+=stride; \
+ } while(--height); \
+}
+
+#define DEFFUNC(op,rnd,xy,sz,OP_N,avgfunc) \
+static void op##_##rnd##_pixels##sz##_##xy (uint8_t * dest, const uint8_t * ref, \
+ const int stride, int height) \
+{ \
+ switch((int)ref&3) { \
+ case 0:OP_N##0(sz,rnd##_##avgfunc); return; \
+ case 1:OP_N(1,sz,rnd##_##avgfunc); return; \
+ case 2:OP_N(2,sz,rnd##_##avgfunc); return; \
+ case 3:OP_N(3,sz,rnd##_##avgfunc); return; \
+ } \
+}
+
+#define OP put
+
+DEFFUNC(put, rnd,o,8,OP_C,avg2)
+DEFFUNC(put, rnd,x,8,OP_X,avg2)
+DEFFUNC(put,no_rnd,x,8,OP_X,avg2)
+DEFFUNC(put, rnd,y,8,OP_Y,avg2)
+DEFFUNC(put,no_rnd,y,8,OP_Y,avg2)
+DEFFUNC(put, rnd,xy,8,OP_XY,PACK)
+DEFFUNC(put,no_rnd,xy,8,OP_XY,PACK)
+DEFFUNC(put, rnd,o,16,OP_C,avg2)
+DEFFUNC(put, rnd,x,16,OP_X,avg2)
+DEFFUNC(put,no_rnd,x,16,OP_X,avg2)
+DEFFUNC(put, rnd,y,16,OP_Y,avg2)
+DEFFUNC(put,no_rnd,y,16,OP_Y,avg2)
+DEFFUNC(put, rnd,xy,16,OP_XY,PACK)
+DEFFUNC(put,no_rnd,xy,16,OP_XY,PACK)
+
+#undef OP
+#define OP avg
+
+DEFFUNC(avg, rnd,o,8,OP_C,avg2)
+DEFFUNC(avg, rnd,x,8,OP_X,avg2)
+DEFFUNC(avg,no_rnd,x,8,OP_X,avg2)
+DEFFUNC(avg, rnd,y,8,OP_Y,avg2)
+DEFFUNC(avg,no_rnd,y,8,OP_Y,avg2)
+DEFFUNC(avg, rnd,xy,8,OP_XY,PACK)
+DEFFUNC(avg,no_rnd,xy,8,OP_XY,PACK)
+DEFFUNC(avg, rnd,o,16,OP_C,avg2)
+DEFFUNC(avg, rnd,x,16,OP_X,avg2)
+DEFFUNC(avg,no_rnd,x,16,OP_X,avg2)
+DEFFUNC(avg, rnd,y,16,OP_Y,avg2)
+DEFFUNC(avg,no_rnd,y,16,OP_Y,avg2)
+DEFFUNC(avg, rnd,xy,16,OP_XY,PACK)
+DEFFUNC(avg,no_rnd,xy,16,OP_XY,PACK)
+
+#undef OP
+
+#define put_no_rnd_pixels8_o put_rnd_pixels8_o
+#define put_no_rnd_pixels16_o put_rnd_pixels16_o
+#define avg_no_rnd_pixels8_o avg_rnd_pixels8_o
+#define avg_no_rnd_pixels16_o avg_rnd_pixels16_o
+
+#define put_pixels8_c put_rnd_pixels8_o
+#define put_pixels16_c put_rnd_pixels16_o
+#define avg_pixels8_c avg_rnd_pixels8_o
+#define avg_pixels16_c avg_rnd_pixels16_o
+#define put_no_rnd_pixels8_c put_rnd_pixels8_o
+#define put_no_rnd_pixels16_c put_rnd_pixels16_o
+#define avg_no_rnd_pixels8_c avg_rnd_pixels8_o
+#define avg_no_rnd_pixels16_c avg_rnd_pixels16_o
+
+#define QPEL
+
+#ifdef QPEL
+
+#include "qpel.c"
+
+#endif
+
+void dsputil_init_align(DSPContext* c, AVCodecContext *avctx)
+{
+ c->put_pixels_tab[0][0] = put_rnd_pixels16_o;
+ c->put_pixels_tab[0][1] = put_rnd_pixels16_x;
+ c->put_pixels_tab[0][2] = put_rnd_pixels16_y;
+ c->put_pixels_tab[0][3] = put_rnd_pixels16_xy;
+ c->put_pixels_tab[1][0] = put_rnd_pixels8_o;
+ c->put_pixels_tab[1][1] = put_rnd_pixels8_x;
+ c->put_pixels_tab[1][2] = put_rnd_pixels8_y;
+ c->put_pixels_tab[1][3] = put_rnd_pixels8_xy;
+
+ c->put_no_rnd_pixels_tab[0][0] = put_no_rnd_pixels16_o;
+ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x;
+ c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y;
+ c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy;
+ c->put_no_rnd_pixels_tab[1][0] = put_no_rnd_pixels8_o;
+ c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x;
+ c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y;
+ c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy;
+
+ c->avg_pixels_tab[0][0] = avg_rnd_pixels16_o;
+ c->avg_pixels_tab[0][1] = avg_rnd_pixels16_x;
+ c->avg_pixels_tab[0][2] = avg_rnd_pixels16_y;
+ c->avg_pixels_tab[0][3] = avg_rnd_pixels16_xy;
+ c->avg_pixels_tab[1][0] = avg_rnd_pixels8_o;
+ c->avg_pixels_tab[1][1] = avg_rnd_pixels8_x;
+ c->avg_pixels_tab[1][2] = avg_rnd_pixels8_y;
+ c->avg_pixels_tab[1][3] = avg_rnd_pixels8_xy;
+
+ c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_o;
+ c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x;
+ c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y;
+ c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy;
+ c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_o;
+ c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x;
+ c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y;
+ c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy;
+
+#ifdef QPEL
+
+#define dspfunc(PFX, IDX, NUM) \
+ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_c; \
+ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_c; \
+ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_c; \
+ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_c; \
+ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_c; \
+ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_c; \
+ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_c; \
+ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_c; \
+ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_c; \
+ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_c; \
+ c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_c; \
+ c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_c; \
+ c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_c; \
+ c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_c; \
+ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_c; \
+ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_c
+
+ dspfunc(put_qpel, 0, 16);
+ dspfunc(put_no_rnd_qpel, 0, 16);
+
+ dspfunc(avg_qpel, 0, 16);
+ /* dspfunc(avg_no_rnd_qpel, 0, 16); */
+
+ dspfunc(put_qpel, 1, 8);
+ dspfunc(put_no_rnd_qpel, 1, 8);
+
+ dspfunc(avg_qpel, 1, 8);
+ /* dspfunc(avg_no_rnd_qpel, 1, 8); */
+
+ dspfunc(put_h264_qpel, 0, 16);
+ dspfunc(put_h264_qpel, 1, 8);
+ dspfunc(put_h264_qpel, 2, 4);
+ dspfunc(avg_h264_qpel, 0, 16);
+ dspfunc(avg_h264_qpel, 1, 8);
+ dspfunc(avg_h264_qpel, 2, 4);
+
+#undef dspfunc
+ c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_c;
+ c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_c;
+ c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_c;
+ c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_c;
+ c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_c;
+ c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_c;
+
+ c->put_mspel_pixels_tab[0]= put_mspel8_mc00_c;
+ c->put_mspel_pixels_tab[1]= put_mspel8_mc10_c;
+ c->put_mspel_pixels_tab[2]= put_mspel8_mc20_c;
+ c->put_mspel_pixels_tab[3]= put_mspel8_mc30_c;
+ c->put_mspel_pixels_tab[4]= put_mspel8_mc02_c;
+ c->put_mspel_pixels_tab[5]= put_mspel8_mc12_c;
+ c->put_mspel_pixels_tab[6]= put_mspel8_mc22_c;
+ c->put_mspel_pixels_tab[7]= put_mspel8_mc32_c;
+
+ c->gmc1 = gmc1_c;
+ c->gmc = gmc_c;
+
+#endif
+}
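The MERGE1/MERGE2 macros above reconstruct an unaligned 32-bit word from two aligned neighbours on little-endian targets, which is what lets the put/avg loops work from aligned LP() loads only. A small host-side check of the MERGE1 arithmetic (illustrative, little-endian only):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        uint8_t buf[8] = {0x10, 0x21, 0x32, 0x43, 0x54, 0x65, 0x76, 0x87};
        uint32_t a, b, merged, expect;
        int ofs = 1;                                         /* byte misalignment, 1..3 */

        memcpy(&a, buf, 4);                                  /* aligned word at buf      */
        memcpy(&b, buf + 4, 4);                              /* aligned word at buf + 4  */
        merged = (a >> (8 * ofs)) | (b << (32 - 8 * ofs));   /* same as MERGE1(a,b,ofs)  */
        memcpy(&expect, buf + ofs, 4);                       /* reference unaligned read */
        assert(merged == expect);                            /* holds on little-endian hosts */
        return 0;
    }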
diff --git a/contrib/ffmpeg/libavcodec/sh4/dsputil_sh4.c b/contrib/ffmpeg/libavcodec/sh4/dsputil_sh4.c
new file mode 100644
index 000000000..b38eb2551
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/sh4/dsputil_sh4.c
@@ -0,0 +1,120 @@
+/*
+ * sh4 dsputil
+ *
+ * Copyright (c) 2003 BERO <bero@geocities.co.jp>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../avcodec.h"
+#include "../dsputil.h"
+
+static void memzero_align8(void *dst,size_t size)
+{
+#if defined(__SH4__) || defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
+ (char*)dst+=size;
+ size/=8*4;
+ asm(
+#if defined(__SH4__)
+ " fschg\n" //single float mode
+#endif
+ " fldi0 fr0\n"
+ " fldi0 fr1\n"
+ " fschg\n" // double
+ "1: \n" \
+ " dt %1\n"
+ " fmov dr0,@-%0\n"
+ " fmov dr0,@-%0\n"
+ " fmov dr0,@-%0\n"
+ " bf.s 1b\n"
+ " fmov dr0,@-%0\n"
+#if defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
+ " fschg" //back to single
+#endif
+ : : "r"(dst),"r"(size): "memory" );
+#else
+ double *d = dst;
+ size/=8*4;
+ do {
+ d[0] = 0.0;
+ d[1] = 0.0;
+ d[2] = 0.0;
+ d[3] = 0.0;
+ d+=4;
+ } while(--size);
+#endif
+}
+
+static void clear_blocks_sh4(DCTELEM *blocks)
+{
+// if (((int)blocks&7)==0)
+ memzero_align8(blocks,sizeof(DCTELEM)*6*64);
+}
+
+extern void idct_sh4(DCTELEM *block);
+static void idct_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ idct_sh4(block);
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ for(i=0;i<8;i++) {
+ dest[0] = cm[block[0]];
+ dest[1] = cm[block[1]];
+ dest[2] = cm[block[2]];
+ dest[3] = cm[block[3]];
+ dest[4] = cm[block[4]];
+ dest[5] = cm[block[5]];
+ dest[6] = cm[block[6]];
+ dest[7] = cm[block[7]];
+ dest+=line_size;
+ block+=8;
+ }
+}
+static void idct_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ idct_sh4(block);
+ int i;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ for(i=0;i<8;i++) {
+ dest[0] = cm[dest[0]+block[0]];
+ dest[1] = cm[dest[1]+block[1]];
+ dest[2] = cm[dest[2]+block[2]];
+ dest[3] = cm[dest[3]+block[3]];
+ dest[4] = cm[dest[4]+block[4]];
+ dest[5] = cm[dest[5]+block[5]];
+ dest[6] = cm[dest[6]+block[6]];
+ dest[7] = cm[dest[7]+block[7]];
+ dest+=line_size;
+ block+=8;
+ }
+}
+
+extern void dsputil_init_align(DSPContext* c, AVCodecContext *avctx);
+
+void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx)
+{
+ const int idct_algo= avctx->idct_algo;
+ dsputil_init_align(c,avctx);
+
+ c->clear_blocks = clear_blocks_sh4;
+ if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SH4){
+ c->idct_put = idct_put;
+ c->idct_add = idct_add;
+ c->idct = idct_sh4;
+ c->idct_permutation_type= FF_NO_IDCT_PERM; //FF_SIMPLE_IDCT_PERM; //FF_LIBMPEG2_IDCT_PERM;
+ }
+}
diff --git a/contrib/ffmpeg/libavcodec/sh4/idct_sh4.c b/contrib/ffmpeg/libavcodec/sh4/idct_sh4.c
new file mode 100644
index 000000000..3b8428c3c
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/sh4/idct_sh4.c
@@ -0,0 +1,366 @@
+/*
+ * idct for sh4
+ *
+ * Copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+#define c1 1.38703984532214752434 /* sqrt(2)*cos(1*pi/16) */
+#define c2 1.30656296487637657577 /* sqrt(2)*cos(2*pi/16) */
+#define c3 1.17587560241935884520 /* sqrt(2)*cos(3*pi/16) */
+#define c4 1.00000000000000000000 /* sqrt(2)*cos(4*pi/16) */
+#define c5 0.78569495838710234903 /* sqrt(2)*cos(5*pi/16) */
+#define c6 0.54119610014619712324 /* sqrt(2)*cos(6*pi/16) */
+#define c7 0.27589937928294311353 /* sqrt(2)*cos(7*pi/16) */
+
+static const float even_table[] __attribute__ ((aligned(8))) = {
+ c4, c4, c4, c4,
+ c2, c6,-c6,-c2,
+ c4,-c4,-c4, c4,
+ c6,-c2, c2,-c6
+};
+
+static const float odd_table[] __attribute__ ((aligned(8))) = {
+ c1, c3, c5, c7,
+ c3,-c7,-c1,-c5,
+ c5,-c1, c7, c3,
+ c7,-c5, c3,-c1
+};
+
+#undef c1
+#undef c2
+#undef c3
+#undef c4
+#undef c5
+#undef c6
+#undef c7
+
+#if defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__)
+
+#define load_matrix(table) \
+ __asm__ volatile( \
+ " fschg\n" \
+ " fmov @%0+,xd0\n" \
+ " fmov @%0+,xd2\n" \
+ " fmov @%0+,xd4\n" \
+ " fmov @%0+,xd6\n" \
+ " fmov @%0+,xd8\n" \
+ " fmov @%0+,xd10\n" \
+ " fmov @%0+,xd12\n" \
+ " fmov @%0+,xd14\n" \
+ " fschg\n" \
+ :\
+ : "r"(table)\
+ : "0" \
+ )
+
+#define ftrv() \
+ __asm__ volatile("ftrv xmtrx,fv0" \
+ : "=f"(fr0),"=f"(fr1),"=f"(fr2),"=f"(fr3) \
+ : "0"(fr0), "1"(fr1), "2"(fr2), "3"(fr3) );
+
+#define DEFREG \
+ register float fr0 __asm__("fr0"); \
+ register float fr1 __asm__("fr1"); \
+ register float fr2 __asm__("fr2"); \
+ register float fr3 __asm__("fr3")
+
+#else
+
+/* generic C code for check */
+
+static void ftrv_(const float xf[],float fv[])
+{
+ float f0,f1,f2,f3;
+ f0 = fv[0];
+ f1 = fv[1];
+ f2 = fv[2];
+ f3 = fv[3];
+ fv[0] = xf[0]*f0 + xf[4]*f1 + xf[ 8]*f2 + xf[12]*f3;
+ fv[1] = xf[1]*f0 + xf[5]*f1 + xf[ 9]*f2 + xf[13]*f3;
+ fv[2] = xf[2]*f0 + xf[6]*f1 + xf[10]*f2 + xf[14]*f3;
+ fv[3] = xf[3]*f0 + xf[7]*f1 + xf[11]*f2 + xf[15]*f3;
+}
+
+static void load_matrix_(float xf[],const float table[])
+{
+ int i;
+ for(i=0;i<16;i++) xf[i]=table[i];
+}
+
+#define ftrv() ftrv_(xf,fv)
+#define load_matrix(table) load_matrix_(xf,table)
+
+#define DEFREG \
+ float fv[4],xf[16]
+
+#define fr0 fv[0]
+#define fr1 fv[1]
+#define fr2 fv[2]
+#define fr3 fv[3]
+
+#endif
+
+#if 1
+#define DESCALE(x,n) (x)*(1.0f/(1<<(n)))
+#else
+#define DESCALE(x,n) (((int)(x)+(1<<(n-1)))>>(n))
+#endif
+
+/* this code works worse on gcc cvs; 3.2.3 works fine */
+
+
+#if 1
+//optimized
+
+void idct_sh4(DCTELEM *block)
+{
+ DEFREG;
+
+ int i;
+ float tblock[8*8],*fblock;
+ int ofs1,ofs2,ofs3;
+
+#if defined(__SH4__)
+#error "FIXME!! change to single float"
+#endif
+
+ /* row */
+
+ /* even part */
+ load_matrix(even_table);
+
+ fblock = tblock+4;
+ i = 8;
+ do {
+ fr0 = block[0];
+ fr1 = block[2];
+ fr2 = block[4];
+ fr3 = block[6];
+ block+=8;
+ ftrv();
+ *--fblock = fr3;
+ *--fblock = fr2;
+ *--fblock = fr1;
+ *--fblock = fr0;
+ fblock+=8+4;
+ } while(--i);
+ block-=8*8;
+ fblock-=8*8+4;
+
+ load_matrix(odd_table);
+
+ i = 8;
+
+// ofs1 = sizeof(float)*1;
+// ofs2 = sizeof(float)*2;
+// ofs3 = sizeof(float)*3;
+
+ do {
+ float t0,t1,t2,t3;
+ fr0 = block[1];
+ fr1 = block[3];
+ fr2 = block[5];
+ fr3 = block[7];
+ block+=8;
+ ftrv();
+ t0 = *fblock++;
+ t1 = *fblock++;
+ t2 = *fblock++;
+ t3 = *fblock++;
+ fblock+=4;
+ *--fblock = t0 - fr0;
+ *--fblock = t1 - fr1;
+ *--fblock = t2 - fr2;
+ *--fblock = t3 - fr3;
+ *--fblock = t3 + fr3;
+ *--fblock = t2 + fr2;
+ *--fblock = t1 + fr1;
+ *--fblock = t0 + fr0;
+ fblock+=8;
+ } while(--i);
+ block-=8*8;
+ fblock-=8*8;
+
+ /* col */
+
+ /* even part */
+ load_matrix(even_table);
+
+ ofs1 = sizeof(float)*2*8;
+ ofs2 = sizeof(float)*4*8;
+ ofs3 = sizeof(float)*6*8;
+
+ i = 8;
+
+#define OA(fblock,ofs) *(float*)((char*)fblock + ofs)
+
+ do {
+ fr0 = OA(fblock, 0);
+ fr1 = OA(fblock,ofs1);
+ fr2 = OA(fblock,ofs2);
+ fr3 = OA(fblock,ofs3);
+ ftrv();
+ OA(fblock,0 ) = fr0;
+ OA(fblock,ofs1) = fr1;
+ OA(fblock,ofs2) = fr2;
+ OA(fblock,ofs3) = fr3;
+ fblock++;
+ } while(--i);
+ fblock-=8;
+
+ load_matrix(odd_table);
+
+ i=8;
+ do {
+ float t0,t1,t2,t3;
+ t0 = OA(fblock, 0); /* [8*0] */
+ t1 = OA(fblock,ofs1); /* [8*2] */
+ t2 = OA(fblock,ofs2); /* [8*4] */
+ t3 = OA(fblock,ofs3); /* [8*6] */
+ fblock+=8;
+ fr0 = OA(fblock, 0); /* [8*1] */
+ fr1 = OA(fblock,ofs1); /* [8*3] */
+ fr2 = OA(fblock,ofs2); /* [8*5] */
+ fr3 = OA(fblock,ofs3); /* [8*7] */
+ fblock+=-8+1;
+ ftrv();
+ block[8*0] = DESCALE(t0 + fr0,3);
+ block[8*7] = DESCALE(t0 - fr0,3);
+ block[8*1] = DESCALE(t1 + fr1,3);
+ block[8*6] = DESCALE(t1 - fr1,3);
+ block[8*2] = DESCALE(t2 + fr2,3);
+ block[8*5] = DESCALE(t2 - fr2,3);
+ block[8*3] = DESCALE(t3 + fr3,3);
+ block[8*4] = DESCALE(t3 - fr3,3);
+ block++;
+ } while(--i);
+
+#if defined(__SH4__)
+#error "FIXME!! change to double"
+#endif
+}
+#else
+void idct_sh4(DCTELEM *block)
+{
+ DEFREG;
+
+ int i;
+ float tblock[8*8],*fblock;
+
+ /* row */
+
+ /* even part */
+ load_matrix(even_table);
+
+ fblock = tblock;
+ i = 8;
+ do {
+ fr0 = block[0];
+ fr1 = block[2];
+ fr2 = block[4];
+ fr3 = block[6];
+ block+=8;
+ ftrv();
+ fblock[0] = fr0;
+ fblock[2] = fr1;
+ fblock[4] = fr2;
+ fblock[6] = fr3;
+ fblock+=8;
+ } while(--i);
+ block-=8*8;
+ fblock-=8*8;
+
+ load_matrix(odd_table);
+
+ i = 8;
+
+ do {
+ float t0,t1,t2,t3;
+ fr0 = block[1];
+ fr1 = block[3];
+ fr2 = block[5];
+ fr3 = block[7];
+ block+=8;
+ ftrv();
+ t0 = fblock[0];
+ t1 = fblock[2];
+ t2 = fblock[4];
+ t3 = fblock[6];
+ fblock[0] = t0 + fr0;
+ fblock[7] = t0 - fr0;
+ fblock[1] = t1 + fr1;
+ fblock[6] = t1 - fr1;
+ fblock[2] = t2 + fr2;
+ fblock[5] = t2 - fr2;
+ fblock[3] = t3 + fr3;
+ fblock[4] = t3 - fr3;
+ fblock+=8;
+ } while(--i);
+ block-=8*8;
+ fblock-=8*8;
+
+ /* col */
+
+ /* even part */
+ load_matrix(even_table);
+
+ i = 8;
+
+ do {
+ fr0 = fblock[8*0];
+ fr1 = fblock[8*2];
+ fr2 = fblock[8*4];
+ fr3 = fblock[8*6];
+ ftrv();
+ fblock[8*0] = fr0;
+ fblock[8*2] = fr1;
+ fblock[8*4] = fr2;
+ fblock[8*6] = fr3;
+ fblock++;
+ } while(--i);
+ fblock-=8;
+
+ load_matrix(odd_table);
+
+ i=8;
+ do {
+ float t0,t1,t2,t3;
+ fr0 = fblock[8*1];
+ fr1 = fblock[8*3];
+ fr2 = fblock[8*5];
+ fr3 = fblock[8*7];
+ ftrv();
+ t0 = fblock[8*0];
+ t1 = fblock[8*2];
+ t2 = fblock[8*4];
+ t3 = fblock[8*6];
+ fblock++;
+ block[8*0] = DESCALE(t0 + fr0,3);
+ block[8*7] = DESCALE(t0 - fr0,3);
+ block[8*1] = DESCALE(t1 + fr1,3);
+ block[8*6] = DESCALE(t1 - fr1,3);
+ block[8*2] = DESCALE(t2 + fr2,3);
+ block[8*5] = DESCALE(t2 - fr2,3);
+ block[8*3] = DESCALE(t3 + fr3,3);
+ block[8*4] = DESCALE(t3 - fr3,3);
+ block++;
+ } while(--i);
+}
+#endif
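The even_table/odd_table constants used throughout idct_sh4.c are the scaled DCT basis values sqrt(2)*cos(k*pi/16) quoted in the defines at the top of the file. A trivial standalone program that recomputes them for cross-checking (M_PI from <math.h> is assumed to be available):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        int k;
        for (k = 1; k <= 7; k++)                              /* c1 .. c7 */
            printf("c%d = %.20f\n", k, sqrt(2.0) * cos(k * M_PI / 16.0));
        return 0;
    }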
diff --git a/contrib/ffmpeg/libavcodec/sh4/qpel.c b/contrib/ffmpeg/libavcodec/sh4/qpel.c
new file mode 100644
index 000000000..7a73ac50d
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/sh4/qpel.c
@@ -0,0 +1,1600 @@
+/*
+ * This is optimized for sh, which has post-increment addressing (*p++).
+ * Some CPUs may index (p[n]) faster than post-increment (*p++).
+ *
+ * copyright (c) 2001-2003 BERO <bero@geocities.co.jp>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define LD(adr) *(uint32_t*)(adr)
+
+#define PIXOP2(OPNAME, OP) \
+/*static inline void OPNAME ## _no_rnd_pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
+ OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _pixels8_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
+ OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _pixels4_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
+ OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
+ OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
+ OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _pixels16_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),rnd_avg32(LD32(src1 ),LD32(src2 )) ); \
+ OP(LP(dst+4),rnd_avg32(LD32(src1+4),LD32(src2+4)) ); \
+ OP(LP(dst+8),rnd_avg32(LD32(src1+8),LD32(src2+8)) ); \
+ OP(LP(dst+12),rnd_avg32(LD32(src1+12),LD32(src2+12)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}*/\
+\
+static inline void OPNAME ## _pixels4_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _pixels4_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
+ OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
+ OP(LP(dst+8),no_rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
+ OP(LP(dst+12),no_rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _pixels16_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
+ OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
+ OP(LP(dst+8),rnd_avg32(LD32(src1+8),LP(src2+8)) ); \
+ OP(LP(dst+12),rnd_avg32(LD32(src1+12),LP(src2+12)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do { /* only src2 is aligned */\
+ OP(LP(dst ),no_rnd_avg32(LD32(src1 ),LP(src2 )) ); \
+ OP(LP(dst+4),no_rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _pixels8_l2_aligned2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),rnd_avg32(LD32(src1 ),LP(src2 )) ); \
+ OP(LP(dst+4),rnd_avg32(LD32(src1+4),LP(src2+4)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
+ OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _pixels8_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
+ OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),no_rnd_avg32(LP(src1 ),LP(src2 )) ); \
+ OP(LP(dst+4),no_rnd_avg32(LP(src1+4),LP(src2+4)) ); \
+ OP(LP(dst+8),no_rnd_avg32(LP(src1+8),LP(src2+8)) ); \
+ OP(LP(dst+12),no_rnd_avg32(LP(src1+12),LP(src2+12)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _pixels16_l2_aligned(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{\
+ do {\
+ OP(LP(dst ),rnd_avg32(LP(src1 ),LP(src2 )) ); \
+ OP(LP(dst+4),rnd_avg32(LP(src1+4),LP(src2+4)) ); \
+ OP(LP(dst+8),rnd_avg32(LP(src1+8),LP(src2+8)) ); \
+ OP(LP(dst+12),rnd_avg32(LP(src1+12),LP(src2+12)) ); \
+ src1+=src_stride1; \
+ src2+=src_stride2; \
+ dst+=dst_stride; \
+ } while(--h); \
+}\
+\
+static inline void OPNAME ## _no_rnd_pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{ OPNAME ## _no_rnd_pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
+\
+static inline void OPNAME ## _pixels16_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{ OPNAME ## _pixels16_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
+\
+static inline void OPNAME ## _no_rnd_pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{ OPNAME ## _no_rnd_pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
+\
+static inline void OPNAME ## _pixels8_l2_aligned1(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, int src_stride1, int src_stride2, int h) \
+{ OPNAME ## _pixels8_l2_aligned2(dst,src2,src1,dst_stride,src_stride2,src_stride1,h); } \
+\
+static inline void OPNAME ## _pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ do { \
+ uint32_t a0,a1,a2,a3; \
+ UNPACK(a0,a1,LP(src1),LP(src2)); \
+ UNPACK(a2,a3,LP(src3),LP(src4)); \
+ OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
+ UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
+ OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
+ src1+=src_stride1;\
+ src2+=src_stride2;\
+ src3+=src_stride3;\
+ src4+=src_stride4;\
+ dst+=dst_stride;\
+ } while(--h); \
+} \
+\
+static inline void OPNAME ## _no_rnd_pixels8_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ do { \
+ uint32_t a0,a1,a2,a3; \
+ UNPACK(a0,a1,LP(src1),LP(src2)); \
+ UNPACK(a2,a3,LP(src3),LP(src4)); \
+ OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
+ UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
+ OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
+ src1+=src_stride1;\
+ src2+=src_stride2;\
+ src3+=src_stride3;\
+ src4+=src_stride4;\
+ dst+=dst_stride;\
+ } while(--h); \
+} \
+\
+static inline void OPNAME ## _pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ do { \
+ uint32_t a0,a1,a2,a3; /* only src1 is unaligned */\
+ UNPACK(a0,a1,LD32(src1),LP(src2)); \
+ UNPACK(a2,a3,LP(src3),LP(src4)); \
+ OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
+ UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
+ OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
+ src1+=src_stride1;\
+ src2+=src_stride2;\
+ src3+=src_stride3;\
+ src4+=src_stride4;\
+ dst+=dst_stride;\
+ } while(--h); \
+} \
+\
+static inline void OPNAME ## _no_rnd_pixels8_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ do { \
+ uint32_t a0,a1,a2,a3; \
+ UNPACK(a0,a1,LD32(src1),LP(src2)); \
+ UNPACK(a2,a3,LP(src3),LP(src4)); \
+ OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
+ UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
+ OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
+ src1+=src_stride1;\
+ src2+=src_stride2;\
+ src3+=src_stride3;\
+ src4+=src_stride4;\
+ dst+=dst_stride;\
+ } while(--h); \
+} \
+\
+static inline void OPNAME ## _pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ do { \
+ uint32_t a0,a1,a2,a3; \
+ UNPACK(a0,a1,LP(src1),LP(src2)); \
+ UNPACK(a2,a3,LP(src3),LP(src4)); \
+ OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
+ UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
+ OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
+ UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
+ OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
+ UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
+ OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
+ src1+=src_stride1;\
+ src2+=src_stride2;\
+ src3+=src_stride3;\
+ src4+=src_stride4;\
+ dst+=dst_stride;\
+ } while(--h); \
+} \
+\
+static inline void OPNAME ## _no_rnd_pixels16_l4_aligned(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ do { \
+ uint32_t a0,a1,a2,a3; \
+ UNPACK(a0,a1,LP(src1),LP(src2)); \
+ UNPACK(a2,a3,LP(src3),LP(src4)); \
+ OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LP(src1+4),LP(src2+4)); \
+ UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
+ OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LP(src1+8),LP(src2+8)); \
+ UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
+ OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LP(src1+12),LP(src2+12)); \
+ UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
+ OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
+ src1+=src_stride1;\
+ src2+=src_stride2;\
+ src3+=src_stride3;\
+ src4+=src_stride4;\
+ dst+=dst_stride;\
+ } while(--h); \
+} \
+\
+static inline void OPNAME ## _pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ do { /* src1 is unaligned */\
+ uint32_t a0,a1,a2,a3; \
+ UNPACK(a0,a1,LD32(src1),LP(src2)); \
+ UNPACK(a2,a3,LP(src3),LP(src4)); \
+ OP(LP(dst),rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
+ UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
+ OP(LP(dst+4),rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
+ UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
+ OP(LP(dst+8),rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
+ UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
+ OP(LP(dst+12),rnd_PACK(a0,a1,a2,a3)); \
+ src1+=src_stride1;\
+ src2+=src_stride2;\
+ src3+=src_stride3;\
+ src4+=src_stride4;\
+ dst+=dst_stride;\
+ } while(--h); \
+} \
+\
+static inline void OPNAME ## _no_rnd_pixels16_l4_aligned0(uint8_t *dst, const uint8_t *src1, uint8_t *src2, uint8_t *src3, uint8_t *src4,int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
+ do { \
+ uint32_t a0,a1,a2,a3; \
+ UNPACK(a0,a1,LD32(src1),LP(src2)); \
+ UNPACK(a2,a3,LP(src3),LP(src4)); \
+ OP(LP(dst),no_rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LD32(src1+4),LP(src2+4)); \
+ UNPACK(a2,a3,LP(src3+4),LP(src4+4)); \
+ OP(LP(dst+4),no_rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LD32(src1+8),LP(src2+8)); \
+ UNPACK(a2,a3,LP(src3+8),LP(src4+8)); \
+ OP(LP(dst+8),no_rnd_PACK(a0,a1,a2,a3)); \
+ UNPACK(a0,a1,LD32(src1+12),LP(src2+12)); \
+ UNPACK(a2,a3,LP(src3+12),LP(src4+12)); \
+ OP(LP(dst+12),no_rnd_PACK(a0,a1,a2,a3)); \
+ src1+=src_stride1;\
+ src2+=src_stride2;\
+ src3+=src_stride3;\
+ src4+=src_stride4;\
+ dst+=dst_stride;\
+ } while(--h); \
+} \
+\
+
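+/* Instantiate the block copy/average primitives: "put" overwrites dst,
+ * "avg" rounds and averages with the existing dst contents (rnd_avg32). */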
+#define op_avg(a, b) a = rnd_avg32(a,b)
+#define op_put(a, b) a = b
+
+PIXOP2(avg, op_avg)
+PIXOP2(put, op_put)
+#undef op_avg
+#undef op_put
+
+#define avg2(a,b) ((a+b+1)>>1)
+#define avg4(a,b,c,d) ((a+b+c+d+2)>>2)
+
+
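+/* gmc1_c: one-vector global motion compensation with 1/16-pel accuracy.
+ * Each output pixel is a bilinear blend of a 2x2 source neighbourhood with
+ * weights A=(16-x16)*(16-y16), B=x16*(16-y16), C=(16-x16)*y16, D=x16*y16,
+ * which sum to 256, hence the >>8 after adding the rounder.
+ * gmc_c below handles the general case with per-pixel vectors and clipping
+ * at the picture edges. */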
+static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder)
+{
+ const int A=(16-x16)*(16-y16);
+ const int B=( x16)*(16-y16);
+ const int C=(16-x16)*( y16);
+ const int D=( x16)*( y16);
+
+ do {
+ int t0,t1,t2,t3;
+ uint8_t *s0 = src;
+ uint8_t *s1 = src+stride;
+ t0 = *s0++; t2 = *s1++;
+ t1 = *s0++; t3 = *s1++;
+ dst[0]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
+ t0 = *s0++; t2 = *s1++;
+ dst[1]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
+ t1 = *s0++; t3 = *s1++;
+ dst[2]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
+ t0 = *s0++; t2 = *s1++;
+ dst[3]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
+ t1 = *s0++; t3 = *s1++;
+ dst[4]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
+ t0 = *s0++; t2 = *s1++;
+ dst[5]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
+ t1 = *s0++; t3 = *s1++;
+ dst[6]= (A*t0 + B*t1 + C*t2 + D*t3 + rounder)>>8;
+ t0 = *s0++; t2 = *s1++;
+ dst[7]= (A*t1 + B*t0 + C*t3 + D*t2 + rounder)>>8;
+ dst+= stride;
+ src+= stride;
+ }while(--h);
+}
+
+static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
+ int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
+{
+ int y, vx, vy;
+ const int s= 1<<shift;
+
+ width--;
+ height--;
+
+ for(y=0; y<h; y++){
+ int x;
+
+ vx= ox;
+ vy= oy;
+ for(x=0; x<8; x++){ //XXX FIXME optimize
+ int src_x, src_y, frac_x, frac_y, index;
+
+ src_x= vx>>16;
+ src_y= vy>>16;
+ frac_x= src_x&(s-1);
+ frac_y= src_y&(s-1);
+ src_x>>=shift;
+ src_y>>=shift;
+
+ if((unsigned)src_x < width){
+ if((unsigned)src_y < height){
+ index= src_x + src_y*stride;
+ dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ + src[index +1]* frac_x )*(s-frac_y)
+ + ( src[index+stride ]*(s-frac_x)
+ + src[index+stride+1]* frac_x )* frac_y
+ + r)>>(shift*2);
+ }else{
+ index= src_x + clip(src_y, 0, height)*stride;
+ dst[y*stride + x]= ( ( src[index ]*(s-frac_x)
+ + src[index +1]* frac_x )*s
+ + r)>>(shift*2);
+ }
+ }else{
+ if((unsigned)src_y < height){
+ index= clip(src_x, 0, width) + src_y*stride;
+ dst[y*stride + x]= ( ( src[index ]*(s-frac_y)
+ + src[index+stride ]* frac_y )*s
+ + r)>>(shift*2);
+ }else{
+ index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride;
+ dst[y*stride + x]= src[index ];
+ }
+ }
+
+ vx+= dxx;
+ vy+= dyx;
+ }
+ ox += dxy;
+ oy += dyy;
+ }
+}
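+
+/* H264_CHROMA_MC: H.264 chroma interpolation. x,y are 1/8-pel fractional
+ * offsets; the bilinear weights A..D sum to 64, so OP rounds with +32 and
+ * shifts right by 6 (op_put), or additionally averages with dst (op_avg). */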
+#define H264_CHROMA_MC(OPNAME, OP)\
+static void OPNAME ## h264_chroma_mc2_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
+ const int A=(8-x)*(8-y);\
+ const int B=( x)*(8-y);\
+ const int C=(8-x)*( y);\
+ const int D=( x)*( y);\
+ \
+ assert(x<8 && y<8 && x>=0 && y>=0);\
+\
+ do {\
+ int t0,t1,t2,t3; \
+ uint8_t *s0 = src; \
+ uint8_t *s1 = src+stride; \
+ t0 = *s0++; t2 = *s1++; \
+ t1 = *s0++; t3 = *s1++; \
+ OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
+ t0 = *s0++; t2 = *s1++; \
+ OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
+ dst+= stride;\
+ src+= stride;\
+ }while(--h);\
+}\
+\
+static void OPNAME ## h264_chroma_mc4_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
+ const int A=(8-x)*(8-y);\
+ const int B=( x)*(8-y);\
+ const int C=(8-x)*( y);\
+ const int D=( x)*( y);\
+ \
+ assert(x<8 && y<8 && x>=0 && y>=0);\
+\
+ do {\
+ int t0,t1,t2,t3; \
+ uint8_t *s0 = src; \
+ uint8_t *s1 = src+stride; \
+ t0 = *s0++; t2 = *s1++; \
+ t1 = *s0++; t3 = *s1++; \
+ OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
+ t0 = *s0++; t2 = *s1++; \
+ OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
+ t1 = *s0++; t3 = *s1++; \
+ OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
+ t0 = *s0++; t2 = *s1++; \
+ OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
+ dst+= stride;\
+ src+= stride;\
+ }while(--h);\
+}\
+\
+static void OPNAME ## h264_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y){\
+ const int A=(8-x)*(8-y);\
+ const int B=( x)*(8-y);\
+ const int C=(8-x)*( y);\
+ const int D=( x)*( y);\
+ \
+ assert(x<8 && y<8 && x>=0 && y>=0);\
+\
+ do {\
+ int t0,t1,t2,t3; \
+ uint8_t *s0 = src; \
+ uint8_t *s1 = src+stride; \
+ t0 = *s0++; t2 = *s1++; \
+ t1 = *s0++; t3 = *s1++; \
+ OP(dst[0], (A*t0 + B*t1 + C*t2 + D*t3));\
+ t0 = *s0++; t2 = *s1++; \
+ OP(dst[1], (A*t1 + B*t0 + C*t3 + D*t2));\
+ t1 = *s0++; t3 = *s1++; \
+ OP(dst[2], (A*t0 + B*t1 + C*t2 + D*t3));\
+ t0 = *s0++; t2 = *s1++; \
+ OP(dst[3], (A*t1 + B*t0 + C*t3 + D*t2));\
+ t1 = *s0++; t3 = *s1++; \
+ OP(dst[4], (A*t0 + B*t1 + C*t2 + D*t3));\
+ t0 = *s0++; t2 = *s1++; \
+ OP(dst[5], (A*t1 + B*t0 + C*t3 + D*t2));\
+ t1 = *s0++; t3 = *s1++; \
+ OP(dst[6], (A*t0 + B*t1 + C*t2 + D*t3));\
+ t0 = *s0++; t2 = *s1++; \
+ OP(dst[7], (A*t1 + B*t0 + C*t3 + D*t2));\
+ dst+= stride;\
+ src+= stride;\
+ }while(--h);\
+}
+
+#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
+#define op_put(a, b) a = (((b) + 32)>>6)
+
+H264_CHROMA_MC(put_ , op_put)
+H264_CHROMA_MC(avg_ , op_avg)
+#undef op_avg
+#undef op_put
+
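+/* QPEL_MC: MPEG-4 quarter-pel interpolation. The half-pel lowpass filters
+ * apply the 8-tap kernel (-1, 3, -6, 20, 20, -6, 3, -1)/32 horizontally or
+ * vertically (mirrored at the block edges); the mcXY functions then average
+ * with the nearest full/half-pel prediction to reach quarter-pel positions. */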
+#define QPEL_MC(r, OPNAME, RND, OP) \
+static void OPNAME ## mpeg4_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ do {\
+ uint8_t *s = src; \
+ int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
+ src0= *s++;\
+ src1= *s++;\
+ src2= *s++;\
+ src3= *s++;\
+ src4= *s++;\
+ OP(dst[0], (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));\
+ src5= *s++;\
+ OP(dst[1], (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));\
+ src6= *s++;\
+ OP(dst[2], (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));\
+ src7= *s++;\
+ OP(dst[3], (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));\
+ src8= *s++;\
+ OP(dst[4], (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));\
+ OP(dst[5], (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));\
+ OP(dst[6], (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));\
+ OP(dst[7], (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }while(--h);\
+}\
+\
+static void OPNAME ## mpeg4_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int w=8;\
+ do{\
+ uint8_t *s = src, *d=dst;\
+ int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
+ src0 = *s; s+=srcStride; \
+ src1 = *s; s+=srcStride; \
+ src2 = *s; s+=srcStride; \
+ src3 = *s; s+=srcStride; \
+ src4 = *s; s+=srcStride; \
+ OP(*d, (src0+src1)*20 - (src0+src2)*6 + (src1+src3)*3 - (src2+src4));d+=dstStride;\
+ src5 = *s; s+=srcStride; \
+ OP(*d, (src1+src2)*20 - (src0+src3)*6 + (src0+src4)*3 - (src1+src5));d+=dstStride;\
+ src6 = *s; s+=srcStride; \
+ OP(*d, (src2+src3)*20 - (src1+src4)*6 + (src0+src5)*3 - (src0+src6));d+=dstStride;\
+ src7 = *s; s+=srcStride; \
+ OP(*d, (src3+src4)*20 - (src2+src5)*6 + (src1+src6)*3 - (src0+src7));d+=dstStride;\
+ src8 = *s; \
+ OP(*d, (src4+src5)*20 - (src3+src6)*6 + (src2+src7)*3 - (src1+src8));d+=dstStride;\
+ OP(*d, (src5+src6)*20 - (src4+src7)*6 + (src3+src8)*3 - (src2+src8));d+=dstStride;\
+ OP(*d, (src6+src7)*20 - (src5+src8)*6 + (src4+src8)*3 - (src3+src7));d+=dstStride;\
+ OP(*d, (src7+src8)*20 - (src6+src8)*6 + (src5+src7)*3 - (src4+src6));\
+ dst++;\
+ src++;\
+ }while(--w);\
+}\
+\
+static void OPNAME ## mpeg4_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ do {\
+ uint8_t *s = src;\
+ int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
+ int src9,src10,src11,src12,src13,src14,src15,src16;\
+ src0= *s++;\
+ src1= *s++;\
+ src2= *s++;\
+ src3= *s++;\
+ src4= *s++;\
+ OP(dst[ 0], (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));\
+ src5= *s++;\
+ OP(dst[ 1], (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));\
+ src6= *s++;\
+ OP(dst[ 2], (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));\
+ src7= *s++;\
+ OP(dst[ 3], (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));\
+ src8= *s++;\
+ OP(dst[ 4], (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));\
+ src9= *s++;\
+ OP(dst[ 5], (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));\
+ src10= *s++;\
+ OP(dst[ 6], (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));\
+ src11= *s++;\
+ OP(dst[ 7], (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));\
+ src12= *s++;\
+ OP(dst[ 8], (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));\
+ src13= *s++;\
+ OP(dst[ 9], (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));\
+ src14= *s++;\
+ OP(dst[10], (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));\
+ src15= *s++;\
+ OP(dst[11], (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));\
+ src16= *s++;\
+ OP(dst[12], (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));\
+ OP(dst[13], (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));\
+ OP(dst[14], (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));\
+ OP(dst[15], (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
+ dst+=dstStride;\
+ src+=srcStride;\
+ }while(--h);\
+}\
+\
+static void OPNAME ## mpeg4_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int w=16;\
+ do {\
+ uint8_t *s = src, *d=dst;\
+ int src0,src1,src2,src3,src4,src5,src6,src7,src8;\
+ int src9,src10,src11,src12,src13,src14,src15,src16;\
+ src0 = *s; s+=srcStride; \
+ src1 = *s; s+=srcStride; \
+ src2 = *s; s+=srcStride; \
+ src3 = *s; s+=srcStride; \
+ src4 = *s; s+=srcStride; \
+ OP(*d, (src0 +src1 )*20 - (src0 +src2 )*6 + (src1 +src3 )*3 - (src2 +src4 ));d+=dstStride;\
+ src5 = *s; s+=srcStride; \
+ OP(*d, (src1 +src2 )*20 - (src0 +src3 )*6 + (src0 +src4 )*3 - (src1 +src5 ));d+=dstStride;\
+ src6 = *s; s+=srcStride; \
+ OP(*d, (src2 +src3 )*20 - (src1 +src4 )*6 + (src0 +src5 )*3 - (src0 +src6 ));d+=dstStride;\
+ src7 = *s; s+=srcStride; \
+ OP(*d, (src3 +src4 )*20 - (src2 +src5 )*6 + (src1 +src6 )*3 - (src0 +src7 ));d+=dstStride;\
+ src8 = *s; s+=srcStride; \
+ OP(*d, (src4 +src5 )*20 - (src3 +src6 )*6 + (src2 +src7 )*3 - (src1 +src8 ));d+=dstStride;\
+ src9 = *s; s+=srcStride; \
+ OP(*d, (src5 +src6 )*20 - (src4 +src7 )*6 + (src3 +src8 )*3 - (src2 +src9 ));d+=dstStride;\
+ src10 = *s; s+=srcStride; \
+ OP(*d, (src6 +src7 )*20 - (src5 +src8 )*6 + (src4 +src9 )*3 - (src3 +src10));d+=dstStride;\
+ src11 = *s; s+=srcStride; \
+ OP(*d, (src7 +src8 )*20 - (src6 +src9 )*6 + (src5 +src10)*3 - (src4 +src11));d+=dstStride;\
+ src12 = *s; s+=srcStride; \
+ OP(*d, (src8 +src9 )*20 - (src7 +src10)*6 + (src6 +src11)*3 - (src5 +src12));d+=dstStride;\
+ src13 = *s; s+=srcStride; \
+ OP(*d, (src9 +src10)*20 - (src8 +src11)*6 + (src7 +src12)*3 - (src6 +src13));d+=dstStride;\
+ src14 = *s; s+=srcStride; \
+ OP(*d, (src10+src11)*20 - (src9 +src12)*6 + (src8 +src13)*3 - (src7 +src14));d+=dstStride;\
+ src15 = *s; s+=srcStride; \
+ OP(*d, (src11+src12)*20 - (src10+src13)*6 + (src9 +src14)*3 - (src8 +src15));d+=dstStride;\
+ src16 = *s; \
+ OP(*d, (src12+src13)*20 - (src11+src14)*6 + (src10+src15)*3 - (src9 +src16));d+=dstStride;\
+ OP(*d, (src13+src14)*20 - (src12+src15)*6 + (src11+src16)*3 - (src10+src16));d+=dstStride;\
+ OP(*d, (src14+src15)*20 - (src13+src16)*6 + (src12+src16)*3 - (src11+src15));d+=dstStride;\
+ OP(*d, (src15+src16)*20 - (src14+src16)*6 + (src13+src15)*3 - (src12+src14));\
+ dst++;\
+ src++;\
+ }while(--w);\
+}\
+\
+static void OPNAME ## qpel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels8_c(dst, src, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[64];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
+ OPNAME ## pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## mpeg4_qpel8_h_lowpass(dst, src, stride, stride, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[64];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(half, src, 8, stride, 8);\
+ OPNAME ## pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t half[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
+ OPNAME ## pixels8_l2_aligned(dst, full, half, stride, 16, 8, 8);\
+}\
+\
+static void OPNAME ## qpel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ copy_block9(full, src, 16, stride, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16);\
+}\
+\
+static void OPNAME ## qpel8_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t half[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16);\
+ OPNAME ## pixels8_l2_aligned(dst, full+16, half, stride, 16, 8, 8);\
+}\
+static void ff_ ## OPNAME ## qpel8_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
+}\
+static void ff_ ## OPNAME ## qpel8_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
+}\
+static void ff_ ## OPNAME ## qpel8_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l4_aligned(dst, full+16, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
+}\
+static void ff_ ## OPNAME ## qpel8_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full , 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l4_aligned0(dst, full+17, halfH+8, halfV, halfHV, stride, 16, 8, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_aligned(dst, halfH, halfHV, stride, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[72];\
+ uint8_t halfHV[64];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_aligned(dst, halfH+8, halfHV, stride, 8, 8, 8);\
+}\
+static void ff_ ## OPNAME ## qpel8_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2_aligned(halfH, halfH, full, 8, 8, 16, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
+}\
+static void ff_ ## OPNAME ## qpel8_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ uint8_t halfV[64];\
+ uint8_t halfHV[64];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfV, full+1, 8, 16);\
+ put ## RND ## mpeg4_qpel8_v_lowpass(halfHV, halfH, 8, 8);\
+ OPNAME ## pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);\
+}\
+static void OPNAME ## qpel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[16*9];\
+ uint8_t halfH[72];\
+ copy_block9(full, src, 16, stride, 9);\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, full, 8, 16, 9);\
+ put ## RND ## pixels8_l2_aligned1(halfH, halfH, full+1, 8, 8, 16, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
+}\
+static void OPNAME ## qpel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[72];\
+ put ## RND ## mpeg4_qpel8_h_lowpass(halfH, src, 8, stride, 9);\
+ OPNAME ## mpeg4_qpel8_v_lowpass(dst, halfH, stride, 8);\
+}\
+static void OPNAME ## qpel16_mc00_c (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels16_c(dst, src, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc10_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
+ OPNAME ## pixels16_l2_aligned2(dst, src, half, stride, stride, 16, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc20_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## mpeg4_qpel16_h_lowpass(dst, src, stride, stride, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc30_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(half, src, 16, stride, 16);\
+ OPNAME ## pixels16_l2_aligned2(dst, src+1, half, stride, stride, 16, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc01_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t half[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
+ OPNAME ## pixels16_l2_aligned(dst, full, half, stride, 24, 16, 16);\
+}\
+\
+static void OPNAME ## qpel16_mc02_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ copy_block17(full, src, 24, stride, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24);\
+}\
+\
+static void OPNAME ## qpel16_mc03_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t half[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24);\
+ OPNAME ## pixels16_l2_aligned(dst, full+24, half, stride, 24, 16, 16);\
+}\
+static void ff_ ## OPNAME ## qpel16_mc11_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l4_aligned(dst, full, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc11_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
+}\
+static void ff_ ## OPNAME ## qpel16_mc31_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l4_aligned0(dst, full+1, halfH, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc31_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
+}\
+static void ff_ ## OPNAME ## qpel16_mc13_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l4_aligned(dst, full+24, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc13_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
+}\
+static void ff_ ## OPNAME ## qpel16_mc33_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full , 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l4_aligned0(dst, full+25, halfH+16, halfV, halfHV, stride, 24, 16, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc33_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc21_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_aligned(dst, halfH, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc23_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[272];\
+ uint8_t halfHV[256];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_aligned(dst, halfH+16, halfHV, stride, 16, 16, 16);\
+}\
+static void ff_ ## OPNAME ## qpel16_mc12_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc12_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2_aligned(halfH, halfH, full, 16, 16, 24, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
+}\
+static void ff_ ## OPNAME ## qpel16_mc32_old_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ uint8_t halfV[256];\
+ uint8_t halfHV[256];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfV, full+1, 16, 24);\
+ put ## RND ## mpeg4_qpel16_v_lowpass(halfHV, halfH, 16, 16);\
+ OPNAME ## pixels16_l2_aligned(dst, halfV, halfHV, stride, 16, 16, 16);\
+}\
+static void OPNAME ## qpel16_mc32_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[24*17];\
+ uint8_t halfH[272];\
+ copy_block17(full, src, 24, stride, 17);\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, full, 16, 24, 17);\
+ put ## RND ## pixels16_l2_aligned1(halfH, halfH, full+1, 16, 16, 24, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
+}\
+static void OPNAME ## qpel16_mc22_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t halfH[272];\
+ put ## RND ## mpeg4_qpel16_h_lowpass(halfH, src, 16, stride, 17);\
+ OPNAME ## mpeg4_qpel16_v_lowpass(dst, halfH, stride, 16);\
+}
+
+#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
+#define op_avg_no_rnd(a, b) a = (((a)+cm[((b) + 15)>>5])>>1)
+#define op_put(a, b) a = cm[((b) + 16)>>5]
+#define op_put_no_rnd(a, b) a = cm[((b) + 15)>>5]
+
+QPEL_MC(0, put_ , _ , op_put)
+QPEL_MC(1, put_no_rnd_, _no_rnd_, op_put_no_rnd)
+QPEL_MC(0, avg_ , _ , op_avg)
+//QPEL_MC(1, avg_no_rnd , _ , op_avg)
+#undef op_avg
+#undef op_avg_no_rnd
+#undef op_put
+#undef op_put_no_rnd
+
+#if 1
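+/* H264_LOWPASS: H.264 luma half-pel interpolation with the 6-tap filter
+ * (1, -5, 20, 20, -5, 1). Single passes divide by 32 (OP: +16 >> 5); the
+ * combined hv pass keeps 16-bit intermediates in tmp and divides by 1024
+ * (OP2: +512 >> 10). */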
+#define H264_LOWPASS(OPNAME, OP, OP2) \
+static inline void OPNAME ## h264_qpel_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ do {\
+ int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
+ uint8_t *s = src-2;\
+ srcB = *s++;\
+ srcA = *s++;\
+ src0 = *s++;\
+ src1 = *s++;\
+ src2 = *s++;\
+ src3 = *s++;\
+ OP(dst[0], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
+ src4 = *s++;\
+ OP(dst[1], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
+ src5 = *s++;\
+ OP(dst[2], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
+ src6 = *s++;\
+ OP(dst[3], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
+ if (w>4) { /* only for blocks wider than 4 */ \
+ int src7,src8,src9,src10; \
+ src7 = *s++;\
+ OP(dst[4], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
+ src8 = *s++;\
+ OP(dst[5], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
+ src9 = *s++;\
+ OP(dst[6], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
+ src10 = *s++;\
+ OP(dst[7], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
+ if (w>8) { \
+ int src11,src12,src13,src14,src15,src16,src17,src18; \
+ src11 = *s++;\
+ OP(dst[8] , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
+ src12 = *s++;\
+ OP(dst[9] , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
+ src13 = *s++;\
+ OP(dst[10], (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
+ src14 = *s++;\
+ OP(dst[11], (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
+ src15 = *s++;\
+ OP(dst[12], (src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
+ src16 = *s++;\
+ OP(dst[13], (src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
+ src17 = *s++;\
+ OP(dst[14], (src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
+ src18 = *s++;\
+ OP(dst[15], (src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
+ } \
+ } \
+ dst+=dstStride;\
+ src+=srcStride;\
+ }while(--h);\
+}\
+\
+static inline void OPNAME ## h264_qpel_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride,int w,int h){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ do{\
+ int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
+ uint8_t *s = src-2*srcStride,*d=dst;\
+ srcB = *s; s+=srcStride;\
+ srcA = *s; s+=srcStride;\
+ src0 = *s; s+=srcStride;\
+ src1 = *s; s+=srcStride;\
+ src2 = *s; s+=srcStride;\
+ src3 = *s; s+=srcStride;\
+ OP(*d, (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));d+=dstStride;\
+ src4 = *s; s+=srcStride;\
+ OP(*d, (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));d+=dstStride;\
+ src5 = *s; s+=srcStride;\
+ OP(*d, (src2+src3)*20 - (src1+src4)*5 + (src0+src5));d+=dstStride;\
+ src6 = *s; s+=srcStride;\
+ OP(*d, (src3+src4)*20 - (src2+src5)*5 + (src1+src6));d+=dstStride;\
+ if (h>4) { \
+ int src7,src8,src9,src10; \
+ src7 = *s; s+=srcStride;\
+ OP(*d, (src4+src5)*20 - (src3+src6)*5 + (src2+src7));d+=dstStride;\
+ src8 = *s; s+=srcStride;\
+ OP(*d, (src5+src6)*20 - (src4+src7)*5 + (src3+src8));d+=dstStride;\
+ src9 = *s; s+=srcStride;\
+ OP(*d, (src6+src7)*20 - (src5+src8)*5 + (src4+src9));d+=dstStride;\
+ src10 = *s; s+=srcStride;\
+ OP(*d, (src7+src8)*20 - (src6+src9)*5 + (src5+src10));d+=dstStride;\
+ if (h>8) { \
+ int src11,src12,src13,src14,src15,src16,src17,src18; \
+ src11 = *s; s+=srcStride;\
+ OP(*d , (src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));d+=dstStride;\
+ src12 = *s; s+=srcStride;\
+ OP(*d , (src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));d+=dstStride;\
+ src13 = *s; s+=srcStride;\
+ OP(*d, (src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));d+=dstStride;\
+ src14 = *s; s+=srcStride;\
+ OP(*d, (src11+src12)*20 - (src10+src13)*5 + (src9 +src14));d+=dstStride;\
+ src15 = *s; s+=srcStride;\
+ OP(*d, (src12+src13)*20 - (src11+src14)*5 + (src10+src15));d+=dstStride;\
+ src16 = *s; s+=srcStride;\
+ OP(*d, (src13+src14)*20 - (src12+src15)*5 + (src11+src16));d+=dstStride;\
+ src17 = *s; s+=srcStride;\
+ OP(*d, (src14+src15)*20 - (src13+src16)*5 + (src12+src17));d+=dstStride;\
+ src18 = *s; s+=srcStride;\
+ OP(*d, (src15+src16)*20 - (src14+src17)*5 + (src13+src18));d+=dstStride;\
+ } \
+ } \
+ dst++;\
+ src++;\
+ }while(--w);\
+}\
+\
+static inline void OPNAME ## h264_qpel_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride,int w,int h){\
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+ int i;\
+ src -= 2*srcStride;\
+ i= h+5; \
+ do {\
+ int srcB,srcA,src0,src1,src2,src3,src4,src5,src6;\
+ uint8_t *s = src-2;\
+ srcB = *s++;\
+ srcA = *s++;\
+ src0 = *s++;\
+ src1 = *s++;\
+ src2 = *s++;\
+ src3 = *s++;\
+ tmp[0] = ((src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
+ src4 = *s++;\
+ tmp[1] = ((src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
+ src5 = *s++;\
+ tmp[2] = ((src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
+ src6 = *s++;\
+ tmp[3] = ((src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
+ if (w>4) { /* only for blocks wider than 4 */ \
+ int src7,src8,src9,src10; \
+ src7 = *s++;\
+ tmp[4] = ((src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
+ src8 = *s++;\
+ tmp[5] = ((src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
+ src9 = *s++;\
+ tmp[6] = ((src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
+ src10 = *s++;\
+ tmp[7] = ((src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
+ if (w>8) { \
+ int src11,src12,src13,src14,src15,src16,src17,src18; \
+ src11 = *s++;\
+ tmp[8] = ((src8 +src9 )*20 - (src7 +src10)*5 + (src6 +src11));\
+ src12 = *s++;\
+ tmp[9] = ((src9 +src10)*20 - (src8 +src11)*5 + (src7 +src12));\
+ src13 = *s++;\
+ tmp[10] = ((src10+src11)*20 - (src9 +src12)*5 + (src8 +src13));\
+ src14 = *s++;\
+ tmp[11] = ((src11+src12)*20 - (src10+src13)*5 + (src9 +src14));\
+ src15 = *s++;\
+ tmp[12] = ((src12+src13)*20 - (src11+src14)*5 + (src10+src15));\
+ src16 = *s++;\
+ tmp[13] = ((src13+src14)*20 - (src12+src15)*5 + (src11+src16));\
+ src17 = *s++;\
+ tmp[14] = ((src14+src15)*20 - (src13+src16)*5 + (src12+src17));\
+ src18 = *s++;\
+ tmp[15] = ((src15+src16)*20 - (src14+src17)*5 + (src13+src18));\
+ } \
+ } \
+ tmp+=tmpStride;\
+ src+=srcStride;\
+ }while(--i);\
+ tmp -= tmpStride*(h+5-2);\
+ i = w; \
+ do {\
+ int tmpB,tmpA,tmp0,tmp1,tmp2,tmp3,tmp4,tmp5,tmp6;\
+ int16_t *s = tmp-2*tmpStride; \
+ uint8_t *d=dst;\
+ tmpB = *s; s+=tmpStride;\
+ tmpA = *s; s+=tmpStride;\
+ tmp0 = *s; s+=tmpStride;\
+ tmp1 = *s; s+=tmpStride;\
+ tmp2 = *s; s+=tmpStride;\
+ tmp3 = *s; s+=tmpStride;\
+ OP2(*d, (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));d+=dstStride;\
+ tmp4 = *s; s+=tmpStride;\
+ OP2(*d, (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));d+=dstStride;\
+ tmp5 = *s; s+=tmpStride;\
+ OP2(*d, (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));d+=dstStride;\
+ tmp6 = *s; s+=tmpStride;\
+ OP2(*d, (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));d+=dstStride;\
+ if (h>4) { \
+ int tmp7,tmp8,tmp9,tmp10; \
+ tmp7 = *s; s+=tmpStride;\
+ OP2(*d, (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));d+=dstStride;\
+ tmp8 = *s; s+=tmpStride;\
+ OP2(*d, (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));d+=dstStride;\
+ tmp9 = *s; s+=tmpStride;\
+ OP2(*d, (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));d+=dstStride;\
+ tmp10 = *s; s+=tmpStride;\
+ OP2(*d, (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));d+=dstStride;\
+ if (h>8) { \
+ int tmp11,tmp12,tmp13,tmp14,tmp15,tmp16,tmp17,tmp18; \
+ tmp11 = *s; s+=tmpStride;\
+ OP2(*d , (tmp8 +tmp9 )*20 - (tmp7 +tmp10)*5 + (tmp6 +tmp11));d+=dstStride;\
+ tmp12 = *s; s+=tmpStride;\
+ OP2(*d , (tmp9 +tmp10)*20 - (tmp8 +tmp11)*5 + (tmp7 +tmp12));d+=dstStride;\
+ tmp13 = *s; s+=tmpStride;\
+ OP2(*d, (tmp10+tmp11)*20 - (tmp9 +tmp12)*5 + (tmp8 +tmp13));d+=dstStride;\
+ tmp14 = *s; s+=tmpStride;\
+ OP2(*d, (tmp11+tmp12)*20 - (tmp10+tmp13)*5 + (tmp9 +tmp14));d+=dstStride;\
+ tmp15 = *s; s+=tmpStride;\
+ OP2(*d, (tmp12+tmp13)*20 - (tmp11+tmp14)*5 + (tmp10+tmp15));d+=dstStride;\
+ tmp16 = *s; s+=tmpStride;\
+ OP2(*d, (tmp13+tmp14)*20 - (tmp12+tmp15)*5 + (tmp11+tmp16));d+=dstStride;\
+ tmp17 = *s; s+=tmpStride;\
+ OP2(*d, (tmp14+tmp15)*20 - (tmp13+tmp16)*5 + (tmp12+tmp17));d+=dstStride;\
+ tmp18 = *s; s+=tmpStride;\
+ OP2(*d, (tmp15+tmp16)*20 - (tmp14+tmp17)*5 + (tmp13+tmp18));d+=dstStride;\
+ } \
+ } \
+ dst++;\
+ tmp++;\
+ }while(--i);\
+}\
+\
+static void OPNAME ## h264_qpel4_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,4,4); \
+}\
+static void OPNAME ## h264_qpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,8,8); \
+}\
+static void OPNAME ## h264_qpel16_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel_h_lowpass(dst,src,dstStride,srcStride,16,16); \
+}\
+\
+static void OPNAME ## h264_qpel4_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,4,4); \
+}\
+static void OPNAME ## h264_qpel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,8,8); \
+}\
+static void OPNAME ## h264_qpel16_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
+ OPNAME ## h264_qpel_v_lowpass(dst,src,dstStride,srcStride,16,16); \
+}\
+static void OPNAME ## h264_qpel4_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,4,4); \
+}\
+static void OPNAME ## h264_qpel8_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,8,8); \
+}\
+static void OPNAME ## h264_qpel16_hv_lowpass(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
+ OPNAME ## h264_qpel_hv_lowpass(dst,tmp,src,dstStride,tmpStride,srcStride,16,16); \
+}\
+
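+/* H264_MC: builds the 16 quarter-pel mcXY functions for each block size
+ * from the lowpass primitives; quarter-pel positions are the rounded
+ * average of the two nearest full/half-pel predictions. */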
+#define H264_MC(OPNAME, SIZE) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc00_c (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## _c(dst, src, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc10_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src, half, stride, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc20_c(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc30_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t half[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned2(dst, src+1, half, stride, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc01_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t half[SIZE*SIZE];\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid, half, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc02_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ OPNAME ## h264_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc03_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t half[SIZE*SIZE];\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(half, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, full_mid+SIZE, half, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc11_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc31_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
+ copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc13_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc33_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
+ copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc22_c(uint8_t *dst, uint8_t *src, int stride){\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ OPNAME ## h264_qpel ## SIZE ## _hv_lowpass(dst, tmp, src, stride, SIZE, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc21_c(uint8_t *dst, uint8_t *src, int stride){\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfHV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc23_c(uint8_t *dst, uint8_t *src, int stride){\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ uint8_t halfH[SIZE*SIZE];\
+ uint8_t halfHV[SIZE*SIZE];\
+ put_h264_qpel ## SIZE ## _h_lowpass(halfH, src + stride, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfH, halfHV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc12_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ uint8_t halfV[SIZE*SIZE];\
+ uint8_t halfHV[SIZE*SIZE];\
+ copy_block ## SIZE (full, src - stride*2, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc32_c(uint8_t *dst, uint8_t *src, int stride){\
+ uint8_t full[SIZE*(SIZE+5)];\
+ uint8_t * const full_mid= full + SIZE*2;\
+ int16_t tmp[SIZE*(SIZE+5)];\
+ uint8_t halfV[SIZE*SIZE];\
+ uint8_t halfHV[SIZE*SIZE];\
+ copy_block ## SIZE (full, src - stride*2 + 1, SIZE, stride, SIZE + 5);\
+ put_h264_qpel ## SIZE ## _v_lowpass(halfV, full_mid, SIZE, SIZE);\
+ put_h264_qpel ## SIZE ## _hv_lowpass(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_aligned(dst, halfV, halfHV, stride, SIZE, SIZE, SIZE);\
+}\
+
+#define op_avg(a, b) a = (((a)+cm[((b) + 16)>>5]+1)>>1)
+//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
+#define op_put(a, b) a = cm[((b) + 16)>>5]
+#define op2_avg(a, b) a = (((a)+cm[((b) + 512)>>10]+1)>>1)
+#define op2_put(a, b) a = cm[((b) + 512)>>10]
+
+H264_LOWPASS(put_ , op_put, op2_put)
+H264_LOWPASS(avg_ , op_avg, op2_avg)
+H264_MC(put_, 4)
+H264_MC(put_, 8)
+H264_MC(put_, 16)
+H264_MC(avg_, 4)
+H264_MC(avg_, 8)
+H264_MC(avg_, 16)
+
+#undef op_avg
+#undef op_put
+#undef op2_avg
+#undef op2_put
+#endif
+
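+/* WMV2 half-pel filter: dst = (9*(b+c) - (a+d) + 8) >> 4, i.e. the 4-tap
+ * kernel (-1, 9, 9, -1)/16, applied horizontally or vertically. */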
+static void wmv2_mspel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ do{
+ int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
+ uint8_t *s = src;
+ src_1 = s[-1];
+ src0 = *s++;
+ src1 = *s++;
+ src2 = *s++;
+ dst[0]= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4];
+ src3 = *s++;
+ dst[1]= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4];
+ src4 = *s++;
+ dst[2]= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4];
+ src5 = *s++;
+ dst[3]= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4];
+ src6 = *s++;
+ dst[4]= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4];
+ src7 = *s++;
+ dst[5]= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4];
+ src8 = *s++;
+ dst[6]= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4];
+ src9 = *s++;
+ dst[7]= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4];
+ dst+=dstStride;
+ src+=srcStride;
+ }while(--h);
+}
+
+static void wmv2_mspel8_v_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int w){
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+
+ do{
+ int src_1,src0,src1,src2,src3,src4,src5,src6,src7,src8,src9;
+ uint8_t *s = src,*d = dst;
+ src_1 = *(s-srcStride);
+ src0 = *s; s+=srcStride;
+ src1 = *s; s+=srcStride;
+ src2 = *s; s+=srcStride;
+ *d= cm[(9*(src0 + src1) - (src_1 + src2) + 8)>>4]; d+=dstStride;
+ src3 = *s; s+=srcStride;
+ *d= cm[(9*(src1 + src2) - (src0 + src3) + 8)>>4]; d+=dstStride;
+ src4 = *s; s+=srcStride;
+ *d= cm[(9*(src2 + src3) - (src1 + src4) + 8)>>4]; d+=dstStride;
+ src5 = *s; s+=srcStride;
+ *d= cm[(9*(src3 + src4) - (src2 + src5) + 8)>>4]; d+=dstStride;
+ src6 = *s; s+=srcStride;
+ *d= cm[(9*(src4 + src5) - (src3 + src6) + 8)>>4]; d+=dstStride;
+ src7 = *s; s+=srcStride;
+ *d= cm[(9*(src5 + src6) - (src4 + src7) + 8)>>4]; d+=dstStride;
+ src8 = *s; s+=srcStride;
+ *d= cm[(9*(src6 + src7) - (src5 + src8) + 8)>>4]; d+=dstStride;
+ src9 = *s;
+ *d= cm[(9*(src7 + src8) - (src6 + src9) + 8)>>4]; d+=dstStride;
+ src++;
+ dst++;
+ }while(--w);
+}
+
+static void put_mspel8_mc00_c (uint8_t *dst, uint8_t *src, int stride){
+ put_pixels8_c(dst, src, stride, 8);
+}
+
+static void put_mspel8_mc10_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t half[64];
+ wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
+ put_pixels8_l2_aligned2(dst, src, half, stride, stride, 8, 8);
+}
+
+static void put_mspel8_mc20_c(uint8_t *dst, uint8_t *src, int stride){
+ wmv2_mspel8_h_lowpass(dst, src, stride, stride, 8);
+}
+
+static void put_mspel8_mc30_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t half[64];
+ wmv2_mspel8_h_lowpass(half, src, 8, stride, 8);
+ put_pixels8_l2_aligned2(dst, src+1, half, stride, stride, 8, 8);
+}
+
+static void put_mspel8_mc02_c(uint8_t *dst, uint8_t *src, int stride){
+ wmv2_mspel8_v_lowpass(dst, src, stride, stride, 8);
+}
+
+static void put_mspel8_mc12_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t halfH[88];
+ uint8_t halfV[64];
+ uint8_t halfHV[64];
+ wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
+ wmv2_mspel8_v_lowpass(halfV, src, 8, stride, 8);
+ wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
+ put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
+}
+static void put_mspel8_mc32_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t halfH[88];
+ uint8_t halfV[64];
+ uint8_t halfHV[64];
+ wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
+ wmv2_mspel8_v_lowpass(halfV, src+1, 8, stride, 8);
+ wmv2_mspel8_v_lowpass(halfHV, halfH+8, 8, 8, 8);
+ put_pixels8_l2_aligned(dst, halfV, halfHV, stride, 8, 8, 8);
+}
+static void put_mspel8_mc22_c(uint8_t *dst, uint8_t *src, int stride){
+ uint8_t halfH[88];
+ wmv2_mspel8_h_lowpass(halfH, src-stride, 8, stride, 11);
+ wmv2_mspel8_v_lowpass(dst, halfH+8, stride, 8, 8);
+}
diff --git a/src/libffmpeg/libavcodec/shorten.c b/contrib/ffmpeg/libavcodec/shorten.c
index fe956bc39..358ecf23f 100644
--- a/src/libffmpeg/libavcodec/shorten.c
+++ b/contrib/ffmpeg/libavcodec/shorten.c
@@ -2,18 +2,20 @@
* Shorten decoder
* Copyright (c) 2005 Jeff Muizelaar
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -334,7 +336,7 @@ static int shorten_decode_frame(AVCodecContext *avctx,
s->nwrap = FFMAX(NWRAP, maxnlpc);
if (allocate_buffers(s))
- return -1;
+ return -1;
init_offset(s);
diff --git a/src/libffmpeg/libavcodec/simple_idct.c b/contrib/ffmpeg/libavcodec/simple_idct.c
index 8fa83bec7..2c026f08f 100644
--- a/src/libffmpeg/libavcodec/simple_idct.c
+++ b/contrib/ffmpeg/libavcodec/simple_idct.c
@@ -3,18 +3,20 @@
*
* Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -182,7 +184,7 @@ static inline void idctSparseColPut (uint8_t *dest, int line_size,
DCTELEM * col)
{
int a0, a1, a2, a3, b0, b1, b2, b3;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* XXX: I did that only to give same values as previous code */
a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4));
@@ -254,7 +256,7 @@ static inline void idctSparseColAdd (uint8_t *dest, int line_size,
DCTELEM * col)
{
int a0, a1, a2, a3, b0, b1, b2, b3;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
/* XXX: I did that only to give same values as previous code */
a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4));
@@ -429,7 +431,7 @@ void simple_idct(DCTELEM *block)
static inline void idct4col(uint8_t *dest, int line_size, const DCTELEM *col)
{
int c0, c1, c2, c3, a0, a1, a2, a3;
- const uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
a0 = col[8*0];
a1 = col[8*2];
@@ -509,7 +511,7 @@ void simple_idct248_put(uint8_t *dest, int line_size, DCTELEM *block)
static inline void idct4col_add(uint8_t *dest, int line_size, const DCTELEM *col)
{
int c0, c1, c2, c3, a0, a1, a2, a3;
- const uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
a0 = col[8*0];
a1 = col[8*1];
@@ -537,7 +539,7 @@ static inline void idct4col_add(uint8_t *dest, int line_size, const DCTELEM *col
static inline void idct4row(DCTELEM *row)
{
int c0, c1, c2, c3, a0, a1, a2, a3;
- //const uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ //const uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
a0 = row[0];
a1 = row[1];
diff --git a/src/libffmpeg/libavcodec/simple_idct.h b/contrib/ffmpeg/libavcodec/simple_idct.h
index 64f410f0d..c4b453329 100644
--- a/src/libffmpeg/libavcodec/simple_idct.h
+++ b/contrib/ffmpeg/libavcodec/simple_idct.h
@@ -3,18 +3,20 @@
*
* Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/smacker.c b/contrib/ffmpeg/libavcodec/smacker.c
index 162c68ada..2f2185848 100644
--- a/src/libffmpeg/libavcodec/smacker.c
+++ b/contrib/ffmpeg/libavcodec/smacker.c
@@ -2,18 +2,20 @@
* Smacker decoder
* Copyright (c) 2006 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -520,7 +522,7 @@ static int decode_init(AVCodecContext *avctx)
c->pic.data[0] = NULL;
- if (avcodec_check_dimensions(avctx, avctx->height, avctx->width) < 0) {
+ if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
return 1;
}
@@ -550,14 +552,10 @@ static int decode_end(AVCodecContext *avctx)
{
SmackVContext * const smk = (SmackVContext *)avctx->priv_data;
- if(smk->mmap_tbl)
- av_free(smk->mmap_tbl);
- if(smk->mclr_tbl)
- av_free(smk->mclr_tbl);
- if(smk->full_tbl)
- av_free(smk->full_tbl);
- if(smk->type_tbl)
- av_free(smk->type_tbl);
+ av_freep(&smk->mmap_tbl);
+ av_freep(&smk->mclr_tbl);
+ av_freep(&smk->full_tbl);
+ av_freep(&smk->type_tbl);
if (smk->pic.data[0])
avctx->release_buffer(avctx, &smk->pic);
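[Editor's note, not part of the patch: two things change in smacker.c above. The avcodec_check_dimensions() call is fixed to pass width and height in the order the function expects, and the decode_end() cleanup is collapsed onto av_freep(), which frees the buffer and clears the pointer; the removed NULL checks were redundant anyway because av_free(NULL) is a no-op. A minimal stand-in with the same behaviour as av_freep(), using a hypothetical name, might look like this:]

    #include <stdlib.h>

    /* sketch of the av_freep() idiom: free the buffer and null the pointer */
    static void my_freep(void *arg)
    {
        void **ptr = (void **)arg;
        free(*ptr);      /* free(NULL) is defined to do nothing */
        *ptr = NULL;     /* protects against double frees and dangling pointers */
    }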
diff --git a/src/libffmpeg/libavcodec/smc.c b/contrib/ffmpeg/libavcodec/smc.c
index a08beeacd..77fae328b 100644
--- a/src/libffmpeg/libavcodec/smc.c
+++ b/contrib/ffmpeg/libavcodec/smc.c
@@ -2,18 +2,20 @@
* Quicktime Graphics (SMC) Video Decoder
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/snow.c b/contrib/ffmpeg/libavcodec/snow.c
index 05ad44726..6bc9a8f1a 100644
--- a/src/libffmpeg/libavcodec/snow.c
+++ b/contrib/ffmpeg/libavcodec/snow.c
@@ -1,18 +1,20 @@
/*
* Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -588,7 +590,7 @@ static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signe
int i;
if(v){
- const int a= ABS(v);
+ const int a= FFABS(v);
const int e= av_log2(a);
#if 1
const int el= FFMIN(e, 10);
@@ -1664,7 +1666,7 @@ static int encode_subband_c0run(SnowContext *s, SubBand *b, DWTELEM *src, DWTELE
p= parent[px + py*2*stride];
}
if(/*ll|*/l|lt|t|rt|p){
- int context= av_log2(/*ABS(ll) + */3*ABS(l) + ABS(lt) + 2*ABS(t) + ABS(rt) + ABS(p));
+ int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
put_rac(&s->c, &b->state[0][context], !!v);
}else{
@@ -1680,11 +1682,11 @@ static int encode_subband_c0run(SnowContext *s, SubBand *b, DWTELEM *src, DWTELE
}
}
if(v){
- int context= av_log2(/*ABS(ll) + */3*ABS(l) + ABS(lt) + 2*ABS(t) + ABS(rt) + ABS(p));
- int l2= 2*ABS(l) + (l<0);
- int t2= 2*ABS(t) + (t<0);
+ int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
+ int l2= 2*FFABS(l) + (l<0);
+ int t2= 2*FFABS(t) + (t<0);
- put_symbol2(&s->c, b->state[context + 2], ABS(v)-1, context-4);
+ put_symbol2(&s->c, b->state[context + 2], FFABS(v)-1, context-4);
put_rac(&s->c, &b->state[0][16 + 1 + 3 + quant3bA[l2&0xFF] + 3*quant3bA[t2&0xFF]], v<0);
}
}
@@ -1747,7 +1749,7 @@ static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, i
}
}
if(/*ll|*/l|lt|t|rt|p){
- int context= av_log2(/*ABS(ll) + */3*(l>>1) + (lt>>1) + (t&~1) + (rt>>1) + (p>>1));
+ int context= av_log2(/*FFABS(ll) + */3*(l>>1) + (lt>>1) + (t&~1) + (rt>>1) + (p>>1));
v=get_rac(&s->c, &b->state[0][context]);
if(v){
@@ -1900,7 +1902,7 @@ static int pix_sum(uint8_t * pix, int line_size, int w)
static int pix_norm1(uint8_t * pix, int line_size, int w)
{
int s, i, j;
- uint32_t *sq = squareTbl + 256;
+ uint32_t *sq = ff_squareTbl + 256;
s = 0;
for (i = 0; i < w; i++) {
@@ -1975,7 +1977,6 @@ static inline void pred_mv(SnowContext *s, int *mx, int *my, int ref,
#define P_MV1 P[9]
#define FLAG_QPEL 1 //must be 1
-#ifdef CONFIG_ENCODERS
static int encode_q_branch(SnowContext *s, int level, int x, int y){
uint8_t p_buffer[1024];
uint8_t i_buffer[1024];
@@ -2015,8 +2016,8 @@ static int encode_q_branch(SnowContext *s, int level, int x, int y){
const int shift= 1+qpel;
MotionEstContext *c= &s->m.me;
int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
- int mx_context= av_log2(2*ABS(left->mx - top->mx));
- int my_context= av_log2(2*ABS(left->my - top->my));
+ int mx_context= av_log2(2*FFABS(left->mx - top->mx));
+ int my_context= av_log2(2*FFABS(left->my - top->my));
int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
int ref, best_ref, ref_score, ref_mx, ref_my;
@@ -2204,7 +2205,6 @@ static int encode_q_branch(SnowContext *s, int level, int x, int y){
return score;
}
}
-#endif
static always_inline int same_block(BlockNode *a, BlockNode *b){
if((a->type&BLOCK_INTRA) && (b->type&BLOCK_INTRA)){
@@ -2229,8 +2229,8 @@ static void encode_q_branch2(SnowContext *s, int level, int x, int y){
int pcr= left->color[2];
int pmx, pmy;
int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
- int mx_context= av_log2(2*ABS(left->mx - top->mx)) + 16*!!b->ref;
- int my_context= av_log2(2*ABS(left->my - top->my)) + 16*!!b->ref;
+ int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 16*!!b->ref;
+ int my_context= av_log2(2*FFABS(left->my - top->my)) + 16*!!b->ref;
int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
if(s->keyframe){
@@ -2293,8 +2293,8 @@ static void decode_q_branch(SnowContext *s, int level, int x, int y){
int my= mid_pred(left->my, top->my, tr->my);
int ref = 0;
int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
- int mx_context= av_log2(2*ABS(left->mx - top->mx)) + 0*av_log2(2*ABS(tr->mx - top->mx));
- int my_context= av_log2(2*ABS(left->my - top->my)) + 0*av_log2(2*ABS(tr->my - top->my));
+ int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
+ int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));
type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0;
@@ -2319,13 +2319,12 @@ static void decode_q_branch(SnowContext *s, int level, int x, int y){
}
}
-#ifdef CONFIG_ENCODERS
-static void encode_blocks(SnowContext *s){
+static void encode_blocks(SnowContext *s, int search){
int x, y;
int w= s->b_width;
int h= s->b_height;
- if(s->avctx->me_method == ME_ITER && !s->keyframe)
+ if(s->avctx->me_method == ME_ITER && !s->keyframe && search)
iterative_me(s);
for(y=0; y<h; y++){
@@ -2334,14 +2333,13 @@ static void encode_blocks(SnowContext *s){
return;
}
for(x=0; x<w; x++){
- if(s->avctx->me_method == ME_ITER)
+ if(s->avctx->me_method == ME_ITER || !search)
encode_q_branch2(s, 0, x, y);
else
encode_q_branch (s, 0, x, y);
}
}
}
-#endif
static void decode_blocks(SnowContext *s){
int x, y;
@@ -2555,137 +2553,7 @@ void ff_snow_inner_add_yblock(uint8_t *obmc, const int obmc_stride, uint8_t * *
}
//FIXME name cleanup (b_w, block_w, b_width stuff)
-static always_inline void add_yblock_buffered(SnowContext *s, slice_buffer * sb, DWTELEM *old_dst, uint8_t *dst8, uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int plane_index){
- DWTELEM * dst = NULL;
- const int b_width = s->b_width << s->block_max_depth;
- const int b_height= s->b_height << s->block_max_depth;
- const int b_stride= b_width;
- BlockNode *lt= &s->block[b_x + b_y*b_stride];
- BlockNode *rt= lt+1;
- BlockNode *lb= lt+b_stride;
- BlockNode *rb= lb+1;
- uint8_t *block[4];
- int tmp_step= src_stride >= 7*MB_SIZE ? MB_SIZE : MB_SIZE*src_stride;
- uint8_t tmp[src_stride*7*MB_SIZE]; //FIXME align
- uint8_t *ptmp;
- int x,y;
-
- if(b_x<0){
- lt= rt;
- lb= rb;
- }else if(b_x + 1 >= b_width){
- rt= lt;
- rb= lb;
- }
- if(b_y<0){
- lt= lb;
- rt= rb;
- }else if(b_y + 1 >= b_height){
- lb= lt;
- rb= rt;
- }
-
- if(src_x<0){ //FIXME merge with prev & always round internal width upto *16
- obmc -= src_x;
- b_w += src_x;
- src_x=0;
- }else if(src_x + b_w > w){
- b_w = w - src_x;
- }
- if(src_y<0){
- obmc -= src_y*obmc_stride;
- b_h += src_y;
- src_y=0;
- }else if(src_y + b_h> h){
- b_h = h - src_y;
- }
-
- if(b_w<=0 || b_h<=0) return;
-
-assert(src_stride > 2*MB_SIZE + 5);
-// old_dst += src_x + src_y*dst_stride;
- dst8+= src_x + src_y*src_stride;
-// src += src_x + src_y*src_stride;
-
- ptmp= tmp + 3*tmp_step;
- block[0]= ptmp;
- ptmp+=tmp_step;
- pred_block(s, block[0], tmp, src_stride, src_x, src_y, b_w, b_h, lt, plane_index, w, h);
-
- if(same_block(lt, rt)){
- block[1]= block[0];
- }else{
- block[1]= ptmp;
- ptmp+=tmp_step;
- pred_block(s, block[1], tmp, src_stride, src_x, src_y, b_w, b_h, rt, plane_index, w, h);
- }
-
- if(same_block(lt, lb)){
- block[2]= block[0];
- }else if(same_block(rt, lb)){
- block[2]= block[1];
- }else{
- block[2]= ptmp;
- ptmp+=tmp_step;
- pred_block(s, block[2], tmp, src_stride, src_x, src_y, b_w, b_h, lb, plane_index, w, h);
- }
-
- if(same_block(lt, rb) ){
- block[3]= block[0];
- }else if(same_block(rt, rb)){
- block[3]= block[1];
- }else if(same_block(lb, rb)){
- block[3]= block[2];
- }else{
- block[3]= ptmp;
- pred_block(s, block[3], tmp, src_stride, src_x, src_y, b_w, b_h, rb, plane_index, w, h);
- }
-#if 0
- for(y=0; y<b_h; y++){
- for(x=0; x<b_w; x++){
- int v= obmc [x + y*obmc_stride] * block[3][x + y*src_stride] * (256/OBMC_MAX);
- if(add) dst[x + y*dst_stride] += v;
- else dst[x + y*dst_stride] -= v;
- }
- }
- for(y=0; y<b_h; y++){
- uint8_t *obmc2= obmc + (obmc_stride>>1);
- for(x=0; x<b_w; x++){
- int v= obmc2[x + y*obmc_stride] * block[2][x + y*src_stride] * (256/OBMC_MAX);
- if(add) dst[x + y*dst_stride] += v;
- else dst[x + y*dst_stride] -= v;
- }
- }
- for(y=0; y<b_h; y++){
- uint8_t *obmc3= obmc + obmc_stride*(obmc_stride>>1);
- for(x=0; x<b_w; x++){
- int v= obmc3[x + y*obmc_stride] * block[1][x + y*src_stride] * (256/OBMC_MAX);
- if(add) dst[x + y*dst_stride] += v;
- else dst[x + y*dst_stride] -= v;
- }
- }
- for(y=0; y<b_h; y++){
- uint8_t *obmc3= obmc + obmc_stride*(obmc_stride>>1);
- uint8_t *obmc4= obmc3+ (obmc_stride>>1);
- for(x=0; x<b_w; x++){
- int v= obmc4[x + y*obmc_stride] * block[0][x + y*src_stride] * (256/OBMC_MAX);
- if(add) dst[x + y*dst_stride] += v;
- else dst[x + y*dst_stride] -= v;
- }
- }
-#else
-{
-
- START_TIMER
-
- s->dsp.inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
- STOP_TIMER("Inner add y block")
-}
-#endif
-}
-
-//FIXME name clenup (b_w, block_w, b_width stuff)
-static always_inline void add_yblock(SnowContext *s, DWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index){
+static always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer *sb, DWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index){
const int b_width = s->b_width << s->block_max_depth;
const int b_height= s->b_height << s->block_max_depth;
const int b_stride= b_width;
@@ -2717,7 +2585,7 @@ static always_inline void add_yblock(SnowContext *s, DWTELEM *dst, uint8_t *dst8
if(src_x<0){ //FIXME merge with prev & always round internal width upto *16
obmc -= src_x;
b_w += src_x;
- if(!offset_dst)
+ if(!sliced && !offset_dst)
dst -= src_x;
src_x=0;
}else if(src_x + b_w > w){
@@ -2726,7 +2594,7 @@ static always_inline void add_yblock(SnowContext *s, DWTELEM *dst, uint8_t *dst8
if(src_y<0){
obmc -= src_y*obmc_stride;
b_h += src_y;
- if(!offset_dst)
+ if(!sliced && !offset_dst)
dst -= src_y*dst_stride;
src_y=0;
}else if(src_y + b_h> h){
@@ -2736,7 +2604,7 @@ static always_inline void add_yblock(SnowContext *s, DWTELEM *dst, uint8_t *dst8
if(b_w<=0 || b_h<=0) return;
assert(src_stride > 2*MB_SIZE + 5);
- if(offset_dst)
+ if(!sliced && offset_dst)
dst += src_x + src_y*dst_stride;
dst8+= src_x + src_y*src_stride;
// src += src_x + src_y*src_stride;
@@ -2808,6 +2676,12 @@ assert(src_stride > 2*MB_SIZE + 5);
}
}
#else
+ if(sliced){
+ START_TIMER
+
+ s->dsp.inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
+ STOP_TIMER("inner_add_yblock")
+ }else
for(y=0; y<b_h; y++){
//FIXME ugly misuse of obmc_stride
uint8_t *obmc1= obmc + y*obmc_stride;
@@ -2890,14 +2764,14 @@ static always_inline void predict_slice_buffered(SnowContext *s, slice_buffer *
for(mb_x=0; mb_x<=mb_w; mb_x++){
START_TIMER
- add_yblock_buffered(s, sb, old_buffer, dst8, obmc,
+ add_yblock(s, 1, sb, old_buffer, dst8, obmc,
block_w*mb_x - block_w/2,
block_w*mb_y - block_w/2,
block_w, block_w,
w, h,
w, ref_stride, obmc_stride,
mb_x - 1, mb_y - 1,
- add, plane_index);
+ add, 0, plane_index);
STOP_TIMER("add_yblock")
}
@@ -2947,7 +2821,7 @@ static always_inline void predict_slice(SnowContext *s, DWTELEM *buf, int plane_
for(mb_x=0; mb_x<=mb_w; mb_x++){
START_TIMER
- add_yblock(s, buf, dst8, obmc,
+ add_yblock(s, 0, NULL, buf, dst8, obmc,
block_w*mb_x - block_w/2,
block_w*mb_y - block_w/2,
block_w, block_w,
@@ -2998,7 +2872,7 @@ static int get_dc(SnowContext *s, int mb_x, int mb_y, int plane_index){
int x= block_w*mb_x2 + block_w/2;
int y= block_w*mb_y2 + block_w/2;
- add_yblock(s, dst + ((i&1)+(i>>1)*obmc_stride)*block_w, NULL, obmc,
+ add_yblock(s, 0, NULL, dst + ((i&1)+(i>>1)*obmc_stride)*block_w, NULL, obmc,
x, y, block_w, block_w, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);
for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_w); y2++){
@@ -3034,8 +2908,8 @@ static inline int get_block_bits(SnowContext *s, int x, int y, int w){
BlockNode *tl = y && x ? &s->block[index-b_stride-1] : left;
BlockNode *tr = y && x+w<b_stride ? &s->block[index-b_stride+w] : tl;
int dmx, dmy;
-// int mx_context= av_log2(2*ABS(left->mx - top->mx));
-// int my_context= av_log2(2*ABS(left->my - top->my));
+// int mx_context= av_log2(2*FFABS(left->mx - top->mx));
+// int my_context= av_log2(2*FFABS(left->my - top->my));
if(x<0 || x>=b_stride || y>=b_height)
return 0;
@@ -3049,15 +2923,15 @@ static inline int get_block_bits(SnowContext *s, int x, int y, int w){
//FIXME try accurate rate
//FIXME intra and inter predictors if surrounding blocks arent the same type
if(b->type & BLOCK_INTRA){
- return 3+2*( av_log2(2*ABS(left->color[0] - b->color[0]))
- + av_log2(2*ABS(left->color[1] - b->color[1]))
- + av_log2(2*ABS(left->color[2] - b->color[2])));
+ return 3+2*( av_log2(2*FFABS(left->color[0] - b->color[0]))
+ + av_log2(2*FFABS(left->color[1] - b->color[1]))
+ + av_log2(2*FFABS(left->color[2] - b->color[2])));
}else{
pred_mv(s, &dmx, &dmy, b->ref, left, top, tr);
dmx-= b->mx;
dmy-= b->my;
- return 2*(1 + av_log2(2*ABS(dmx)) //FIXME kill the 2* can be merged in lambda
- + av_log2(2*ABS(dmy))
+ return 2*(1 + av_log2(2*FFABS(dmx)) //FIXME kill the 2* can be merged in lambda
+ + av_log2(2*FFABS(dmy))
+ av_log2(2*b->ref));
}
}
@@ -3066,7 +2940,6 @@ static int get_block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index, con
Plane *p= &s->plane[plane_index];
const int block_size = MB_SIZE >> s->block_max_depth;
const int block_w = plane_index ? block_size/2 : block_size;
- const uint8_t *obmc = plane_index ? obmc_tab[s->block_max_depth+1] : obmc_tab[s->block_max_depth];
const int obmc_stride= plane_index ? block_size : 2*block_size;
const int ref_stride= s->current_picture.linesize[plane_index];
uint8_t *dst= s->current_picture.data[plane_index];
@@ -3167,9 +3040,8 @@ static int get_4block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index){
const int ref_stride= s->current_picture.linesize[plane_index];
uint8_t *dst= s->current_picture.data[plane_index];
uint8_t *src= s-> input_picture.data[plane_index];
- const static DWTELEM zero_dst[4096]; //FIXME
+ static const DWTELEM zero_dst[4096]; //FIXME
const int b_stride = s->b_width << s->block_max_depth;
- const int b_height = s->b_height<< s->block_max_depth;
const int w= p->width;
const int h= p->height;
int distortion= 0;
@@ -3182,7 +3054,7 @@ static int get_4block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index){
int x= block_w*mb_x2 + block_w/2;
int y= block_w*mb_y2 + block_w/2;
- add_yblock(s, zero_dst, dst, obmc,
+ add_yblock(s, 0, NULL, zero_dst, dst, obmc,
x, y, block_w, block_w, w, h, /*dst_stride*/0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);
//FIXME find a cleaner/simpler way to skip the outside stuff
@@ -3961,13 +3833,13 @@ static int qscale2qlog(int qscale){
+ 61*QROOT/8; //<64 >60
}
-static void ratecontrol_1pass(SnowContext *s, AVFrame *pict)
+static int ratecontrol_1pass(SnowContext *s, AVFrame *pict)
{
/* estimate the frame's complexity as a sum of weighted dwt coefs.
* FIXME we know exact mv bits at this point,
* but ratecontrol isn't set up to include them. */
uint32_t coef_sum= 0;
- int level, orientation;
+ int level, orientation, delta_qlog;
for(level=0; level<s->spatial_decomposition_count; level++){
for(orientation=level ? 1 : 0; orientation<4; orientation++){
@@ -4003,8 +3875,12 @@ static void ratecontrol_1pass(SnowContext *s, AVFrame *pict)
}
pict->quality= ff_rate_estimate_qscale(&s->m, 1);
+ if (pict->quality < 0)
+ return INT_MIN;
s->lambda= pict->quality * 3/2;
- s->qlog= qscale2qlog(pict->quality);
+ delta_qlog= qscale2qlog(pict->quality) - s->qlog;
+ s->qlog+= delta_qlog;
+ return delta_qlog;
}
static void calculate_vissual_weight(SnowContext *s, Plane *p){
@@ -4034,7 +3910,6 @@ static void calculate_vissual_weight(SnowContext *s, Plane *p){
}
}
-#ifdef CONFIG_ENCODERS
static int encode_init(AVCodecContext *avctx)
{
SnowContext *s = avctx->priv_data;
@@ -4122,7 +3997,6 @@ static int encode_init(AVCodecContext *avctx)
return 0;
}
-#endif
static int frame_start(SnowContext *s){
AVFrame tmp;
@@ -4161,7 +4035,6 @@ static int frame_start(SnowContext *s){
return 0;
}
-#ifdef CONFIG_ENCODERS
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
SnowContext *s = avctx->priv_data;
RangeCoder * const c= &s->c;
@@ -4169,6 +4042,8 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
const int width= s->avctx->width;
const int height= s->avctx->height;
int level, orientation, plane_index, i, y;
+ uint8_t rc_header_bak[sizeof(s->header_state)];
+ uint8_t rc_block_bak[sizeof(s->block_state)];
ff_init_range_encoder(c, buf, buf_size);
ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
@@ -4187,8 +4062,11 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
s->m.pict_type =
pict->pict_type= s->m.rc_context.entry[avctx->frame_number].new_pict_type;
s->keyframe= pict->pict_type==FF_I_TYPE;
- if(!(avctx->flags&CODEC_FLAG_QSCALE))
+ if(!(avctx->flags&CODEC_FLAG_QSCALE)) {
pict->quality= ff_rate_estimate_qscale(&s->m, 0);
+ if (pict->quality < 0)
+ return -1;
+ }
}else{
s->keyframe= avctx->gop_size==0 || avctx->frame_number % avctx->gop_size == 0;
s->m.pict_type=
@@ -4251,6 +4129,11 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
s->dsp= s->m.dsp;
}
+ if(s->pass1_rc){
+ memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
+ memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
+ }
+
redo_frame:
s->m.pict_type = pict->pict_type;
@@ -4258,7 +4141,7 @@ redo_frame:
encode_header(s);
s->m.misc_bits = 8*(s->c.bytestream - s->c.bytestream_start);
- encode_blocks(s);
+ encode_blocks(s, 1);
s->m.mv_bits = 8*(s->c.bytestream - s->c.bytestream_start) - s->m.misc_bits;
for(plane_index=0; plane_index<3; plane_index++){
@@ -4301,8 +4184,19 @@ redo_frame:
ff_spatial_dwt(s->spatial_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
- if(s->pass1_rc && plane_index==0)
- ratecontrol_1pass(s, pict);
+ if(s->pass1_rc && plane_index==0){
+ int delta_qlog = ratecontrol_1pass(s, pict);
+ if (delta_qlog <= INT_MIN)
+ return -1;
+ if(delta_qlog){
+ //reordering qlog in the bitstream would eliminate this reset
+ ff_init_range_encoder(c, buf, buf_size);
+ memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
+ memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
+ encode_header(s);
+ encode_blocks(s, 0);
+ }
+ }
for(level=0; level<s->spatial_decomposition_count; level++){
for(orientation=level ? 1 : 0; orientation<4; orientation++){
@@ -4380,16 +4274,20 @@ STOP_TIMER("pred-conv")}
s->m.current_picture.quality = pict->quality;
s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
if(s->pass1_rc)
- ff_rate_estimate_qscale(&s->m, 0);
+ if (ff_rate_estimate_qscale(&s->m, 0) < 0)
+ return -1;
if(avctx->flags&CODEC_FLAG_PASS1)
ff_write_pass1_stats(&s->m);
s->m.last_pict_type = s->m.pict_type;
+ avctx->frame_bits = s->m.frame_bits;
+ avctx->mv_bits = s->m.mv_bits;
+ avctx->misc_bits = s->m.misc_bits;
+ avctx->p_tex_bits = s->m.p_tex_bits;
emms_c();
return ff_rac_terminate(c);
}
-#endif
static void common_end(SnowContext *s){
int plane_index, level, orientation, i;
@@ -4421,7 +4319,6 @@ static void common_end(SnowContext *s){
}
}
-#ifdef CONFIG_ENCODERS
static int encode_end(AVCodecContext *avctx)
{
SnowContext *s = avctx->priv_data;
@@ -4431,7 +4328,6 @@ static int encode_end(AVCodecContext *avctx)
return 0;
}
-#endif
static int decode_init(AVCodecContext *avctx)
{
@@ -4671,7 +4567,7 @@ int main(){
ff_spatial_idwt(buffer[0], width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
for(i=0; i<width*height; i++)
- if(ABS(buffer[0][i] - buffer[1][i])>20) printf("fsck: %d %d %d\n",i, buffer[0][i], buffer[1][i]);
+ if(FFABS(buffer[0][i] - buffer[1][i])>20) printf("fsck: %d %d %d\n",i, buffer[0][i], buffer[1][i]);
#if 0
printf("testing AC coder\n");
@@ -4681,7 +4577,7 @@ int main(){
for(i=-256; i<256; i++){
START_TIMER
- put_symbol(&s.c, s.header_state, i*i*i/3*ABS(i), 1);
+ put_symbol(&s.c, s.header_state, i*i*i/3*FFABS(i), 1);
STOP_TIMER("put_symbol")
}
ff_rac_terminate(&s.c);
@@ -4695,7 +4591,7 @@ STOP_TIMER("put_symbol")
START_TIMER
j= get_symbol(&s.c, s.header_state, 1);
STOP_TIMER("get_symbol")
- if(j!=i*i*i/3*ABS(i)) printf("fsck: %d != %d\n", i, j);
+ if(j!=i*i*i/3*FFABS(i)) printf("fsck: %d != %d\n", i, j);
}
#endif
{
@@ -4724,9 +4620,9 @@ int64_t g=0;
for(x=0; x<width; x++){
int64_t d= buffer[0][x + y*width];
error += d*d;
- if(ABS(width/2-x)<9 && ABS(height/2-y)<9 && level==2) printf("%8lld ", d);
+ if(FFABS(width/2-x)<9 && FFABS(height/2-y)<9 && level==2) printf("%8"PRId64" ", d);
}
- if(ABS(height/2-y)<9 && level==2) printf("\n");
+ if(FFABS(height/2-y)<9 && level==2) printf("\n");
}
error= (int)(sqrt(error)+0.5);
errors[level][orientation]= error;
@@ -4738,7 +4634,7 @@ int64_t g=0;
for(level=0; level<s.spatial_decomposition_count; level++){
printf(" {");
for(orientation=0; orientation<4; orientation++){
- printf("%8lld,", errors[level][orientation]/g);
+ printf("%8"PRId64",", errors[level][orientation]/g);
}
printf("},\n");
}
@@ -4777,9 +4673,9 @@ int64_t g=0;
for(x=0; x<width; x++){
int64_t d= buffer[0][x + y*width];
error += d*d;
- if(ABS(width/2-x)<9 && ABS(height/2-y)<9) printf("%8lld ", d);
+ if(FFABS(width/2-x)<9 && FFABS(height/2-y)<9) printf("%8"PRId64" ", d);
}
- if(ABS(height/2-y)<9) printf("\n");
+ if(FFABS(height/2-y)<9) printf("\n");
}
}
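[Editor's note, not part of the patch: much of the snow.c churn above is mechanical portability work. ABS() becomes FFmpeg's own FFABS() macro, and the "%lld" printf conversions become "%"PRId64 from <inttypes.h>, which matches int64_t on every platform while "%lld" does not. A tiny self-contained illustration follows; the FFABS definition below is only an assumed sketch of the macro, not a quotation of the header:]

    #include <inttypes.h>
    #include <stdio.h>

    #ifndef FFABS
    #define FFABS(a) ((a) >= 0 ? (a) : (-(a)))   /* assumed definition, for this sketch only */
    #endif

    int main(void)
    {
        int64_t d = -1234567890123LL;
        printf("%8"PRId64"\n", FFABS(d));         /* portable 64-bit formatting */
        return 0;
    }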
diff --git a/src/libffmpeg/libavcodec/snow.h b/contrib/ffmpeg/libavcodec/snow.h
index 26b30abe5..f7cee131a 100644
--- a/src/libffmpeg/libavcodec/snow.h
+++ b/contrib/ffmpeg/libavcodec/snow.h
@@ -2,18 +2,20 @@
* Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
* Copyright (C) 2006 Robert Edele <yartrebo@earthlink.net>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -125,9 +127,13 @@ extern void ff_snow_vertical_compose97i(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, D
extern void ff_snow_horizontal_compose97i(DWTELEM *b, int width);
extern void ff_snow_inner_add_yblock(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
+#ifdef CONFIG_SNOW_ENCODER
int w53_32_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h);
int w97_32_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h);
-
+#else
+static int w53_32_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {assert (0);}
+static int w97_32_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {assert (0);}
+#endif
/* C bits used by mmx/sse2/altivec */
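[Editor's note, not part of the patch: the snow.h hunk above keeps decoder-only builds working. When CONFIG_SNOW_ENCODER is defined the real w53_32_c()/w97_32_c() rate-distortion helpers are declared; otherwise assert(0) stubs are supplied so code that merely references them still compiles. The shape of that pattern, with hypothetical names, is roughly:]

    #include <assert.h>

    #ifdef CONFIG_MY_ENCODER
    int my_cmp_fn(const unsigned char *a, const unsigned char *b, int stride, int h);  /* real version is built with the encoder */
    #else
    static inline int my_cmp_fn(const unsigned char *a, const unsigned char *b, int stride, int h)
    {
        (void)a; (void)b; (void)stride; (void)h;
        assert(0);       /* must never be reached in a decoder-only build */
        return 0;
    }
    #endif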
diff --git a/contrib/ffmpeg/libavcodec/sonic.c b/contrib/ffmpeg/libavcodec/sonic.c
new file mode 100644
index 000000000..2f798cc03
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/sonic.c
@@ -0,0 +1,981 @@
+/*
+ * Simple free lossless/lossy audio codec
+ * Copyright (c) 2004 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avcodec.h"
+#include "bitstream.h"
+#include "golomb.h"
+
+/**
+ * @file sonic.c
+ * Simple free lossless/lossy audio codec
+ * Based on Paul Francis Harrison's Bonk (http://www.logarithmic.net/pfh/bonk)
+ * Written and designed by Alex Beregszaszi
+ *
+ * TODO:
+ * - CABAC put/get_symbol
+ * - independent quantizer for channels
+ * - >2 channels support
+ * - more decorrelation types
+ * - more tap_quant tests
+ * - selectable intlist writers/readers (bonk-style, golomb, cabac)
+ */
+
+#define MAX_CHANNELS 2
+
+#define MID_SIDE 0
+#define LEFT_SIDE 1
+#define RIGHT_SIDE 2
+
+typedef struct SonicContext {
+ int lossless, decorrelation;
+
+ int num_taps, downsampling;
+ double quantization;
+
+ int channels, samplerate, block_align, frame_size;
+
+ int *tap_quant;
+ int *int_samples;
+ int *coded_samples[MAX_CHANNELS];
+
+ // for encoding
+ int *tail;
+ int tail_size;
+ int *window;
+ int window_size;
+
+ // for decoding
+ int *predictor_k;
+ int *predictor_state[MAX_CHANNELS];
+} SonicContext;
+
+#define LATTICE_SHIFT 10
+#define SAMPLE_SHIFT 4
+#define LATTICE_FACTOR (1 << LATTICE_SHIFT)
+#define SAMPLE_FACTOR (1 << SAMPLE_SHIFT)
+
+#define BASE_QUANT 0.6
+#define RATE_VARIATION 3.0
+
+static inline int divide(int a, int b)
+{
+ if (a < 0)
+ return -( (-a + b/2)/b );
+ else
+ return (a + b/2)/b;
+}
+
+static inline int shift(int a,int b)
+{
+ return (a+(1<<(b-1))) >> b;
+}
+
+static inline int shift_down(int a,int b)
+{
+ return (a>>b)+((a<0)?1:0);
+}
+
+#if 1
+static inline int intlist_write(PutBitContext *pb, int *buf, int entries, int base_2_part)
+{
+ int i;
+
+ for (i = 0; i < entries; i++)
+ set_se_golomb(pb, buf[i]);
+
+ return 1;
+}
+
+static inline int intlist_read(GetBitContext *gb, int *buf, int entries, int base_2_part)
+{
+ int i;
+
+ for (i = 0; i < entries; i++)
+ buf[i] = get_se_golomb(gb);
+
+ return 1;
+}
+
+#else
+
+#define ADAPT_LEVEL 8
+
+static int bits_to_store(uint64_t x)
+{
+ int res = 0;
+
+ while(x)
+ {
+ res++;
+ x >>= 1;
+ }
+ return res;
+}
+
+static void write_uint_max(PutBitContext *pb, unsigned int value, unsigned int max)
+{
+ int i, bits;
+
+ if (!max)
+ return;
+
+ bits = bits_to_store(max);
+
+ for (i = 0; i < bits-1; i++)
+ put_bits(pb, 1, value & (1 << i));
+
+ if ( (value | (1 << (bits-1))) <= max)
+ put_bits(pb, 1, value & (1 << (bits-1)));
+}
+
+static unsigned int read_uint_max(GetBitContext *gb, int max)
+{
+ int i, bits, value = 0;
+
+ if (!max)
+ return 0;
+
+ bits = bits_to_store(max);
+
+ for (i = 0; i < bits-1; i++)
+ if (get_bits1(gb))
+ value += 1 << i;
+
+ if ( (value | (1<<(bits-1))) <= max)
+ if (get_bits1(gb))
+ value += 1 << (bits-1);
+
+ return value;
+}
+
+static int intlist_write(PutBitContext *pb, int *buf, int entries, int base_2_part)
+{
+ int i, j, x = 0, low_bits = 0, max = 0;
+ int step = 256, pos = 0, dominant = 0, any = 0;
+ int *copy, *bits;
+
+ copy = av_mallocz(4* entries);
+ if (!copy)
+ return -1;
+
+ if (base_2_part)
+ {
+ int energy = 0;
+
+ for (i = 0; i < entries; i++)
+ energy += abs(buf[i]);
+
+ low_bits = bits_to_store(energy / (entries * 2));
+ if (low_bits > 15)
+ low_bits = 15;
+
+ put_bits(pb, 4, low_bits);
+ }
+
+ for (i = 0; i < entries; i++)
+ {
+ put_bits(pb, low_bits, abs(buf[i]));
+ copy[i] = abs(buf[i]) >> low_bits;
+ if (copy[i] > max)
+ max = abs(copy[i]);
+ }
+
+ bits = av_mallocz(4* entries*max);
+ if (!bits)
+ {
+// av_free(copy);
+ return -1;
+ }
+
+ for (i = 0; i <= max; i++)
+ {
+ for (j = 0; j < entries; j++)
+ if (copy[j] >= i)
+ bits[x++] = copy[j] > i;
+ }
+
+ // store bitstream
+ while (pos < x)
+ {
+ int steplet = step >> 8;
+
+ if (pos + steplet > x)
+ steplet = x - pos;
+
+ for (i = 0; i < steplet; i++)
+ if (bits[i+pos] != dominant)
+ any = 1;
+
+ put_bits(pb, 1, any);
+
+ if (!any)
+ {
+ pos += steplet;
+ step += step / ADAPT_LEVEL;
+ }
+ else
+ {
+ int interloper = 0;
+
+ while (((pos + interloper) < x) && (bits[pos + interloper] == dominant))
+ interloper++;
+
+ // note change
+ write_uint_max(pb, interloper, (step >> 8) - 1);
+
+ pos += interloper + 1;
+ step -= step / ADAPT_LEVEL;
+ }
+
+ if (step < 256)
+ {
+ step = 65536 / step;
+ dominant = !dominant;
+ }
+ }
+
+ // store signs
+ for (i = 0; i < entries; i++)
+ if (buf[i])
+ put_bits(pb, 1, buf[i] < 0);
+
+// av_free(bits);
+// av_free(copy);
+
+ return 0;
+}
+
+static int intlist_read(GetBitContext *gb, int *buf, int entries, int base_2_part)
+{
+ int i, low_bits = 0, x = 0;
+ int n_zeros = 0, step = 256, dominant = 0;
+ int pos = 0, level = 0;
+ int *bits = av_mallocz(4* entries);
+
+ if (!bits)
+ return -1;
+
+ if (base_2_part)
+ {
+ low_bits = get_bits(gb, 4);
+
+ if (low_bits)
+ for (i = 0; i < entries; i++)
+ buf[i] = get_bits(gb, low_bits);
+ }
+
+// av_log(NULL, AV_LOG_INFO, "entries: %d, low bits: %d\n", entries, low_bits);
+
+ while (n_zeros < entries)
+ {
+ int steplet = step >> 8;
+
+ if (!get_bits1(gb))
+ {
+ for (i = 0; i < steplet; i++)
+ bits[x++] = dominant;
+
+ if (!dominant)
+ n_zeros += steplet;
+
+ step += step / ADAPT_LEVEL;
+ }
+ else
+ {
+ int actual_run = read_uint_max(gb, steplet-1);
+
+// av_log(NULL, AV_LOG_INFO, "actual run: %d\n", actual_run);
+
+ for (i = 0; i < actual_run; i++)
+ bits[x++] = dominant;
+
+ bits[x++] = !dominant;
+
+ if (!dominant)
+ n_zeros += actual_run;
+ else
+ n_zeros++;
+
+ step -= step / ADAPT_LEVEL;
+ }
+
+ if (step < 256)
+ {
+ step = 65536 / step;
+ dominant = !dominant;
+ }
+ }
+
+ // reconstruct unsigned values
+ n_zeros = 0;
+ for (i = 0; n_zeros < entries; i++)
+ {
+ while(1)
+ {
+ if (pos >= entries)
+ {
+ pos = 0;
+ level += 1 << low_bits;
+ }
+
+ if (buf[pos] >= level)
+ break;
+
+ pos++;
+ }
+
+ if (bits[i])
+ buf[pos] += 1 << low_bits;
+ else
+ n_zeros++;
+
+ pos++;
+ }
+// av_free(bits);
+
+ // read signs
+ for (i = 0; i < entries; i++)
+ if (buf[i] && get_bits1(gb))
+ buf[i] = -buf[i];
+
+// av_log(NULL, AV_LOG_INFO, "zeros: %d pos: %d\n", n_zeros, pos);
+
+ return 0;
+}
+#endif
+
+static void predictor_init_state(int *k, int *state, int order)
+{
+ int i;
+
+ for (i = order-2; i >= 0; i--)
+ {
+ int j, p, x = state[i];
+
+ for (j = 0, p = i+1; p < order; j++,p++)
+ {
+ int tmp = x + shift_down(k[j] * state[p], LATTICE_SHIFT);
+ state[p] += shift_down(k[j]*x, LATTICE_SHIFT);
+ x = tmp;
+ }
+ }
+}
+
+static int predictor_calc_error(int *k, int *state, int order, int error)
+{
+ int i, x = error - shift_down(k[order-1] * state[order-1], LATTICE_SHIFT);
+
+#if 1
+ int *k_ptr = &(k[order-2]),
+ *state_ptr = &(state[order-2]);
+ for (i = order-2; i >= 0; i--, k_ptr--, state_ptr--)
+ {
+ int k_value = *k_ptr, state_value = *state_ptr;
+ x -= shift_down(k_value * state_value, LATTICE_SHIFT);
+ state_ptr[1] = state_value + shift_down(k_value * x, LATTICE_SHIFT);
+ }
+#else
+ for (i = order-2; i >= 0; i--)
+ {
+ x -= shift_down(k[i] * state[i], LATTICE_SHIFT);
+ state[i+1] = state[i] + shift_down(k[i] * x, LATTICE_SHIFT);
+ }
+#endif
+
+ // don't drift too far, to avoid overflows
+ if (x > (SAMPLE_FACTOR<<16)) x = (SAMPLE_FACTOR<<16);
+ if (x < -(SAMPLE_FACTOR<<16)) x = -(SAMPLE_FACTOR<<16);
+
+ state[0] = x;
+
+ return x;
+}
+
+#ifdef CONFIG_ENCODERS
+// Heavily modified Levinson-Durbin algorithm which
+// copes better with quantization, and calculates the
+// actual whitened result as it goes.
+
+static void modified_levinson_durbin(int *window, int window_entries,
+ int *out, int out_entries, int channels, int *tap_quant)
+{
+ int i;
+ int *state = av_mallocz(4* window_entries);
+
+ memcpy(state, window, 4* window_entries);
+
+ for (i = 0; i < out_entries; i++)
+ {
+ int step = (i+1)*channels, k, j;
+ double xx = 0.0, xy = 0.0;
+#if 1
+ int *x_ptr = &(window[step]), *state_ptr = &(state[0]);
+ j = window_entries - step;
+ for (;j>=0;j--,x_ptr++,state_ptr++)
+ {
+ double x_value = *x_ptr, state_value = *state_ptr;
+ xx += state_value*state_value;
+ xy += x_value*state_value;
+ }
+#else
+ for (j = 0; j <= (window_entries - step); j++);
+ {
+ double stepval = window[step+j], stateval = window[j];
+// xx += (double)window[j]*(double)window[j];
+// xy += (double)window[step+j]*(double)window[j];
+ xx += stateval*stateval;
+ xy += stepval*stateval;
+ }
+#endif
+ if (xx == 0.0)
+ k = 0;
+ else
+ k = (int)(floor(-xy/xx * (double)LATTICE_FACTOR / (double)(tap_quant[i]) + 0.5));
+
+ if (k > (LATTICE_FACTOR/tap_quant[i]))
+ k = LATTICE_FACTOR/tap_quant[i];
+ if (-k > (LATTICE_FACTOR/tap_quant[i]))
+ k = -(LATTICE_FACTOR/tap_quant[i]);
+
+ out[i] = k;
+ k *= tap_quant[i];
+
+#if 1
+ x_ptr = &(window[step]);
+ state_ptr = &(state[0]);
+ j = window_entries - step;
+ for (;j>=0;j--,x_ptr++,state_ptr++)
+ {
+ int x_value = *x_ptr, state_value = *state_ptr;
+ *x_ptr = x_value + shift_down(k*state_value,LATTICE_SHIFT);
+ *state_ptr = state_value + shift_down(k*x_value, LATTICE_SHIFT);
+ }
+#else
+ for (j=0; j <= (window_entries - step); j++)
+ {
+ int stepval = window[step+j], stateval=state[j];
+ window[step+j] += shift_down(k * stateval, LATTICE_SHIFT);
+ state[j] += shift_down(k * stepval, LATTICE_SHIFT);
+ }
+#endif
+ }
+
+ av_free(state);
+}
+#endif /* CONFIG_ENCODERS */
+
+static int samplerate_table[] =
+ { 44100, 22050, 11025, 96000, 48000, 32000, 24000, 16000, 8000 };
+
+#ifdef CONFIG_ENCODERS
+
+static inline int code_samplerate(int samplerate)
+{
+ switch (samplerate)
+ {
+ case 44100: return 0;
+ case 22050: return 1;
+ case 11025: return 2;
+ case 96000: return 3;
+ case 48000: return 4;
+ case 32000: return 5;
+ case 24000: return 6;
+ case 16000: return 7;
+ case 8000: return 8;
+ }
+ return -1;
+}
+
+static int sonic_encode_init(AVCodecContext *avctx)
+{
+ SonicContext *s = avctx->priv_data;
+ PutBitContext pb;
+ int i, version = 0;
+
+ if (avctx->channels > MAX_CHANNELS)
+ {
+ av_log(avctx, AV_LOG_ERROR, "Only mono and stereo streams are supported by now\n");
+ return -1; /* only stereo or mono for now */
+ }
+
+ if (avctx->channels == 2)
+ s->decorrelation = MID_SIDE;
+
+ if (avctx->codec->id == CODEC_ID_SONIC_LS)
+ {
+ s->lossless = 1;
+ s->num_taps = 32;
+ s->downsampling = 1;
+ s->quantization = 0.0;
+ }
+ else
+ {
+ s->num_taps = 128;
+ s->downsampling = 2;
+ s->quantization = 1.0;
+ }
+
+ // max tap 2048
+ if ((s->num_taps < 32) || (s->num_taps > 1024) ||
+ ((s->num_taps>>5)<<5 != s->num_taps))
+ {
+ av_log(avctx, AV_LOG_ERROR, "Invalid number of taps\n");
+ return -1;
+ }
+
+ // generate taps
+ s->tap_quant = av_mallocz(4* s->num_taps);
+ for (i = 0; i < s->num_taps; i++)
+ s->tap_quant[i] = (int)(sqrt(i+1));
+
+ s->channels = avctx->channels;
+ s->samplerate = avctx->sample_rate;
+
+ s->block_align = (int)(2048.0*s->samplerate/44100)/s->downsampling;
+ s->frame_size = s->channels*s->block_align*s->downsampling;
+
+ s->tail = av_mallocz(4* s->num_taps*s->channels);
+ if (!s->tail)
+ return -1;
+ s->tail_size = s->num_taps*s->channels;
+
+ s->predictor_k = av_mallocz(4 * s->num_taps);
+ if (!s->predictor_k)
+ return -1;
+
+ for (i = 0; i < s->channels; i++)
+ {
+ s->coded_samples[i] = av_mallocz(4* s->block_align);
+ if (!s->coded_samples[i])
+ return -1;
+ }
+
+ s->int_samples = av_mallocz(4* s->frame_size);
+
+ s->window_size = ((2*s->tail_size)+s->frame_size);
+ s->window = av_mallocz(4* s->window_size);
+ if (!s->window)
+ return -1;
+
+ avctx->extradata = av_mallocz(16);
+ if (!avctx->extradata)
+ return -1;
+ init_put_bits(&pb, avctx->extradata, 16*8);
+
+ put_bits(&pb, 2, version); // version
+ if (version == 1)
+ {
+ put_bits(&pb, 2, s->channels);
+ put_bits(&pb, 4, code_samplerate(s->samplerate));
+ }
+ put_bits(&pb, 1, s->lossless);
+ if (!s->lossless)
+ put_bits(&pb, 3, SAMPLE_SHIFT); // XXX FIXME: sample precision
+ put_bits(&pb, 2, s->decorrelation);
+ put_bits(&pb, 2, s->downsampling);
+ put_bits(&pb, 5, (s->num_taps >> 5)-1); // 32..1024
+ put_bits(&pb, 1, 0); // XXX FIXME: no custom tap quant table
+
+ flush_put_bits(&pb);
+ avctx->extradata_size = put_bits_count(&pb)/8;
+
+ av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
+ version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
+
+ avctx->coded_frame = avcodec_alloc_frame();
+ if (!avctx->coded_frame)
+ return -ENOMEM;
+ avctx->coded_frame->key_frame = 1;
+ avctx->frame_size = s->block_align*s->downsampling;
+
+ return 0;
+}
+
+static int sonic_encode_close(AVCodecContext *avctx)
+{
+ SonicContext *s = avctx->priv_data;
+ int i;
+
+ av_freep(&avctx->coded_frame);
+
+ for (i = 0; i < s->channels; i++)
+ av_free(s->coded_samples[i]);
+
+ av_free(s->predictor_k);
+ av_free(s->tail);
+ av_free(s->tap_quant);
+ av_free(s->window);
+ av_free(s->int_samples);
+
+ return 0;
+}
+
+static int sonic_encode_frame(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size, void *data)
+{
+ SonicContext *s = avctx->priv_data;
+ PutBitContext pb;
+ int i, j, ch, quant = 0, x = 0;
+ short *samples = data;
+
+ init_put_bits(&pb, buf, buf_size*8);
+
+ // short -> internal
+ for (i = 0; i < s->frame_size; i++)
+ s->int_samples[i] = samples[i];
+
+ if (!s->lossless)
+ for (i = 0; i < s->frame_size; i++)
+ s->int_samples[i] = s->int_samples[i] << SAMPLE_SHIFT;
+
+ switch(s->decorrelation)
+ {
+ case MID_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ {
+ s->int_samples[i] += s->int_samples[i+1];
+ s->int_samples[i+1] -= shift(s->int_samples[i], 1);
+ }
+ break;
+ case LEFT_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ s->int_samples[i+1] -= s->int_samples[i];
+ break;
+ case RIGHT_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ s->int_samples[i] -= s->int_samples[i+1];
+ break;
+ }
+
+ memset(s->window, 0, 4* s->window_size);
+
+ for (i = 0; i < s->tail_size; i++)
+ s->window[x++] = s->tail[i];
+
+ for (i = 0; i < s->frame_size; i++)
+ s->window[x++] = s->int_samples[i];
+
+ for (i = 0; i < s->tail_size; i++)
+ s->window[x++] = 0;
+
+ for (i = 0; i < s->tail_size; i++)
+ s->tail[i] = s->int_samples[s->frame_size - s->tail_size + i];
+
+ // generate taps
+ modified_levinson_durbin(s->window, s->window_size,
+ s->predictor_k, s->num_taps, s->channels, s->tap_quant);
+ if (intlist_write(&pb, s->predictor_k, s->num_taps, 0) < 0)
+ return -1;
+
+ for (ch = 0; ch < s->channels; ch++)
+ {
+ x = s->tail_size+ch;
+ for (i = 0; i < s->block_align; i++)
+ {
+ int sum = 0;
+ for (j = 0; j < s->downsampling; j++, x += s->channels)
+ sum += s->window[x];
+ s->coded_samples[ch][i] = sum;
+ }
+ }
+
+ // simple rate control code
+ if (!s->lossless)
+ {
+ double energy1 = 0.0, energy2 = 0.0;
+ for (ch = 0; ch < s->channels; ch++)
+ {
+ for (i = 0; i < s->block_align; i++)
+ {
+ double sample = s->coded_samples[ch][i];
+ energy2 += sample*sample;
+ energy1 += fabs(sample);
+ }
+ }
+
+ energy2 = sqrt(energy2/(s->channels*s->block_align));
+ energy1 = sqrt(2.0)*energy1/(s->channels*s->block_align);
+
+ // increase bitrate when samples are like a gaussian distribution
+ // reduce bitrate when samples are like a two-tailed exponential distribution
+
+ if (energy2 > energy1)
+ energy2 += (energy2-energy1)*RATE_VARIATION;
+
+ quant = (int)(BASE_QUANT*s->quantization*energy2/SAMPLE_FACTOR);
+// av_log(avctx, AV_LOG_DEBUG, "quant: %d energy: %f / %f\n", quant, energy1, energy2);
+
+ if (quant < 1)
+ quant = 1;
+ if (quant > 65535)
+ quant = 65535;
+
+ set_ue_golomb(&pb, quant);
+
+ quant *= SAMPLE_FACTOR;
+ }
+
+ // write out coded samples
+ for (ch = 0; ch < s->channels; ch++)
+ {
+ if (!s->lossless)
+ for (i = 0; i < s->block_align; i++)
+ s->coded_samples[ch][i] = divide(s->coded_samples[ch][i], quant);
+
+ if (intlist_write(&pb, s->coded_samples[ch], s->block_align, 1) < 0)
+ return -1;
+ }
+
+// av_log(avctx, AV_LOG_DEBUG, "used bytes: %d\n", (put_bits_count(&pb)+7)/8);
+
+ flush_put_bits(&pb);
+ return (put_bits_count(&pb)+7)/8;
+}
+#endif //CONFIG_ENCODERS
+
+#ifdef CONFIG_DECODERS
+static int sonic_decode_init(AVCodecContext *avctx)
+{
+ SonicContext *s = avctx->priv_data;
+ GetBitContext gb;
+ int i, version;
+
+ s->channels = avctx->channels;
+ s->samplerate = avctx->sample_rate;
+
+ if (!avctx->extradata)
+ {
+ av_log(avctx, AV_LOG_ERROR, "No mandatory headers present\n");
+ return -1;
+ }
+
+ init_get_bits(&gb, avctx->extradata, avctx->extradata_size);
+
+ version = get_bits(&gb, 2);
+ if (version > 1)
+ {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported Sonic version, please report\n");
+ return -1;
+ }
+
+ if (version == 1)
+ {
+ s->channels = get_bits(&gb, 2);
+ s->samplerate = samplerate_table[get_bits(&gb, 4)];
+ av_log(avctx, AV_LOG_INFO, "Sonicv2 chans: %d samprate: %d\n",
+ s->channels, s->samplerate);
+ }
+
+ if (s->channels > MAX_CHANNELS)
+ {
+ av_log(avctx, AV_LOG_ERROR, "Only mono and stereo streams are supported by now\n");
+ return -1;
+ }
+
+ s->lossless = get_bits1(&gb);
+ if (!s->lossless)
+ skip_bits(&gb, 3); // XXX FIXME
+ s->decorrelation = get_bits(&gb, 2);
+
+ s->downsampling = get_bits(&gb, 2);
+ s->num_taps = (get_bits(&gb, 5)+1)<<5;
+ if (get_bits1(&gb)) // XXX FIXME
+ av_log(avctx, AV_LOG_INFO, "Custom quant table\n");
+
+ s->block_align = (int)(2048.0*(s->samplerate/44100))/s->downsampling;
+ s->frame_size = s->channels*s->block_align*s->downsampling;
+// avctx->frame_size = s->block_align;
+
+ av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
+ version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
+
+ // generate taps
+ s->tap_quant = av_mallocz(4* s->num_taps);
+ for (i = 0; i < s->num_taps; i++)
+ s->tap_quant[i] = (int)(sqrt(i+1));
+
+ s->predictor_k = av_mallocz(4* s->num_taps);
+
+ for (i = 0; i < s->channels; i++)
+ {
+ s->predictor_state[i] = av_mallocz(4* s->num_taps);
+ if (!s->predictor_state[i])
+ return -1;
+ }
+
+ for (i = 0; i < s->channels; i++)
+ {
+ s->coded_samples[i] = av_mallocz(4* s->block_align);
+ if (!s->coded_samples[i])
+ return -1;
+ }
+ s->int_samples = av_mallocz(4* s->frame_size);
+
+ return 0;
+}
+
+static int sonic_decode_close(AVCodecContext *avctx)
+{
+ SonicContext *s = avctx->priv_data;
+ int i;
+
+ av_free(s->int_samples);
+ av_free(s->tap_quant);
+ av_free(s->predictor_k);
+
+ for (i = 0; i < s->channels; i++)
+ {
+ av_free(s->predictor_state[i]);
+ av_free(s->coded_samples[i]);
+ }
+
+ return 0;
+}
+
+static int sonic_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ SonicContext *s = avctx->priv_data;
+ GetBitContext gb;
+ int i, quant, ch, j;
+ short *samples = data;
+
+ if (buf_size == 0) return 0;
+
+// av_log(NULL, AV_LOG_INFO, "buf_size: %d\n", buf_size);
+
+ init_get_bits(&gb, buf, buf_size*8);
+
+ intlist_read(&gb, s->predictor_k, s->num_taps, 0);
+
+ // dequantize
+ for (i = 0; i < s->num_taps; i++)
+ s->predictor_k[i] *= s->tap_quant[i];
+
+ if (s->lossless)
+ quant = 1;
+ else
+ quant = get_ue_golomb(&gb) * SAMPLE_FACTOR;
+
+// av_log(NULL, AV_LOG_INFO, "quant: %d\n", quant);
+
+ for (ch = 0; ch < s->channels; ch++)
+ {
+ int x = ch;
+
+ predictor_init_state(s->predictor_k, s->predictor_state[ch], s->num_taps);
+
+ intlist_read(&gb, s->coded_samples[ch], s->block_align, 1);
+
+ for (i = 0; i < s->block_align; i++)
+ {
+ for (j = 0; j < s->downsampling - 1; j++)
+ {
+ s->int_samples[x] = predictor_calc_error(s->predictor_k, s->predictor_state[ch], s->num_taps, 0);
+ x += s->channels;
+ }
+
+ s->int_samples[x] = predictor_calc_error(s->predictor_k, s->predictor_state[ch], s->num_taps, s->coded_samples[ch][i] * quant);
+ x += s->channels;
+ }
+
+ for (i = 0; i < s->num_taps; i++)
+ s->predictor_state[ch][i] = s->int_samples[s->frame_size - s->channels + ch - i*s->channels];
+ }
+
+ switch(s->decorrelation)
+ {
+ case MID_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ {
+ s->int_samples[i+1] += shift(s->int_samples[i], 1);
+ s->int_samples[i] -= s->int_samples[i+1];
+ }
+ break;
+ case LEFT_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ s->int_samples[i+1] += s->int_samples[i];
+ break;
+ case RIGHT_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ s->int_samples[i] += s->int_samples[i+1];
+ break;
+ }
+
+ if (!s->lossless)
+ for (i = 0; i < s->frame_size; i++)
+ s->int_samples[i] = shift(s->int_samples[i], SAMPLE_SHIFT);
+
+ // internal -> short
+ for (i = 0; i < s->frame_size; i++)
+ {
+ if (s->int_samples[i] > 32767)
+ samples[i] = 32767;
+ else if (s->int_samples[i] < -32768)
+ samples[i] = -32768;
+ else
+ samples[i] = s->int_samples[i];
+ }
+
+ align_get_bits(&gb);
+
+ *data_size = s->frame_size * 2;
+
+ return (get_bits_count(&gb)+7)/8;
+}
+#endif
+
+#ifdef CONFIG_ENCODERS
+AVCodec sonic_encoder = {
+ "sonic",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_SONIC,
+ sizeof(SonicContext),
+ sonic_encode_init,
+ sonic_encode_frame,
+ sonic_encode_close,
+ NULL,
+};
+
+AVCodec sonic_ls_encoder = {
+ "sonicls",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_SONIC_LS,
+ sizeof(SonicContext),
+ sonic_encode_init,
+ sonic_encode_frame,
+ sonic_encode_close,
+ NULL,
+};
+#endif
+
+#ifdef CONFIG_DECODERS
+AVCodec sonic_decoder = {
+ "sonic",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_SONIC,
+ sizeof(SonicContext),
+ sonic_decode_init,
+ NULL,
+ sonic_decode_close,
+ sonic_decode_frame,
+};
+#endif
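[Editor's note, not part of the patch: the new sonic.c above decorrelates stereo input before coding. In MID_SIDE mode the encoder stores the channel sum plus a side value, and the decoder undoes this exactly because it adds back the same rounded half that the encoder subtracted. A self-contained round trip using the same shift() rounding as the codec:]

    #include <assert.h>

    static int shift1(int a) { return (a + 1) >> 1; }   /* shift(a, 1) from sonic.c */

    int main(void)
    {
        int L = 1234, R = -987;
        int mid  = L + R;               /* encoder, MID_SIDE: slot 0 holds the sum  */
        int side = R - shift1(mid);     /* encoder, MID_SIDE: slot 1 holds the side */
        int r = side + shift1(mid);     /* decoder: recover the right channel       */
        int l = mid - r;                /* decoder: recover the left channel        */
        assert(l == L && r == R);       /* the transform is lossless                */
        return 0;
    }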
diff --git a/src/libffmpeg/libavcodec/sp5x.h b/contrib/ffmpeg/libavcodec/sp5x.h
index 72ae1cab1..0d0d3551f 100644
--- a/src/libffmpeg/libavcodec/sp5x.h
+++ b/contrib/ffmpeg/libavcodec/sp5x.h
@@ -2,18 +2,20 @@
* Sunplus JPEG tables
* Copyright (c) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/sparc/dsputil_vis.c b/contrib/ffmpeg/libavcodec/sparc/dsputil_vis.c
index f4ac3883d..5e59ce776 100644
--- a/src/libffmpeg/libavcodec/sparc/dsputil_vis.c
+++ b/contrib/ffmpeg/libavcodec/sparc/dsputil_vis.c
@@ -2,21 +2,20 @@
* dsputil_vis.c
* Copyright (C) 2003 David S. Miller <davem@redhat.com>
*
- * This file is part of ffmpeg, a free MPEG-4 video stream decoder.
- * See http://ffmpeg.sourceforge.net/ for updates.
+ * This file is part of FFmpeg.
*
- * ffmpeg is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published by
- * the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
*
- * ffmpeg is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the Lesser GNU General Public License
- * along with this program; if not, write to the Free Software
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/sparc/vis.h b/contrib/ffmpeg/libavcodec/sparc/vis.h
index dfdf2f619..d4a8ce092 100644
--- a/src/libffmpeg/libavcodec/sparc/vis.h
+++ b/contrib/ffmpeg/libavcodec/sparc/vis.h
@@ -2,21 +2,20 @@
* vis.h
* Copyright (C) 2003 David S. Miller <davem@redhat.com>
*
- * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
- * See http://libmpeg2.sourceforge.net/ for updates.
+ * This file is part of FFmpeg.
*
- * mpeg2dec is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
*
- * mpeg2dec is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/svq1.c b/contrib/ffmpeg/libavcodec/svq1.c
index 98a7a3cd8..5e8616269 100644
--- a/src/libffmpeg/libavcodec/svq1.c
+++ b/contrib/ffmpeg/libavcodec/svq1.c
@@ -3,18 +3,20 @@
* Copyright (C) 2002 the xine project
* Copyright (C) 2002 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* (SVQ1 Decoder)
@@ -617,6 +619,7 @@ static uint16_t svq1_component_checksum (uint16_t *pixels, int pitch,
}
#endif
+#ifdef CONFIG_DECODERS
static void svq1_parse_string (GetBitContext *bitbuf, uint8_t *out) {
uint8_t seed;
int i;
@@ -655,9 +658,9 @@ static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
}
if ((s->f_code ^ 0x10) >= 0x50) {
- char msg[256];
+ uint8_t msg[256];
- svq1_parse_string (bitbuf, (char *) msg);
+ svq1_parse_string (bitbuf, msg);
av_log(s->avctx, AV_LOG_INFO, "embedded message: \"%s\"\n", (char *) msg);
}
@@ -879,7 +882,9 @@ static int svq1_decode_end(AVCodecContext *avctx)
MPV_common_end(s);
return 0;
}
+#endif /* CONFIG_DECODERS */
+#ifdef CONFIG_ENCODERS
static void svq1_write_header(SVQ1Context *s, int frame_type)
{
int i;
@@ -900,7 +905,7 @@ static void svq1_write_header(SVQ1Context *s, int frame_type)
/* no embedded string either */
/* output 5 unknown bits (2 + 2 + 1) */
- put_bits(&s->pb, 5, 0);
+ put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */
for (i = 0; i < 7; i++)
{
@@ -1081,7 +1086,6 @@ static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *dec
return best_score;
}
-#ifdef CONFIG_ENCODERS
static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane,
int width, int height, int src_stride, int stride)
@@ -1351,7 +1355,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
init_put_bits(&s->pb, buf, buf_size);
*p = *pict;
- p->pict_type = avctx->frame_number % avctx->gop_size ? P_TYPE : I_TYPE;
+ p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? P_TYPE : I_TYPE;
p->key_frame = p->pict_type == I_TYPE;
svq1_write_header(s, p->pict_type);
@@ -1395,6 +1399,7 @@ static int svq1_encode_end(AVCodecContext *avctx)
#endif //CONFIG_ENCODERS
+#ifdef CONFIG_DECODERS
AVCodec svq1_decoder = {
"svq1",
CODEC_TYPE_VIDEO,
@@ -1408,6 +1413,7 @@ AVCodec svq1_decoder = {
.flush= ff_mpeg_flush,
.pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV410P, -1},
};
+#endif
#ifdef CONFIG_ENCODERS
diff --git a/src/libffmpeg/libavcodec/svq1_cb.h b/contrib/ffmpeg/libavcodec/svq1_cb.h
index ef097457e..a0748bd44 100644
--- a/src/libffmpeg/libavcodec/svq1_cb.h
+++ b/contrib/ffmpeg/libavcodec/svq1_cb.h
@@ -3,18 +3,20 @@
* Copyright (C) 2002 the xine project
* Copyright (C) 2002 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Ported to mplayer by Arpi <arpi@thot.banki.hu>
diff --git a/src/libffmpeg/libavcodec/svq1_vlc.h b/contrib/ffmpeg/libavcodec/svq1_vlc.h
index 4d405334d..56463700f 100644
--- a/src/libffmpeg/libavcodec/svq1_vlc.h
+++ b/contrib/ffmpeg/libavcodec/svq1_vlc.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (C) 2003 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#ifndef SVQ1_VLC_H
#define SVQ1_VLC_H
diff --git a/src/libffmpeg/libavcodec/svq3.c b/contrib/ffmpeg/libavcodec/svq3.c
index cfe7f7d22..edf3b6714 100644
--- a/src/libffmpeg/libavcodec/svq3.c
+++ b/contrib/ffmpeg/libavcodec/svq3.c
@@ -1,18 +1,20 @@
/*
* Copyright (c) 2003 The FFmpeg Project.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*
@@ -145,7 +147,7 @@ static void svq3_luma_dc_dequant_idct_c(DCTELEM *block, int qp){
static void svq3_add_idct_c (uint8_t *dst, DCTELEM *block, int stride, int qp, int dc){
const int qmul= svq3_dequant_coeff[qp];
int i;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
if (dc) {
dc = 13*13*((dc == 1) ? 1538*block[0] : ((qmul*(block[0] >> 3)) / 2));
diff --git a/contrib/ffmpeg/libavcodec/targa.c b/contrib/ffmpeg/libavcodec/targa.c
new file mode 100644
index 000000000..4eb18f87e
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/targa.c
@@ -0,0 +1,254 @@
+/*
+ * Targa (.tga) image decoder
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include "avcodec.h"
+
+enum TargaCompr{
+ TGA_NODATA = 0, // no image data
+ TGA_PAL = 1, // palettized
+ TGA_RGB = 2, // true-color
+ TGA_BW = 3, // black & white or grayscale
+ TGA_RLE = 8, // flag indicating that the data is RLE-coded
+};
+
+typedef struct TargaContext {
+ AVFrame picture;
+
+ int width, height;
+ int bpp;
+ int color_type;
+ int compression_type;
+} TargaContext;
+
+static void targa_decode_rle(AVCodecContext *avctx, TargaContext *s, uint8_t *src, uint8_t *dst, int w, int h, int stride, int bpp)
+{
+ int i, x, y;
+ int depth = (bpp + 1) >> 3;
+ int type, count;
+ int diff;
+
+ diff = stride - w * depth;
+ x = y = 0;
+ while(y < h){
+ type = *src++;
+ count = (type & 0x7F) + 1;
+ type &= 0x80;
+ if((x + count > w) && (x + count + 1 > (h - y) * w)){
+ av_log(avctx, AV_LOG_ERROR, "Packet went out of bounds: position (%i,%i) size %i\n", x, y, count);
+ return;
+ }
+ for(i = 0; i < count; i++){
+ switch(depth){
+ case 1:
+ *dst = *src;
+ break;
+ case 2:
+ *((uint16_t*)dst) = LE_16(src);
+ break;
+ case 3:
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ break;
+ case 4:
+ *((uint32_t*)dst) = LE_32(src);
+ break;
+ }
+ dst += depth;
+ if(!type)
+ src += depth;
+
+ x++;
+ if(x == w){
+ x = 0;
+ y++;
+ dst += diff;
+ }
+ }
+ if(type)
+ src += depth;
+ }
+}
+
+static int decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ TargaContext * const s = avctx->priv_data;
+ AVFrame *picture = data;
+ AVFrame * const p= (AVFrame*)&s->picture;
+ uint8_t *dst;
+ int stride;
+ int idlen, pal, compr, x, y, w, h, bpp, flags;
+ int first_clr, colors, csize;
+
+ /* parse image header */
+ idlen = *buf++;
+ pal = *buf++;
+ compr = *buf++;
+ first_clr = LE_16(buf); buf += 2;
+ colors = LE_16(buf); buf += 2;
+ csize = *buf++;
+ x = LE_16(buf); buf += 2;
+ y = LE_16(buf); buf += 2;
+ w = LE_16(buf); buf += 2;
+ h = LE_16(buf); buf += 2;
+ bpp = *buf++;
+ flags = *buf++;
+ //skip identifier if any
+ buf += idlen;
+ s->bpp = bpp;
+ s->width = w;
+ s->height = h;
+ switch(s->bpp){
+ case 8:
+ avctx->pix_fmt = ((compr & (~TGA_RLE)) == TGA_BW) ? PIX_FMT_GRAY8 : PIX_FMT_PAL8;
+ break;
+ case 15:
+ avctx->pix_fmt = PIX_FMT_RGB555;
+ break;
+ case 16:
+ avctx->pix_fmt = PIX_FMT_RGB555;
+ break;
+ case 24:
+ avctx->pix_fmt = PIX_FMT_BGR24;
+ break;
+ case 32:
+ avctx->pix_fmt = PIX_FMT_RGBA32;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Bit depth %i is not supported\n", s->bpp);
+ return -1;
+ }
+
+ if(s->picture.data[0])
+ avctx->release_buffer(avctx, &s->picture);
+
+ if(avcodec_check_dimensions(avctx, w, h))
+ return -1;
+ if(w != avctx->width || h != avctx->height)
+ avcodec_set_dimensions(avctx, w, h);
+ if(avctx->get_buffer(avctx, p) < 0){
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+ if(flags & 0x20){
+ dst = p->data[0];
+ stride = p->linesize[0];
+ }else{ //image is upside-down
+ dst = p->data[0] + p->linesize[0] * (h - 1);
+ stride = -p->linesize[0];
+ }
+
+ if(avctx->pix_fmt == PIX_FMT_PAL8 && avctx->palctrl){
+ memcpy(p->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
+ if(avctx->palctrl->palette_changed){
+ p->palette_has_changed = 1;
+ avctx->palctrl->palette_changed = 0;
+ }
+ }
+ if(colors){
+ if((colors + first_clr) > 256){
+ av_log(avctx, AV_LOG_ERROR, "Incorrect palette: %i colors with offset %i\n", colors, first_clr);
+ return -1;
+ }
+ if(csize != 24){
+ av_log(avctx, AV_LOG_ERROR, "Palette entry size %i bits is not supported\n", csize);
+ return -1;
+ }
+ if(avctx->pix_fmt != PIX_FMT_PAL8)//should not occur but skip palette anyway
+ buf += colors * ((csize + 1) >> 3);
+ else{
+ int r, g, b, t;
+ int32_t *pal = ((int32_t*)p->data[1]) + first_clr;
+ for(t = 0; t < colors; t++){
+ r = *buf++;
+ g = *buf++;
+ b = *buf++;
+ *pal++ = (b << 16) | (g << 8) | r;
+ }
+ p->palette_has_changed = 1;
+ avctx->palctrl->palette_changed = 0;
+ }
+ }
+ if((compr & (~TGA_RLE)) == TGA_NODATA)
+ memset(p->data[0], 0, p->linesize[0] * s->height);
+ else{
+ if(compr & TGA_RLE)
+ targa_decode_rle(avctx, s, buf, dst, avctx->width, avctx->height, stride, bpp);
+ else{
+ for(y = 0; y < s->height; y++){
+#ifdef WORDS_BIGENDIAN
+ if((s->bpp + 1) >> 3 == 2){
+ uint16_t *dst16 = (uint16_t*)dst;
+ for(x = 0; x < s->width; x++)
+ dst16[x] = LE_16(buf + x * 2);
+ }else if((s->bpp + 1) >> 3 == 4){
+ uint32_t *dst32 = (uint32_t*)dst;
+ for(x = 0; x < s->width; x++)
+ dst32[x] = LE_32(buf + x * 4);
+ }else
+#endif
+ memcpy(dst, buf, s->width * ((s->bpp + 1) >> 3));
+
+ dst += stride;
+ buf += s->width * ((s->bpp + 1) >> 3);
+ }
+ }
+ }
+
+ *picture= *(AVFrame*)&s->picture;
+ *data_size = sizeof(AVPicture);
+
+ return buf_size;
+}
+
+static int targa_init(AVCodecContext *avctx){
+ TargaContext *s = avctx->priv_data;
+
+ avcodec_get_frame_defaults((AVFrame*)&s->picture);
+ avctx->coded_frame= (AVFrame*)&s->picture;
+ s->picture.data[0] = NULL;
+
+ return 0;
+}
+
+static int targa_end(AVCodecContext *avctx){
+ TargaContext *s = avctx->priv_data;
+
+ if(s->picture.data[0])
+ avctx->release_buffer(avctx, &s->picture);
+
+ return 0;
+}
+
+AVCodec targa_decoder = {
+ "targa",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_TARGA,
+ sizeof(TargaContext),
+ targa_init,
+ NULL,
+ targa_end,
+ decode_frame,
+ 0,
+ NULL
+};
diff --git a/contrib/ffmpeg/libavcodec/tiertexseqv.c b/contrib/ffmpeg/libavcodec/tiertexseqv.c
new file mode 100644
index 000000000..ca3baf09e
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/tiertexseqv.c
@@ -0,0 +1,232 @@
+/*
+ * Tiertex Limited SEQ Video Decoder
+ * Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file tiertexseqv.c
+ * Tiertex Limited SEQ video decoder
+ */
+
+#include "avcodec.h"
+#include "common.h"
+#define ALT_BITSTREAM_READER_LE
+#include "bitstream.h"
+
+
+typedef struct SeqVideoContext {
+ AVCodecContext *avctx;
+ AVFrame frame;
+ unsigned int palette[256];
+ unsigned char block[8 * 8];
+} SeqVideoContext;
+
+
+static unsigned char *seq_unpack_rle_block(unsigned char *src, unsigned char *dst, int dst_size)
+{
+ int i, len, sz;
+ GetBitContext gb;
+ int code_table[64];
+
+ /* get the rle codes (at most 64 bytes) */
+ init_get_bits(&gb, src, 64 * 8);
+ for (i = 0, sz = 0; i < 64 && sz < dst_size; i++) {
+ code_table[i] = get_sbits(&gb, 4);
+ sz += FFABS(code_table[i]);
+ }
+ src += (get_bits_count(&gb) + 7) / 8;
+
+ /* do the rle unpacking */
+ for (i = 0; i < 64 && dst_size > 0; i++) {
+ len = code_table[i];
+ if (len < 0) {
+ len = -len;
+ memset(dst, *src++, FFMIN(len, dst_size));
+ } else {
+ memcpy(dst, src, FFMIN(len, dst_size));
+ src += len;
+ }
+ dst += len;
+ dst_size -= len;
+ }
+ return src;
+}
+
+static unsigned char *seq_decode_op1(SeqVideoContext *seq, unsigned char *src, unsigned char *dst)
+{
+ unsigned char *color_table;
+ int b, i, len, bits;
+ GetBitContext gb;
+
+ len = *src++;
+ if (len & 0x80) {
+ switch (len & 3) {
+ case 1:
+ src = seq_unpack_rle_block(src, seq->block, sizeof(seq->block));
+ for (b = 0; b < 8; b++) {
+ memcpy(dst, &seq->block[b * 8], 8);
+ dst += seq->frame.linesize[0];
+ }
+ break;
+ case 2:
+ src = seq_unpack_rle_block(src, seq->block, sizeof(seq->block));
+ for (i = 0; i < 8; i++) {
+ for (b = 0; b < 8; b++)
+ dst[b * seq->frame.linesize[0]] = seq->block[i * 8 + b];
+ ++dst;
+ }
+ break;
+ }
+ } else {
+ color_table = src;
+ src += len;
+ bits = ff_log2_tab[len - 1] + 1;
+ init_get_bits(&gb, src, bits * 8 * 8); src += bits * 8;
+ for (b = 0; b < 8; b++) {
+ for (i = 0; i < 8; i++)
+ dst[i] = color_table[get_bits(&gb, bits)];
+ dst += seq->frame.linesize[0];
+ }
+ }
+
+ return src;
+}
+
+static unsigned char *seq_decode_op2(SeqVideoContext *seq, unsigned char *src, unsigned char *dst)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ memcpy(dst, src, 8);
+ src += 8;
+ dst += seq->frame.linesize[0];
+ }
+
+ return src;
+}
+
+static unsigned char *seq_decode_op3(SeqVideoContext *seq, unsigned char *src, unsigned char *dst)
+{
+ int pos, offset;
+
+ do {
+ pos = *src++;
+ offset = ((pos >> 3) & 7) * seq->frame.linesize[0] + (pos & 7);
+ dst[offset] = *src++;
+ } while (!(pos & 0x80));
+
+ return src;
+}
+
+static void seqvideo_decode(SeqVideoContext *seq, unsigned char *data, int data_size)
+{
+ GetBitContext gb;
+ int flags, i, j, x, y, op;
+ unsigned char c[3];
+ unsigned char *dst;
+
+ flags = *data++;
+
+ if (flags & 1) {
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 3; j++, data++)
+ c[j] = (*data << 2) | (*data >> 4);
+ seq->palette[i] = (c[0] << 16) | (c[1] << 8) | c[2];
+ }
+ memcpy(seq->frame.data[1], seq->palette, sizeof(seq->palette));
+ seq->frame.palette_has_changed = 1;
+ }
+
+ if (flags & 2) {
+ init_get_bits(&gb, data, 128 * 8); data += 128;
+ for (y = 0; y < 128; y += 8)
+ for (x = 0; x < 256; x += 8) {
+ dst = &seq->frame.data[0][y * seq->frame.linesize[0] + x];
+ op = get_bits(&gb, 2);
+ switch (op) {
+ case 1:
+ data = seq_decode_op1(seq, data, dst);
+ break;
+ case 2:
+ data = seq_decode_op2(seq, data, dst);
+ break;
+ case 3:
+ data = seq_decode_op3(seq, data, dst);
+ break;
+ }
+ }
+ }
+}
+
+static int seqvideo_decode_init(AVCodecContext *avctx)
+{
+ SeqVideoContext *seq = (SeqVideoContext *)avctx->priv_data;
+
+ seq->avctx = avctx;
+ avctx->pix_fmt = PIX_FMT_PAL8;
+ avctx->has_b_frames = 0;
+
+ seq->frame.data[0] = NULL;
+
+ return 0;
+}
+
+static int seqvideo_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+
+ SeqVideoContext *seq = (SeqVideoContext *)avctx->priv_data;
+
+ seq->frame.reference = 1;
+ seq->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
+ if (avctx->reget_buffer(avctx, &seq->frame)) {
+ av_log(seq->avctx, AV_LOG_ERROR, "tiertexseqvideo: reget_buffer() failed\n");
+ return -1;
+ }
+
+ seqvideo_decode(seq, buf, buf_size);
+
+ *data_size = sizeof(AVFrame);
+ *(AVFrame *)data = seq->frame;
+
+ return buf_size;
+}
+
+static int seqvideo_decode_end(AVCodecContext *avctx)
+{
+ SeqVideoContext *seq = (SeqVideoContext *)avctx->priv_data;
+
+ if (seq->frame.data[0])
+ avctx->release_buffer(avctx, &seq->frame);
+
+ return 0;
+}
+
+AVCodec tiertexseqvideo_decoder = {
+ "tiertexseqvideo",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_TIERTEXSEQVIDEO,
+ sizeof(SeqVideoContext),
+ seqvideo_decode_init,
+ NULL,
+ seqvideo_decode_end,
+ seqvideo_decode_frame,
+ CODEC_CAP_DR1,
+};
diff --git a/contrib/ffmpeg/libavcodec/tiff.c b/contrib/ffmpeg/libavcodec/tiff.c
new file mode 100644
index 000000000..5925af1ae
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/tiff.c
@@ -0,0 +1,531 @@
+/*
+ * TIFF image decoder
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include "avcodec.h"
+#ifdef CONFIG_ZLIB
+#include <zlib.h>
+#endif
+#include "lzw.h"
+
+/* abridged list of TIFF tags */
+enum TiffTags{
+ TIFF_WIDTH = 0x100,
+ TIFF_HEIGHT,
+ TIFF_BPP,
+ TIFF_COMPR,
+ TIFF_INVERT = 0x106,
+ TIFF_STRIP_OFFS = 0x111,
+ TIFF_ROWSPERSTRIP = 0x116,
+ TIFF_STRIP_SIZE,
+ TIFF_PLANAR = 0x11C,
+ TIFF_XPOS = 0x11E,
+ TIFF_YPOS = 0x11F,
+ TIFF_PREDICTOR = 0x13D,
+ TIFF_PAL = 0x140
+};
+
+enum TiffCompr{
+ TIFF_RAW = 1,
+ TIFF_CCITT_RLE,
+ TIFF_G3,
+ TIFF_G4,
+ TIFF_LZW,
+ TIFF_JPEG,
+ TIFF_NEWJPEG,
+ TIFF_ADOBE_DEFLATE,
+ TIFF_PACKBITS = 0x8005,
+ TIFF_DEFLATE = 0x80B2
+};
+
+enum TiffTypes{
+ TIFF_BYTE = 1,
+ TIFF_STRING,
+ TIFF_SHORT,
+ TIFF_LONG,
+ TIFF_LONGLONG
+};
+
+/** sizes of various TIFF field types */
+static const int type_sizes[6] = {
+ 0, 1, 100, 2, 4, 8
+};
+
+typedef struct TiffContext {
+ AVCodecContext *avctx;
+ AVFrame picture;
+
+ int width, height;
+ unsigned int bpp;
+ int le;
+ int compr;
+ int invert;
+
+ int strips, rps;
+ int sot;
+ uint8_t* stripdata;
+ uint8_t* stripsizes;
+ int stripsize, stripoff;
+ LZWState *lzw;
+} TiffContext;
+
+static int tget_short(uint8_t **p, int le){
+ int v = le ? LE_16(*p) : BE_16(*p);
+ *p += 2;
+ return v;
+}
+
+static int tget_long(uint8_t **p, int le){
+ int v = le ? LE_32(*p) : BE_32(*p);
+ *p += 4;
+ return v;
+}
+
+static int tget(uint8_t **p, int type, int le){
+ switch(type){
+ case TIFF_BYTE : return *(*p)++;
+ case TIFF_SHORT: return tget_short(p, le);
+ case TIFF_LONG : return tget_long (p, le);
+ default : return -1;
+ }
+}
+
+static int tiff_unpack_strip(TiffContext *s, uint8_t* dst, int stride, uint8_t *src, int size, int lines){
+ int c, line, pixels, code;
+ uint8_t *ssrc = src;
+ int width = s->width * (s->bpp / 8);
+#ifdef CONFIG_ZLIB
+ uint8_t *zbuf; unsigned long outlen;
+
+ if(s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE){
+ outlen = width * lines;
+ zbuf = av_malloc(outlen);
+ if(uncompress(zbuf, &outlen, src, size) != Z_OK){
+ av_log(s->avctx, AV_LOG_ERROR, "Uncompressing failed (%lu of %lu)\n", outlen, (unsigned long)width * lines);
+ av_free(zbuf);
+ return -1;
+ }
+ src = zbuf;
+ for(line = 0; line < lines; line++){
+ memcpy(dst, src, width);
+ dst += stride;
+ src += width;
+ }
+ av_free(zbuf);
+ return 0;
+ }
+#endif
+ if(s->compr == TIFF_LZW){
+ if(ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF) < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
+ return -1;
+ }
+ }
+ for(line = 0; line < lines; line++){
+ if(src - ssrc > size){
+ av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
+ return -1;
+ }
+ switch(s->compr){
+ case TIFF_RAW:
+ memcpy(dst, src, s->width * (s->bpp / 8));
+ src += s->width * (s->bpp / 8);
+ break;
+ case TIFF_PACKBITS:
+ for(pixels = 0; pixels < width;){
+ code = (int8_t)*src++;
+ if(code >= 0){
+ code++;
+ if(pixels + code > width){
+ av_log(s->avctx, AV_LOG_ERROR, "Copy went out of bounds\n");
+ return -1;
+ }
+ memcpy(dst + pixels, src, code);
+ src += code;
+ pixels += code;
+ }else if(code != -128){ // -127..-1
+ code = (-code) + 1;
+ if(pixels + code > width){
+ av_log(s->avctx, AV_LOG_ERROR, "Run went out of bounds\n");
+ return -1;
+ }
+ c = *src++;
+ memset(dst + pixels, c, code);
+ pixels += code;
+ }
+ }
+ break;
+ case TIFF_LZW:
+ pixels = ff_lzw_decode(s->lzw, dst, width);
+ if(pixels < width){
+ av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n", pixels, width);
+ return -1;
+ }
+ break;
+ }
+ dst += stride;
+ }
+ return 0;
+}
+
+
+static int tiff_decode_tag(TiffContext *s, uint8_t *start, uint8_t *buf, uint8_t *end_buf, AVFrame *pic)
+{
+ int tag, type, count, off, value = 0;
+ uint8_t *src, *dst;
+ int i, j, ssize, soff, stride;
+ int *pal, *rp, *gp, *bp;
+
+ tag = tget_short(&buf, s->le);
+ type = tget_short(&buf, s->le);
+ count = tget_long(&buf, s->le);
+ off = tget_long(&buf, s->le);
+
+ if(count == 1){
+ switch(type){
+ case TIFF_BYTE:
+ case TIFF_SHORT:
+ buf -= 4;
+ value = tget(&buf, type, s->le);
+ buf = NULL;
+ break;
+ case TIFF_LONG:
+ value = off;
+ buf = NULL;
+ break;
+ default:
+ value = -1;
+ buf = start + off;
+ }
+ }else if(type_sizes[type] * count <= 4){
+ buf -= 4;
+ }else{
+ buf = start + off;
+ }
+
+ if(buf && (buf < start || buf > end_buf)){
+ av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
+ return -1;
+ }
+
+ switch(tag){
+ case TIFF_WIDTH:
+ s->width = value;
+ break;
+ case TIFF_HEIGHT:
+ s->height = value;
+ break;
+ case TIFF_BPP:
+ if(count == 1) s->bpp = value;
+ else{
+ switch(type){
+ case TIFF_BYTE:
+ s->bpp = (off & 0xFF) + ((off >> 8) & 0xFF) + ((off >> 16) & 0xFF) + ((off >> 24) & 0xFF);
+ break;
+ case TIFF_SHORT:
+ case TIFF_LONG:
+ s->bpp = 0;
+ for(i = 0; i < count; i++) s->bpp += tget(&buf, type, s->le);
+ break;
+ default:
+ s->bpp = -1;
+ }
+ }
+ switch(s->bpp){
+ case 8:
+ s->avctx->pix_fmt = PIX_FMT_PAL8;
+ break;
+ case 24:
+ s->avctx->pix_fmt = PIX_FMT_RGB24;
+ break;
+ case 16:
+ if(count == 1){
+ s->avctx->pix_fmt = PIX_FMT_GRAY16BE;
+ }else{
+ av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%i)\n", s->bpp);
+ return -1;
+ }
+ break;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%i)\n", s->bpp);
+ return -1;
+ }
+ if(s->width != s->avctx->width || s->height != s->avctx->height){
+ if(avcodec_check_dimensions(s->avctx, s->width, s->height))
+ return -1;
+ avcodec_set_dimensions(s->avctx, s->width, s->height);
+ }
+ if(s->picture.data[0])
+ s->avctx->release_buffer(s->avctx, &s->picture);
+ if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+ if(s->bpp == 8){
+ /* make default grayscale pal */
+ pal = s->picture.data[1];
+ for(i = 0; i < 256; i++)
+ pal[i] = i * 0x010101;
+ }
+ break;
+ case TIFF_COMPR:
+ s->compr = value;
+ switch(s->compr){
+ case TIFF_RAW:
+ case TIFF_PACKBITS:
+ case TIFF_LZW:
+ break;
+ case TIFF_DEFLATE:
+ case TIFF_ADOBE_DEFLATE:
+#ifdef CONFIG_ZLIB
+ break;
+#else
+ av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
+ return -1;
+#endif
+ case TIFF_G3:
+ av_log(s->avctx, AV_LOG_ERROR, "CCITT G3 compression is not supported\n");
+ return -1;
+ case TIFF_G4:
+ av_log(s->avctx, AV_LOG_ERROR, "CCITT G4 compression is not supported\n");
+ return -1;
+ case TIFF_CCITT_RLE:
+ av_log(s->avctx, AV_LOG_ERROR, "CCITT RLE compression is not supported\n");
+ return -1;
+ case TIFF_JPEG:
+ case TIFF_NEWJPEG:
+ av_log(s->avctx, AV_LOG_ERROR, "JPEG compression is not supported\n");
+ return -1;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n", s->compr);
+ return -1;
+ }
+ break;
+ case TIFF_ROWSPERSTRIP:
+ if(value < 1){
+ av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n");
+ return -1;
+ }
+ s->rps = value;
+ break;
+ case TIFF_STRIP_OFFS:
+ if(count == 1){
+ s->stripdata = NULL;
+ s->stripoff = value;
+ }else
+ s->stripdata = start + off;
+ s->strips = count;
+ s->sot = type;
+ if(s->stripdata > end_buf){
+ av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
+ return -1;
+ }
+ break;
+ case TIFF_STRIP_SIZE:
+ if(count == 1){
+ s->stripsizes = NULL;
+ s->stripsize = value;
+ s->strips = 1;
+ }else{
+ s->stripsizes = start + off;
+ }
+ s->strips = count;
+ if(s->stripsizes > end_buf){
+ av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
+ return -1;
+ }
+ if(!pic->data[0]){
+ av_log(s->avctx, AV_LOG_ERROR, "Picture initialization missing\n");
+ return -1;
+ }
+ /* now we have the data and may start decoding */
+ stride = pic->linesize[0];
+ dst = pic->data[0];
+ for(i = 0; i < s->height; i += s->rps){
+ if(s->stripsizes)
+ ssize = tget(&s->stripsizes, type, s->le);
+ else
+ ssize = s->stripsize;
+
+ if(s->stripdata){
+ soff = tget(&s->stripdata, s->sot, s->le);
+ }else
+ soff = s->stripoff;
+ src = start + soff;
+ if(tiff_unpack_strip(s, dst, stride, src, ssize, FFMIN(s->rps, s->height - i)) < 0)
+ break;
+ dst += s->rps * stride;
+ }
+ break;
+ case TIFF_PREDICTOR:
+ if(!pic->data[0]){
+ av_log(s->avctx, AV_LOG_ERROR, "Picture initialization missing\n");
+ return -1;
+ }
+ if(value == 2){
+ src = pic->data[0];
+ stride = pic->linesize[0];
+ soff = s->bpp >> 3;
+ ssize = s->width * soff;
+ for(i = 0; i < s->height; i++) {
+ for(j = soff; j < ssize; j++)
+ src[j] += src[j - soff];
+ src += stride;
+ }
+ }
+ break;
+ case TIFF_INVERT:
+ switch(value){
+ case 0:
+ s->invert = 1;
+ break;
+ case 1:
+ s->invert = 0;
+ break;
+ case 2:
+ case 3:
+ break;
+ default:
+ av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n", value);
+ return -1;
+ }
+ break;
+ case TIFF_PAL:
+ if(s->avctx->pix_fmt != PIX_FMT_PAL8){
+ av_log(s->avctx, AV_LOG_ERROR, "Palette met but this is not palettized format\n");
+ return -1;
+ }
+ pal = s->picture.data[1];
+ off = type_sizes[type];
+ rp = buf;
+ gp = buf + count / 3 * off;
+ bp = buf + count / 3 * off * 2;
+ off = (type_sizes[type] - 1) << 3;
+ for(i = 0; i < count / 3; i++){
+ j = (tget(&rp, type, s->le) >> off) << 16;
+ j |= (tget(&gp, type, s->le) >> off) << 8;
+ j |= tget(&bp, type, s->le) >> off;
+ pal[i] = j;
+ }
+ break;
+ case TIFF_PLANAR:
+ if(value == 2){
+ av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
+ return -1;
+ }
+ break;
+ }
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ TiffContext * const s = avctx->priv_data;
+ AVFrame *picture = data;
+ AVFrame * const p= (AVFrame*)&s->picture;
+ uint8_t *orig_buf = buf, *end_buf = buf + buf_size;
+ int id, le, off;
+ int i, entries;
+
+ //parse image header
+ id = LE_16(buf); buf += 2;
+ if(id == 0x4949) le = 1;
+ else if(id == 0x4D4D) le = 0;
+ else{
+ av_log(avctx, AV_LOG_ERROR, "TIFF header not found\n");
+ return -1;
+ }
+ s->le = le;
+ s->invert = 0;
+ // As TIFF 6.0 specification puts it "An arbitrary but carefully chosen number
+ // that further identifies the file as a TIFF file"
+ if(tget_short(&buf, le) != 42){
+ av_log(avctx, AV_LOG_ERROR, "The answer to life, universe and everything is not correct!\n");
+ return -1;
+ }
+ /* parse image file directory */
+ off = tget_long(&buf, le);
+ if(orig_buf + off + 14 >= end_buf){
+ av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
+ return -1;
+ }
+ buf = orig_buf + off;
+ entries = tget_short(&buf, le);
+ for(i = 0; i < entries; i++){
+ if(tiff_decode_tag(s, orig_buf, buf, end_buf, p) < 0)
+ return -1;
+ buf += 12;
+ }
+
+ if(s->invert){
+ uint8_t *src;
+ int j;
+
+ src = s->picture.data[0];
+ for(j = 0; j < s->height; j++){
+ for(i = 0; i < s->picture.linesize[0]; i++)
+ src[i] = 255 - src[i];
+ src += s->picture.linesize[0];
+ }
+ }
+ *picture= *(AVFrame*)&s->picture;
+ *data_size = sizeof(AVPicture);
+
+ return buf_size;
+}
+
+static int tiff_init(AVCodecContext *avctx){
+ TiffContext *s = avctx->priv_data;
+
+ s->width = 0;
+ s->height = 0;
+ s->avctx = avctx;
+ avcodec_get_frame_defaults((AVFrame*)&s->picture);
+ avctx->coded_frame= (AVFrame*)&s->picture;
+ s->picture.data[0] = NULL;
+ ff_lzw_decode_open(&s->lzw);
+
+ return 0;
+}
+
+static int tiff_end(AVCodecContext *avctx)
+{
+ TiffContext * const s = avctx->priv_data;
+
+ ff_lzw_decode_close(&s->lzw);
+ if(s->picture.data[0])
+ avctx->release_buffer(avctx, &s->picture);
+ return 0;
+}
+
+AVCodec tiff_decoder = {
+ "tiff",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_TIFF,
+ sizeof(TiffContext),
+ tiff_init,
+ NULL,
+ tiff_end,
+ decode_frame,
+ 0,
+ NULL
+};
diff --git a/src/libffmpeg/libavcodec/truemotion1.c b/contrib/ffmpeg/libavcodec/truemotion1.c
index d2c9efbf8..11d9320c0 100644
--- a/src/libffmpeg/libavcodec/truemotion1.c
+++ b/contrib/ffmpeg/libavcodec/truemotion1.c
@@ -2,18 +2,20 @@
* Duck TrueMotion 1.0 Decoder
* Copyright (C) 2003 Alex Beregszaszi & Mike Melanson
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/truemotion1data.h b/contrib/ffmpeg/libavcodec/truemotion1data.h
index 800bb306b..63d307c65 100644
--- a/src/libffmpeg/libavcodec/truemotion1data.h
+++ b/contrib/ffmpeg/libavcodec/truemotion1data.h
@@ -5,6 +5,22 @@
* distributed under the GNU GPL. It is redistributed with ffmpeg under the
* GNU LGPL using the common understanding that data tables necessary for
* decoding algorithms are not necessarily licensable.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef TRUEMOTION1DATA_H
#define TRUEMOTION1DATA_H
diff --git a/src/libffmpeg/libavcodec/truemotion2.c b/contrib/ffmpeg/libavcodec/truemotion2.c
index 84b940d42..1b67bd22a 100644
--- a/src/libffmpeg/libavcodec/truemotion2.c
+++ b/contrib/ffmpeg/libavcodec/truemotion2.c
@@ -2,18 +2,20 @@
* Duck/ON2 TrueMotion 2 Decoder
* Copyright (c) 2005 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -822,7 +824,7 @@ static int decode_init(AVCodecContext *avctx){
TM2Context * const l = avctx->priv_data;
int i;
- if (avcodec_check_dimensions(avctx, avctx->height, avctx->width) < 0) {
+ if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
return -1;
}
if((avctx->width & 3) || (avctx->height & 3)){
diff --git a/src/libffmpeg/libavcodec/truespeech.c b/contrib/ffmpeg/libavcodec/truespeech.c
index dbd29b38f..077e9b037 100644
--- a/src/libffmpeg/libavcodec/truespeech.c
+++ b/contrib/ffmpeg/libavcodec/truespeech.c
@@ -2,18 +2,20 @@
* DSP Group TrueSpeech compatible decoder
* Copyright (c) 2005 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
diff --git a/src/libffmpeg/libavcodec/truespeech_data.h b/contrib/ffmpeg/libavcodec/truespeech_data.h
index 9a9007234..cd8822fde 100644
--- a/src/libffmpeg/libavcodec/truespeech_data.h
+++ b/contrib/ffmpeg/libavcodec/truespeech_data.h
@@ -1,3 +1,24 @@
+/*
+ * DSP Group TrueSpeech compatible decoder
+ * copyright (c) 2005 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#ifndef __TRUESPEECH_DATA__
#define __TRUESPEECH_DATA__
diff --git a/src/libffmpeg/libavcodec/tscc.c b/contrib/ffmpeg/libavcodec/tscc.c
index 19edf3b2e..a24540f37 100644
--- a/src/libffmpeg/libavcodec/tscc.c
+++ b/contrib/ffmpeg/libavcodec/tscc.c
@@ -2,18 +2,20 @@
* TechSmith Camtasia decoder
* Copyright (c) 2004 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -264,7 +266,7 @@ static int decode_init(AVCodecContext *avctx)
c->pic.data[0] = NULL;
c->height = avctx->height;
- if (avcodec_check_dimensions(avctx, avctx->height, avctx->width) < 0) {
+ if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
return 1;
}
diff --git a/src/libffmpeg/libavcodec/tta.c b/contrib/ffmpeg/libavcodec/tta.c
index 979a94a74..82713fb0f 100644
--- a/src/libffmpeg/libavcodec/tta.c
+++ b/contrib/ffmpeg/libavcodec/tta.c
@@ -2,18 +2,20 @@
* TTA (The Lossless True Audio) decoder
* Copyright (c) 2006 Alex Beregszaszi
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -195,17 +197,6 @@ static int tta_get_unary(GetBitContext *gb)
return ret;
}
-// shamelessly copied from shorten.c
-static int inline get_le16(GetBitContext *gb)
-{
- return bswap_16(get_bits_long(gb, 16));
-}
-
-static int inline get_le32(GetBitContext *gb)
-{
- return bswap_32(get_bits_long(gb, 32));
-}
-
static int tta_decode_init(AVCodecContext * avctx)
{
TTAContext *s = avctx->priv_data;
@@ -218,7 +209,7 @@ static int tta_decode_init(AVCodecContext * avctx)
return -1;
init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size);
- if (show_bits_long(&s->gb, 32) == bswap_32(ff_get_fourcc("TTA1")))
+ if (show_bits_long(&s->gb, 32) == ff_get_fourcc("TTA1"))
{
/* signature */
skip_bits(&s->gb, 32);
@@ -227,22 +218,22 @@ static int tta_decode_init(AVCodecContext * avctx)
// return -1;
// }
- s->flags = get_le16(&s->gb);
+ s->flags = get_bits(&s->gb, 16);
if (s->flags != 1 && s->flags != 3)
{
av_log(s->avctx, AV_LOG_ERROR, "Invalid flags\n");
return -1;
}
s->is_float = (s->flags == FORMAT_FLOAT);
- avctx->channels = s->channels = get_le16(&s->gb);
- avctx->bits_per_sample = get_le16(&s->gb);
+ avctx->channels = s->channels = get_bits(&s->gb, 16);
+ avctx->bits_per_sample = get_bits(&s->gb, 16);
s->bps = (avctx->bits_per_sample + 7) / 8;
- avctx->sample_rate = get_le32(&s->gb);
+ avctx->sample_rate = get_bits_long(&s->gb, 32);
if(avctx->sample_rate > 1000000){ //prevent FRAME_TIME * avctx->sample_rate from overflowing and sanity check
av_log(avctx, AV_LOG_ERROR, "sample_rate too large\n");
return -1;
}
- s->data_length = get_le32(&s->gb);
+ s->data_length = get_bits_long(&s->gb, 32);
skip_bits(&s->gb, 32); // CRC32 of header
if (s->is_float)
@@ -361,9 +352,9 @@ static int tta_decode_frame(AVCodecContext *avctx,
rice->k0++;
}
- // extract sign
-#define SIGN(x) (((x)&1) ? (++(x)>>1) : (-(x)>>1))
- *p = SIGN(value);
+ // extract coded value
+#define UNFOLD(x) (((x)&1) ? (++(x)>>1) : (-(x)>>1))
+ *p = UNFOLD(value);
// run hybrid filter
ttafilter_process(filter, p, 0);
diff --git a/src/libffmpeg/libavcodec/ulti.c b/contrib/ffmpeg/libavcodec/ulti.c
index 484eef7c7..b4028f439 100755..100644
--- a/src/libffmpeg/libavcodec/ulti.c
+++ b/contrib/ffmpeg/libavcodec/ulti.c
@@ -2,18 +2,20 @@
* IBM Ultimotion Video Decoder
* Copyright (C) 2004 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/ulti_cb.h b/contrib/ffmpeg/libavcodec/ulti_cb.h
index 835910f6d..2d8c9082c 100755..100644
--- a/src/libffmpeg/libavcodec/ulti_cb.h
+++ b/contrib/ffmpeg/libavcodec/ulti_cb.h
@@ -1,3 +1,24 @@
+/*
+ * IBM Ultimotion Video Decoder
+ * copyright (C) 2004 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
static const unsigned char ulti_codebook[16384]={
0x00, 0x01, 0x01, 0x02,
0x00, 0x01, 0x02, 0x03,
diff --git a/src/libffmpeg/libavcodec/utils.c b/contrib/ffmpeg/libavcodec/utils.c
index 0f8a4f412..2c7a76c11 100644
--- a/src/libffmpeg/libavcodec/utils.c
+++ b/contrib/ffmpeg/libavcodec/utils.c
@@ -3,18 +3,20 @@
* Copyright (c) 2001 Fabrice Bellard.
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -57,31 +59,6 @@ const uint8_t ff_reverse[256]={
static int volatile entangled_thread_counter=0;
-void avcodec_default_free_buffers(AVCodecContext *s);
-
-void *av_mallocz(unsigned int size)
-{
- void *ptr;
-
- ptr = av_malloc(size);
- if (!ptr)
- return NULL;
- memset(ptr, 0, size);
- return ptr;
-}
-
-char *av_strdup(const char *s)
-{
- char *ptr;
- int len;
- len = strlen(s) + 1;
- ptr = av_malloc(len);
- if (!ptr)
- return NULL;
- memcpy(ptr, s, len);
- return ptr;
-}
-
/**
* realloc which does nothing if the block is large enough
*/
@@ -95,7 +72,6 @@ void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size)
return av_realloc(ptr, *size);
}
-
static unsigned int last_static = 0;
static unsigned int allocated_static = 0;
static void** array_static = NULL;
@@ -159,16 +135,6 @@ static void do_free(void)
av_free_static();
}
-/**
- * Frees memory and sets the pointer to NULL.
- * @param arg pointer to the pointer which should be freed
- */
-void av_freep(void *arg)
-{
- void **ptr= (void**)arg;
- av_free(*ptr);
- *ptr = NULL;
-}
/* encoder management */
AVCodec *first_avcodec = NULL;
@@ -211,6 +177,8 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
case PIX_FMT_YUV422P:
case PIX_FMT_YUV444P:
case PIX_FMT_GRAY8:
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
case PIX_FMT_YUVJ420P:
case PIX_FMT_YUVJ422P:
case PIX_FMT_YUVJ444P:
@@ -440,7 +408,7 @@ static const char* context_to_name(void* ptr) {
return "NULL";
}
-#define OFFSET(x) (int)&((AVCodecContext*)0)->x
+#define OFFSET(x) offsetof(AVCodecContext,x)
#define DEFAULT 0 //should be NAN but it doesnt work as its not a constant in glibc as required by ANSI/ISO C
//these names are too long to be readable
#define V AV_OPT_FLAG_VIDEO_PARAM
@@ -449,9 +417,11 @@ static const char* context_to_name(void* ptr) {
#define E AV_OPT_FLAG_ENCODING_PARAM
#define D AV_OPT_FLAG_DECODING_PARAM
+#define AV_CODEC_DEFAULT_BITRATE 200*1000
+
static const AVOption options[]={
-{"bit_rate", NULL, OFFSET(bit_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|A|E},
-{"bit_rate_tolerance", NULL, OFFSET(bit_rate_tolerance), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"b", "set video bitrate (in bits/s)", OFFSET(bit_rate), FF_OPT_TYPE_INT, AV_CODEC_DEFAULT_BITRATE, INT_MIN, INT_MAX, V|A|E},
+{"bt", "set video bitrate tolerance (in bits/s)", OFFSET(bit_rate_tolerance), FF_OPT_TYPE_INT, AV_CODEC_DEFAULT_BITRATE*20, INT_MIN, INT_MAX, V|E},
{"flags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, V|A|E|D, "flags"},
{"mv4", "use four motion vector by macroblock (mpeg4)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_4MV, INT_MIN, INT_MAX, V|E, "flags"},
{"obmc", "use overlapped block motion compensation (h263+)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_OBMC, INT_MIN, INT_MAX, V|E, "flags"},
@@ -471,7 +441,7 @@ static const AVOption options[]={
{"truncated", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG_TRUNCATED, INT_MIN, INT_MAX, 0, "flags"},
{"naq", "normalize adaptive quantization", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_NORMALIZE_AQP, INT_MIN, INT_MAX, V|E, "flags"},
{"ildct", "use interlaced dct", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_INTERLACED_DCT, INT_MIN, INT_MAX, V|E, "flags"},
-{"low_delay", "force low delay", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_LOW_DELAY, INT_MIN, INT_MAX, V|D, "flags"},
+{"low_delay", "force low delay", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_LOW_DELAY, INT_MIN, INT_MAX, V|D|E, "flags"},
{"alt", "enable alternate scantable (mpeg2/mpeg4)", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_ALT_SCAN, INT_MIN, INT_MAX, V|E, "flags"},
{"trell", "use trellis quantization", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_TRELLIS_QUANT, INT_MIN, INT_MAX, V|E, "flags"},
{"global_header", "place global headers in extradata instead of every keyframe", 0, FF_OPT_TYPE_CONST, CODEC_FLAG_GLOBAL_HEADER, INT_MIN, INT_MAX, 0, "flags"},
@@ -490,25 +460,25 @@ static const AVOption options[]={
{"noout", "skip bitstream encoding", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_NO_OUTPUT, INT_MIN, INT_MAX, V|E, "flags2"},
{"local_header", "place global headers at every keyframe instead of in extradata", 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_LOCAL_HEADER, INT_MIN, INT_MAX, V|E, "flags2"},
{"sub_id", NULL, OFFSET(sub_id), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"me_method", NULL, OFFSET(me_method), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "me_method"},
+{"me_method", "set motion estimation method", OFFSET(me_method), FF_OPT_TYPE_INT, ME_EPZS, INT_MIN, INT_MAX, V|E, "me_method"},
{"extradata_size", NULL, OFFSET(extradata_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
{"time_base", NULL, OFFSET(time_base), FF_OPT_TYPE_RATIONAL, DEFAULT, INT_MIN, INT_MAX},
-{"gop_size", NULL, OFFSET(gop_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"g", "set the group of picture size", OFFSET(gop_size), FF_OPT_TYPE_INT, 12, INT_MIN, INT_MAX, V|E},
{"rate_emu", NULL, OFFSET(rate_emu), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"sample_rate", NULL, OFFSET(sample_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"channels", NULL, OFFSET(channels), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
+{"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
+{"ac", "set number of audio channels", OFFSET(channels), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
{"cutoff", "set cutoff bandwidth", OFFSET(cutoff), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|E},
{"frame_size", NULL, OFFSET(frame_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, A|E},
{"frame_number", NULL, OFFSET(frame_number), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
{"real_pict_num", NULL, OFFSET(real_pict_num), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
{"delay", NULL, OFFSET(delay), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"qcompress", NULL, OFFSET(qcompress), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
-{"qblur", NULL, OFFSET(qblur), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
-{"qmin", NULL, OFFSET(qmin), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"qmax", NULL, OFFSET(qmax), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"max_qdiff", NULL, OFFSET(max_qdiff), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"max_b_frames", NULL, OFFSET(max_b_frames), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"b_quant_factor", NULL, OFFSET(b_quant_factor), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
+{"qcomp", "video quantizer scale compression (VBR)", OFFSET(qcompress), FF_OPT_TYPE_FLOAT, 0.5, FLT_MIN, FLT_MAX, V|E},
+{"qblur", "video quantizer scale blur (VBR)", OFFSET(qblur), FF_OPT_TYPE_FLOAT, 0.5, FLT_MIN, FLT_MAX, V|E},
+{"qmin", "min video quantizer scale (VBR)", OFFSET(qmin), FF_OPT_TYPE_INT, 2, 1, 51, V|E},
+{"qmax", "max video quantizer scale (VBR)", OFFSET(qmax), FF_OPT_TYPE_INT, 31, 1, 51, V|E},
+{"qdiff", "max difference between the quantizer scale (VBR)", OFFSET(max_qdiff), FF_OPT_TYPE_INT, 3, INT_MIN, INT_MAX, V|E},
+{"bf", "use 'frames' B frames", OFFSET(max_b_frames), FF_OPT_TYPE_INT, DEFAULT, 0, FF_MAX_B_FRAMES, V|E},
+{"b_qfactor", "qp factor between p and b frames", OFFSET(b_quant_factor), FF_OPT_TYPE_FLOAT, 1.25, FLT_MIN, FLT_MAX, V|E},
{"rc_strategy", NULL, OFFSET(rc_strategy), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"b_strategy", NULL, OFFSET(b_frame_strategy), FF_OPT_TYPE_INT, 0, INT_MIN, INT_MAX, V|E},
{"hurry_up", NULL, OFFSET(hurry_up), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D},
@@ -524,7 +494,7 @@ static const AVOption options[]={
{"misc_bits", NULL, OFFSET(misc_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
{"frame_bits", NULL, OFFSET(frame_bits), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
{"codec_tag", NULL, OFFSET(codec_tag), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"bugs", NULL, OFFSET(workaround_bugs), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D, "bug"},
+{"bug", "workaround not auto detected encoder bugs", OFFSET(workaround_bugs), FF_OPT_TYPE_FLAGS, FF_BUG_AUTODETECT, INT_MIN, INT_MAX, V|D, "bug"},
{"autodetect", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_AUTODETECT, INT_MIN, INT_MAX, V|D, "bug"},
{"old_msmpeg4", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_OLD_MSMPEG4, INT_MIN, INT_MAX, V|D, "bug"},
{"xvid_ilace", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_XVID_ILACE, INT_MIN, INT_MAX, V|D, "bug"},
@@ -542,13 +512,13 @@ static const AVOption options[]={
{"ms", NULL, 0, FF_OPT_TYPE_CONST, FF_BUG_MS, INT_MIN, INT_MAX, V|D, "bug"},
{"lelim", "single coefficient elimination threshold for luminance (negative values also consider dc coefficient)", OFFSET(luma_elim_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"celim", "single coefficient elimination threshold for chrominance (negative values also consider dc coefficient)", OFFSET(chroma_elim_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"strict", NULL, OFFSET(strict_std_compliance), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "strict"},
+{"strict", "how strictly to follow the standards", OFFSET(strict_std_compliance), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "strict"},
{"very", NULL, 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_VERY_STRICT, INT_MIN, INT_MAX, V|E, "strict"},
{"strict", NULL, 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_STRICT, INT_MIN, INT_MAX, V|E, "strict"},
{"normal", NULL, 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_NORMAL, INT_MIN, INT_MAX, V|E, "strict"},
{"inofficial", NULL, 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_INOFFICIAL, INT_MIN, INT_MAX, V|E, "strict"},
{"experimental", NULL, 0, FF_OPT_TYPE_CONST, FF_COMPLIANCE_EXPERIMENTAL, INT_MIN, INT_MAX, V|E, "strict"},
-{"b_quant_offset", NULL, OFFSET(b_quant_offset), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
+{"b_qoffset", "qp offset between p and b frames", OFFSET(b_quant_offset), FF_OPT_TYPE_FLOAT, 1.25, FLT_MIN, FLT_MAX, V|E},
{"er", NULL, OFFSET(error_resilience), FF_OPT_TYPE_INT, FF_ER_CAREFUL, INT_MIN, INT_MAX, V|D, "er"},
{"careful", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_CAREFUL, INT_MIN, INT_MAX, V|D, "er"},
{"compliant", NULL, 0, FF_OPT_TYPE_CONST, FF_ER_COMPLIANT, INT_MIN, INT_MAX, V|D, "er"},
@@ -560,18 +530,18 @@ static const AVOption options[]={
{"mpeg_quant", NULL, OFFSET(mpeg_quant), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"stats_out", NULL, OFFSET(stats_out), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX},
{"stats_in", NULL, OFFSET(stats_in), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX},
-{"rc_qsquish", NULL, OFFSET(rc_qsquish), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
-{"rc_qmod_amp", NULL, OFFSET(rc_qmod_amp), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
+{"qsquish", "how to keep quantizer between qmin and qmax (0 = clip, 1 = use differentiable function)", OFFSET(rc_qsquish), FF_OPT_TYPE_FLOAT, DEFAULT, 0, 99, V|E},
+{"rc_qmod_amp", NULL, OFFSET(rc_qmod_amp), FF_OPT_TYPE_FLOAT, DEFAULT, -FLT_MAX, FLT_MAX, V|E},
{"rc_qmod_freq", NULL, OFFSET(rc_qmod_freq), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"rc_override_count", NULL, OFFSET(rc_override_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"rc_eq", NULL, OFFSET(rc_eq), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX, V|E},
-{"rc_max_rate", NULL, OFFSET(rc_max_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"rc_min_rate", NULL, OFFSET(rc_min_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"rc_buffer_size", NULL, OFFSET(rc_buffer_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"rc_buf_aggressivity", NULL, OFFSET(rc_buffer_aggressivity), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
-{"i_quant_factor", NULL, OFFSET(i_quant_factor), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
-{"i_quant_offset", NULL, OFFSET(i_quant_offset), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
-{"rc_initial_cplx", NULL, OFFSET(rc_initial_cplx), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
+{"rc_eq", "set rate control equation", OFFSET(rc_eq), FF_OPT_TYPE_STRING, DEFAULT, CHAR_MIN, CHAR_MAX, V|E},
+{"maxrate", "set max video bitrate tolerance (in bits/s)", OFFSET(rc_max_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"minrate", "set min video bitrate tolerance (in bits/s)", OFFSET(rc_min_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"bufsize", "set ratecontrol buffer size (in bits)", OFFSET(rc_buffer_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"rc_buf_aggressivity", NULL, OFFSET(rc_buffer_aggressivity), FF_OPT_TYPE_FLOAT, 1.0, FLT_MIN, FLT_MAX, V|E},
+{"i_qfactor", "qp factor between p and i frames", OFFSET(i_quant_factor), FF_OPT_TYPE_FLOAT, -0.8, -FLT_MAX, FLT_MAX, V|E},
+{"i_qoffset", "qp offset between p and i frames", OFFSET(i_quant_offset), FF_OPT_TYPE_FLOAT, 0.0, -FLT_MAX, FLT_MAX, V|E},
+{"rc_init_cplx", "initial complexity for 1-pass encoding", OFFSET(rc_initial_cplx), FF_OPT_TYPE_FLOAT, DEFAULT, -FLT_MAX, FLT_MAX, V|E},
{"dct", NULL, OFFSET(dct_algo), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, V|E, "dct"},
{"auto", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_AUTO, INT_MIN, INT_MAX, V|E, "dct"},
{"fastint", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_FASTINT, INT_MIN, INT_MAX, V|E, "dct"},
@@ -580,11 +550,11 @@ static const AVOption options[]={
{"mlib", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_MLIB, INT_MIN, INT_MAX, V|E, "dct"},
{"altivec", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_ALTIVEC, INT_MIN, INT_MAX, V|E, "dct"},
{"faan", NULL, 0, FF_OPT_TYPE_CONST, FF_DCT_FAAN, INT_MIN, INT_MAX, V|E, "dct"},
-{"lumi_mask", "lumimasking", OFFSET(lumi_masking), FF_OPT_TYPE_FLOAT, 0, FLT_MIN, FLT_MAX, V|E},
-{"tcplx_mask", "temporal complexity masking", OFFSET(temporal_cplx_masking), FF_OPT_TYPE_FLOAT, 0, FLT_MIN, FLT_MAX, V|E},
-{"scplx_mask", "spatial complexity masking", OFFSET(spatial_cplx_masking), FF_OPT_TYPE_FLOAT, 0, FLT_MIN, FLT_MAX, V|E},
-{"p_mask", "inter masking", OFFSET(p_masking), FF_OPT_TYPE_FLOAT, 0, FLT_MIN, FLT_MAX, V|E},
-{"dark_mask", "darkness masking", OFFSET(dark_masking), FF_OPT_TYPE_FLOAT, 0, FLT_MIN, FLT_MAX, V|E},
+{"lumi_mask", "lumimasking", OFFSET(lumi_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
+{"tcplx_mask", "temporal complexity masking", OFFSET(temporal_cplx_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
+{"scplx_mask", "spatial complexity masking", OFFSET(spatial_cplx_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
+{"p_mask", "inter masking", OFFSET(p_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
+{"dark_mask", "darkness masking", OFFSET(dark_masking), FF_OPT_TYPE_FLOAT, 0, -FLT_MAX, FLT_MAX, V|E},
{"unused", NULL, OFFSET(unused), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
{"idct", NULL, OFFSET(idct_algo), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, V|E|D, "idct"},
{"auto", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_AUTO, INT_MIN, INT_MAX, V|E|D, "idct"},
@@ -598,6 +568,7 @@ static const AVOption options[]={
{"altivec", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_ALTIVEC, INT_MIN, INT_MAX, V|E|D, "idct"},
{"sh4", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SH4, INT_MIN, INT_MAX, V|E|D, "idct"},
{"simplearm", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEARM, INT_MIN, INT_MAX, V|E|D, "idct"},
+{"simplearmv5te", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_SIMPLEARMV5TE, INT_MIN, INT_MAX, V|E|D, "idct"},
{"h264", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_H264, INT_MIN, INT_MAX, V|E|D, "idct"},
{"vp3", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_VP3, INT_MIN, INT_MAX, V|E|D, "idct"},
{"ipp", NULL, 0, FF_OPT_TYPE_CONST, FF_IDCT_IPP, INT_MIN, INT_MAX, V|E|D, "idct"},
@@ -637,10 +608,10 @@ static const AVOption options[]={
{"cmp", "full pel me compare function", OFFSET(me_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"subcmp", "sub pel me compare function", OFFSET(me_sub_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"mbcmp", "macroblock compare function", OFFSET(mb_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"ildctcmp", "interlaced dct compare function", OFFSET(ildct_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"},
+{"ildctcmp", "interlaced dct compare function", OFFSET(ildct_cmp), FF_OPT_TYPE_INT, FF_CMP_VSAD, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"dia_size", NULL, OFFSET(dia_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"last_pred", NULL, OFFSET(last_predictor_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"preme", NULL, OFFSET(pre_me), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"preme", "pre motion estimation", OFFSET(pre_me), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"precmp", "pre motion estimation compare function", OFFSET(me_pre_cmp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"sad", NULL, 0, FF_OPT_TYPE_CONST, FF_CMP_SAD, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"sse", NULL, 0, FF_OPT_TYPE_CONST, FF_CMP_SSE, INT_MIN, INT_MAX, V|E, "cmp_func"},
@@ -660,11 +631,11 @@ static const AVOption options[]={
{"dctmax", NULL, 0, FF_OPT_TYPE_CONST, FF_CMP_DCTMAX, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"chroma", NULL, 0, FF_OPT_TYPE_CONST, FF_CMP_CHROMA, INT_MIN, INT_MAX, V|E, "cmp_func"},
{"pre_dia_size", NULL, OFFSET(pre_dia_size), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"subq", "sub pel motion estimation quality", OFFSET(me_subpel_quality), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"subq", "sub pel motion estimation quality", OFFSET(me_subpel_quality), FF_OPT_TYPE_INT, 8, INT_MIN, INT_MAX, V|E},
{"dtg_active_format", NULL, OFFSET(dtg_active_format), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"me_range", NULL, OFFSET(me_range), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"ibias", NULL, OFFSET(intra_quant_bias), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"pbias", NULL, OFFSET(inter_quant_bias), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"me_range", "limit motion vectors range (1023 for DivX player)", OFFSET(me_range), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"ibias", "intra quant bias", OFFSET(intra_quant_bias), FF_OPT_TYPE_INT, FF_DEFAULT_QUANT_BIAS, INT_MIN, INT_MAX, V|E},
+{"pbias", "inter quant bias", OFFSET(inter_quant_bias), FF_OPT_TYPE_INT, FF_DEFAULT_QUANT_BIAS, INT_MIN, INT_MAX, V|E},
{"color_table_id", NULL, OFFSET(color_table_id), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
{"internal_buffer_count", NULL, OFFSET(internal_buffer_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
{"global_quality", NULL, OFFSET(global_quality), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
@@ -679,24 +650,24 @@ static const AVOption options[]={
{"bits", NULL, 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_BITS, INT_MIN, INT_MAX, V|E, "mbd"},
{"rd", NULL, 0, FF_OPT_TYPE_CONST, FF_MB_DECISION_RD, INT_MIN, INT_MAX, V|E, "mbd"},
{"stream_codec_tag", NULL, OFFSET(stream_codec_tag), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"sc_threshold", NULL, OFFSET(scenechange_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"lmin", "min lagrange factor", OFFSET(lmin), FF_OPT_TYPE_INT, 2*FF_QP2LAMBDA, 0, INT_MAX, V|E},
-{"lmax", "max lagrange factor", OFFSET(lmax), FF_OPT_TYPE_INT, 31*FF_QP2LAMBDA, 0, INT_MAX, V|E},
+{"sc_threshold", "scene change threshold", OFFSET(scenechange_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"lmin", "min lagrange factor (VBR)", OFFSET(lmin), FF_OPT_TYPE_INT, 2*FF_QP2LAMBDA, 0, INT_MAX, V|E},
+{"lmax", "max lagrange factor (VBR)", OFFSET(lmax), FF_OPT_TYPE_INT, 31*FF_QP2LAMBDA, 0, INT_MAX, V|E},
{"nr", "noise reduction", OFFSET(noise_reduction), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"rc_init_occupancy", NULL, OFFSET(rc_initial_buffer_occupancy), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"inter_threshold", NULL, OFFSET(inter_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"flags2", NULL, OFFSET(flags2), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, V|A|E|D, "flags2"},
-{"error_rate", NULL, OFFSET(error_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
+{"flags2", NULL, OFFSET(flags2), FF_OPT_TYPE_FLAGS, CODEC_FLAG2_FASTPSKIP, INT_MIN, INT_MAX, V|A|E|D, "flags2"},
+{"error", NULL, OFFSET(error_rate), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"antialias", NULL, OFFSET(antialias_algo), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D, "aa"},
{"auto", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_AUTO, INT_MIN, INT_MAX, V|D, "aa"},
{"fastint", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_FASTINT, INT_MIN, INT_MAX, V|D, "aa"},
{"int", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_INT, INT_MIN, INT_MAX, V|D, "aa"},
{"float", NULL, 0, FF_OPT_TYPE_CONST, FF_AA_FLOAT, INT_MIN, INT_MAX, V|D, "aa"},
{"qns", "quantizer noise shaping", OFFSET(quantizer_noise_shaping), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"thread_count", NULL, OFFSET(thread_count), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E|D},
+{"threads", NULL, OFFSET(thread_count), FF_OPT_TYPE_INT, 1, INT_MIN, INT_MAX, V|E|D},
{"me_threshold", "motion estimaton threshold", OFFSET(me_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"mb_threshold", NULL, OFFSET(mb_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX},
-{"dc", NULL, OFFSET(intra_dc_precision), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"mb_threshold", "macroblock threshold", OFFSET(mb_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"dc", "intra_dc_precision", OFFSET(intra_dc_precision), FF_OPT_TYPE_INT, 0, INT_MIN, INT_MAX, V|E},
{"nssew", "nsse weight", OFFSET(nsse_weight), FF_OPT_TYPE_INT, 8, INT_MIN, INT_MAX, V|E},
{"skip_top", NULL, OFFSET(skip_top), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D},
{"skip_bottom", NULL, OFFSET(skip_bottom), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|D},
@@ -705,24 +676,24 @@ static const AVOption options[]={
{"level", NULL, OFFSET(level), FF_OPT_TYPE_INT, FF_LEVEL_UNKNOWN, INT_MIN, INT_MAX, V|A|E, "level"},
{"unknown", NULL, 0, FF_OPT_TYPE_CONST, FF_LEVEL_UNKNOWN, INT_MIN, INT_MAX, V|A|E, "level"},
{"lowres", NULL, OFFSET(lowres), FF_OPT_TYPE_INT, 0, 0, INT_MAX, V|D},
-{"frame_skip_threshold", NULL, OFFSET(frame_skip_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"frame_skip_factor", NULL, OFFSET(frame_skip_factor), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"frame_skip_exp", NULL, OFFSET(frame_skip_exp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"skip_threshold", "frame skip threshold", OFFSET(frame_skip_threshold), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"skip_factor", "frame skip factor", OFFSET(frame_skip_factor), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"skip_exp", "frame skip exponent", OFFSET(frame_skip_exp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"skipcmp", "frame skip compare function", OFFSET(frame_skip_cmp), FF_OPT_TYPE_INT, FF_CMP_DCTMAX, INT_MIN, INT_MAX, V|E, "cmp_func"},
-{"border_mask", NULL, OFFSET(border_masking), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
-{"mb_lmin", NULL, OFFSET(mb_lmin), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"mb_lmax", NULL, OFFSET(mb_lmax), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"me_penalty_compensation", NULL, OFFSET(me_penalty_compensation), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"border_mask", NULL, OFFSET(border_masking), FF_OPT_TYPE_FLOAT, DEFAULT, -FLT_MAX, FLT_MAX, V|E},
+{"mblmin", "min macroblock lagrange factor (VBR)", OFFSET(mb_lmin), FF_OPT_TYPE_INT, FF_QP2LAMBDA * 2, 1, FF_LAMBDA_MAX, V|E},
+{"mblmax", "max macroblock lagrange factor (VBR)", OFFSET(mb_lmax), FF_OPT_TYPE_INT, FF_QP2LAMBDA * 31, 1, FF_LAMBDA_MAX, V|E},
+{"mepc", "motion estimation bitrate penalty compensation (1.0 = 256)", OFFSET(me_penalty_compensation), FF_OPT_TYPE_INT, 256, INT_MIN, INT_MAX, V|E},
{"bidir_refine", NULL, OFFSET(bidir_refine), FF_OPT_TYPE_INT, DEFAULT, 0, 4, V|E},
{"brd_scale", NULL, OFFSET(brd_scale), FF_OPT_TYPE_INT, DEFAULT, 0, 10, V|E},
-{"crf", NULL, OFFSET(crf), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"cqp", NULL, OFFSET(cqp), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"keyint_min", NULL, OFFSET(keyint_min), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
-{"refs", NULL, OFFSET(refs), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"crf", NULL, OFFSET(crf), FF_OPT_TYPE_FLOAT, DEFAULT, 0, 51, V|E},
+{"cqp", NULL, OFFSET(cqp), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, V|E},
+{"keyint_min", NULL, OFFSET(keyint_min), FF_OPT_TYPE_INT, 25, INT_MIN, INT_MAX, V|E},
+{"refs", NULL, OFFSET(refs), FF_OPT_TYPE_INT, 1, INT_MIN, INT_MAX, V|E},
{"chromaoffset", NULL, OFFSET(chromaoffset), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"bframebias", NULL, OFFSET(bframebias), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"trellis", NULL, OFFSET(trellis), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|A|E},
-{"directpred", NULL, OFFSET(directpred), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
+{"directpred", NULL, OFFSET(directpred), FF_OPT_TYPE_INT, 2, INT_MIN, INT_MAX, V|E},
{"bpyramid", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_BPYRAMID, INT_MIN, INT_MAX, V|E, "flags2"},
{"wpred", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_WPRED, INT_MIN, INT_MAX, V|E, "flags2"},
{"mixed_refs", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_MIXED_REFS, INT_MIN, INT_MAX, V|E, "flags2"},
@@ -730,7 +701,7 @@ static const AVOption options[]={
{"fastpskip", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_FASTPSKIP, INT_MIN, INT_MAX, V|E, "flags2"},
{"aud", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_AUD, INT_MIN, INT_MAX, V|E, "flags2"},
{"brdo", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_BRDO, INT_MIN, INT_MAX, V|E, "flags2"},
-{"complexityblur", NULL, OFFSET(complexityblur), FF_OPT_TYPE_FLOAT, DEFAULT, FLT_MIN, FLT_MAX, V|E},
+{"complexityblur", NULL, OFFSET(complexityblur), FF_OPT_TYPE_FLOAT, 20.0, FLT_MIN, FLT_MAX, V|E},
{"deblockalpha", NULL, OFFSET(deblockalpha), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"deblockbeta", NULL, OFFSET(deblockbeta), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, V|E},
{"partitions", NULL, OFFSET(partitions), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, V|E, "partitions"},
@@ -751,11 +722,17 @@ static const AVOption options[]={
{"prediction_order_method", NULL, OFFSET(prediction_order_method), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
{"min_partition_order", NULL, OFFSET(min_partition_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
{"max_partition_order", NULL, OFFSET(max_partition_order), FF_OPT_TYPE_INT, -1, INT_MIN, INT_MAX, A|E},
+{"timecode_frame_start", NULL, OFFSET(timecode_frame_start), FF_OPT_TYPE_INT, 0, 0, INT_MAX, V|E},
+{"drop_frame_timecode", NULL, 0, FF_OPT_TYPE_CONST, CODEC_FLAG2_DROP_FRAME_TIMECODE, INT_MIN, INT_MAX, V|E, "flags2"},
{NULL},
};
#undef A
#undef V
+#undef S
+#undef E
+#undef D
+#undef DEFAULT
static AVClass av_codec_context_class = { "AVCodecContext", context_to_name, options };
@@ -763,60 +740,19 @@ void avcodec_get_context_defaults(AVCodecContext *s){
memset(s, 0, sizeof(AVCodecContext));
s->av_class= &av_codec_context_class;
- s->bit_rate= 800*1000;
- s->bit_rate_tolerance= s->bit_rate*10;
- s->qmin= 2;
- s->qmax= 31;
- s->mb_lmin= FF_QP2LAMBDA * 2;
- s->mb_lmax= FF_QP2LAMBDA * 31;
+
+ av_opt_set_defaults(s);
+
s->rc_eq= "tex^qComp";
- s->cqp = -1;
- s->refs = 1;
- s->directpred = 2;
- s->qcompress= 0.5;
- s->complexityblur = 20.0;
- s->keyint_min = 25;
- s->flags2 = CODEC_FLAG2_FASTPSKIP;
- s->max_qdiff= 3;
- s->b_quant_factor=1.25;
- s->b_quant_offset=1.25;
- s->i_quant_factor=-0.8;
- s->i_quant_offset=0.0;
- s->error_concealment= 3;
- s->error_resilience= 1;
- s->workaround_bugs= FF_BUG_AUTODETECT;
s->time_base= (AVRational){0,1};
- s->gop_size= 50;
- s->me_method= ME_EPZS;
s->get_buffer= avcodec_default_get_buffer;
s->release_buffer= avcodec_default_release_buffer;
s->get_format= avcodec_default_get_format;
s->execute= avcodec_default_execute;
- s->thread_count=1;
- s->me_subpel_quality=8;
- s->lmin= FF_QP2LAMBDA * s->qmin;
- s->lmax= FF_QP2LAMBDA * s->qmax;
s->sample_aspect_ratio= (AVRational){0,1};
- s->ildct_cmp= FF_CMP_VSAD;
- s->profile= FF_PROFILE_UNKNOWN;
- s->level= FF_LEVEL_UNKNOWN;
- s->me_penalty_compensation= 256;
s->pix_fmt= PIX_FMT_NONE;
- s->frame_skip_cmp= FF_CMP_DCTMAX;
- s->nsse_weight= 8;
s->sample_fmt= SAMPLE_FMT_S16; // FIXME: set to NONE
- s->mv0_threshold= 256;
- s->b_sensitivity= 40;
- s->compression_level = FF_COMPRESSION_DEFAULT;
- s->use_lpc = -1;
- s->min_prediction_order = -1;
- s->max_prediction_order = -1;
- s->prediction_order_method = -1;
- s->min_partition_order = -1;
- s->max_partition_order = -1;
-
- s->intra_quant_bias= FF_DEFAULT_QUANT_BIAS;
- s->inter_quant_bias= FF_DEFAULT_QUANT_BIAS;
+
s->palctrl = NULL;
s->reget_buffer= avcodec_default_reget_buffer;
}
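
Note on the defaults hunk above: it drops the long list of hand-initialized AVCodecContext fields and lets av_opt_set_defaults() pull the values from the AVOption table instead. A minimal sketch of that table-driven pattern, using a made-up DemoCtx struct and option name (everything below is illustrative, not part of the patch, and assumes the 2007-era AVOption/AVClass layout used in this file):

#include <stddef.h>
#include <string.h>
#include "avcodec.h"
#include "opt.h"

typedef struct DemoCtx {
    AVClass *av_class;   /* must come first: av_opt_* finds the option table through it */
    int refs;
} DemoCtx;

#define DEMO_OFFSET(x) offsetof(DemoCtx, x)
static const AVOption demo_options[] = {
    /* name,   help,                         offset,            type,            default, min, max, flags */
    {"refs",  "number of reference frames",  DEMO_OFFSET(refs), FF_OPT_TYPE_INT, 1,       0,   16,  0},
    {NULL},
};
static AVClass demo_class = { "DemoCtx", NULL, demo_options };

static void demo_get_defaults(DemoCtx *s)
{
    memset(s, 0, sizeof(*s));
    s->av_class = &demo_class;
    av_opt_set_defaults(s);   /* writes the table default (1) into s->refs */
}
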
diff --git a/src/libffmpeg/libavcodec/vc1.c b/contrib/ffmpeg/libavcodec/vc1.c
index 731baa4dc..cd3c0c2d6 100644
--- a/src/libffmpeg/libavcodec/vc1.c
+++ b/contrib/ffmpeg/libavcodec/vc1.c
@@ -3,18 +3,20 @@
* Copyright (c) 2006 Konstantin Shishkov
* Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -112,7 +114,7 @@ enum MVModes {
enum BMVTypes {
BMV_TYPE_BACKWARD,
BMV_TYPE_FORWARD,
- BMV_TYPE_INTERPOLATED = 3 //XXX: ??
+ BMV_TYPE_INTERPOLATED
};
//@}
@@ -209,6 +211,16 @@ enum CodingSet {
CS_HIGH_RATE_INTER
};
+/** @name Overlap conditions for Advanced Profile */
+//@{
+enum COTypes {
+ CONDOVER_NONE = 0,
+ CONDOVER_ALL,
+ CONDOVER_SELECT
+};
+//@}
+
+
/** The VC1 Context
* @fixme Change size wherever another size is more efficient
* Many members are only used for Advanced Profile
@@ -246,6 +258,7 @@ typedef struct VC1Context{
int matrix_coef; ///< 8bits, Color primaries->YCbCr transform matrix
int hrd_param_flag; ///< Presence of Hypothetical Reference
///< Decoder parameters
+ int psf; ///< Progressive Segmented Frame
//@}
/** Sequence header data for all Profiles
@@ -321,6 +334,7 @@ typedef struct VC1Context{
int dmb_is_raw; ///< direct mb plane is raw
int skip_is_raw; ///< skip mb plane is not coded
uint8_t luty[256], lutuv[256]; // lookup tables used for intensity compensation
+ int use_ic; ///< use intensity compensation in B-frames
int rnd; ///< rounding control
/** Frame decoding info for S/M profiles only */
@@ -344,8 +358,10 @@ typedef struct VC1Context{
int hrd_num_leaky_buckets;
uint8_t bit_rate_exponent;
uint8_t buffer_size_exponent;
-// BitPlane ac_pred_plane; ///< AC prediction flags bitplane
-// BitPlane over_flags_plane; ///< Overflags bitplane
+ uint8_t* acpred_plane; ///< AC prediction flags bitplane
+ int acpred_is_raw;
+ uint8_t* over_flags_plane; ///< Overflags bitplane
+ int overflg_is_raw;
uint8_t condover;
uint16_t *hrd_rate, *hrd_buffer;
uint8_t *hrd_fullness;
@@ -354,6 +370,9 @@ typedef struct VC1Context{
uint8_t range_mapy;
uint8_t range_mapuv;
//@}
+
+ int p_frame_skipped;
+ int bi_type;
} VC1Context;
/**
@@ -546,7 +565,6 @@ static void decode_colskip(uint8_t* plane, int width, int height, int stride, Ge
* @param v VC-1 context for bit reading and logging
* @return Status
* @fixme FIXME: Optimize
- * @todo TODO: Decide if a struct is needed
*/
static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
{
@@ -718,7 +736,6 @@ static int vop_dquant_decoding(VC1Context *v)
}
/** Put block onto picture
- * @todo move to DSPContext
*/
static void vc1_put_block(VC1Context *v, DCTELEM block[6][64])
{
@@ -763,8 +780,14 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
if(!v->s.last_picture.data[0])return;
- mx = s->mv[0][0][0];
- my = s->mv[0][0][1];
+ mx = s->mv[dir][0][0];
+ my = s->mv[dir][0][1];
+
+ // store motion vectors for further use in B frames
+ if(s->pict_type == P_TYPE) {
+ s->current_picture.motion_val[1][s->block_index[0]][0] = mx;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = my;
+ }
uvmx = (mx + ((mx & 3) == 3)) >> 1;
uvmy = (my + ((my & 3) == 3)) >> 1;
if(!dir) {
@@ -856,8 +879,8 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
}
if(v->fastuvmc) {
- uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
- uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
+ uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
+ uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
}
if(s->mspel) {
@@ -867,33 +890,26 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
srcY += s->linesize * 8;
dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
- } else if(!s->quarter_sample) { // hpel mc
- mx >>= 1;
- my >>= 1;
- dxy = ((my & 1) << 1) | (mx & 1);
+ } else { // hpel mc - always used for luma
+ dxy = (my & 2) | ((mx & 2) >> 1);
if(!v->rnd)
dsp->put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
else
dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
- } else {
- dxy = ((my & 3) << 2) | (mx & 3);
-
- if(!v->rnd)
- dsp->put_qpel_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize);
- else
- dsp->put_no_rnd_qpel_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize);
}
if(s->flags & CODEC_FLAG_GRAY) return;
- /* Chroma MC always uses qpel blilinear */
+ /* Chroma MC always uses qpel bilinear */
uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
+ uvmx = (uvmx&3)<<1;
+ uvmy = (uvmy&3)<<1;
if(!v->rnd){
- dsp->put_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
- dsp->put_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
+ dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
}else{
- dsp->put_no_rnd_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
- dsp->put_no_rnd_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
+ dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
}
}
@@ -922,8 +938,9 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n)
srcY += src_y * s->linesize + src_x;
- if(v->rangeredfrm || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel
- || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel){
+ if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
+ || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 8 - s->mspel*2
+ || (unsigned)(src_y - s->mspel) > s->v_edge_pos - (my&3) - 8 - s->mspel*2){
srcY -= s->mspel * (1 + s->linesize);
ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9+s->mspel*2, 9+s->mspel*2,
src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
@@ -939,28 +956,29 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n)
src += s->linesize;
}
}
+ /* if we deal with intensity compensation we need to scale source blocks */
+ if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
+ int i, j;
+ uint8_t *src;
+
+ src = srcY;
+ for(j = 0; j < 9 + s->mspel*2; j++) {
+ for(i = 0; i < 9 + s->mspel*2; i++) src[i] = v->luty[src[i]];
+ src += s->linesize;
+ }
+ }
srcY += s->mspel * (1 + s->linesize);
}
if(s->mspel) {
dxy = ((my & 3) << 2) | (mx & 3);
dsp->put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, v->rnd);
- } else if(!s->quarter_sample) { // hpel mc
- mx >>= 1;
- my >>= 1;
- dxy = ((my & 1) << 1) | (mx & 1);
-
+ } else { // hpel mc - always used for luma
+ dxy = (my & 2) | ((mx & 2) >> 1);
if(!v->rnd)
dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
else
dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
- } else {
- dxy = ((my & 3) << 2) | (mx & 3);
-
- if(!v->rnd)
- dsp->put_qpel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize);
- else
- dsp->put_no_rnd_qpel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize);
}
}
@@ -1030,6 +1048,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
} else
return; //no need to do MC for inter blocks
+ s->current_picture.motion_val[1][s->block_index[0]][0] = tx;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = ty;
uvmx = (tx + ((tx&3) == 3)) >> 1;
uvmy = (ty + ((ty&3) == 3)) >> 1;
@@ -1040,7 +1060,8 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
srcU = s->last_picture.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
srcV = s->last_picture.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
- if(v->rangeredfrm || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
+ if(v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
+ || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
|| (unsigned)uvsrc_y > (s->v_edge_pos >> 1) - 9){
ff_emulated_edge_mc(s->edge_emu_buffer , srcU, s->uvlinesize, 8+1, 8+1,
uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
@@ -1064,24 +1085,43 @@ static void vc1_mc_4mv_chroma(VC1Context *v)
src2 += s->uvlinesize;
}
}
+ /* if we deal with intensity compensation we need to scale source blocks */
+ if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
+ int i, j;
+ uint8_t *src, *src2;
+
+ src = srcU; src2 = srcV;
+ for(j = 0; j < 9; j++) {
+ for(i = 0; i < 9; i++) {
+ src[i] = v->lutuv[src[i]];
+ src2[i] = v->lutuv[src2[i]];
+ }
+ src += s->uvlinesize;
+ src2 += s->uvlinesize;
+ }
+ }
}
if(v->fastuvmc) {
- uvmx = uvmx + ((uvmx<0)?(uvmx&1):-(uvmx&1));
- uvmy = uvmy + ((uvmy<0)?(uvmy&1):-(uvmy&1));
+ uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
+ uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
}
- /* Chroma MC always uses qpel blilinear */
+ /* Chroma MC always uses qpel bilinear */
uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
+ uvmx = (uvmx&3)<<1;
+ uvmy = (uvmy&3)<<1;
if(!v->rnd){
- dsp->put_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
- dsp->put_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
+ dsp->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
}else{
- dsp->put_no_rnd_qpel_pixels_tab[1][uvdxy](s->dest[1], srcU, s->uvlinesize);
- dsp->put_no_rnd_qpel_pixels_tab[1][uvdxy](s->dest[2], srcV, s->uvlinesize);
+ dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->put_no_rnd_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
}
}
+static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
+
/**
* Decode Simple/Main Profiles sequence header
* @see Figure 7-8, p16-17
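
Note on the motion-compensation hunks above: they add intensity compensation, so every reference sample fetched for a P-frame coded with MV_PMODE_INTENSITY_COMP is remapped through the per-frame luty/lutuv tables before prediction. A self-contained sketch of that remapping step (function name and parameters are illustrative, not taken from the patch):

#include <stdint.h>

/* Remap a w x h block of reference samples in place through a 256-entry
 * lookup table, mirroring the loops added to vc1_mc_4mv_luma()/_chroma(). */
static void apply_ic_lut(uint8_t *blk, int stride, int w, int h,
                         const uint8_t lut[256])
{
    int i, j;
    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++)
            blk[i] = lut[blk[i]];
        blk += stride;
    }
}
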
@@ -1093,7 +1133,7 @@ static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
{
VC1Context *v = avctx->priv_data;
- av_log(avctx, AV_LOG_INFO, "Header: %0X\n", show_bits(gb, 32));
+ av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits(gb, 32));
v->profile = get_bits(gb, 2);
if (v->profile == 2)
{
@@ -1103,18 +1143,7 @@ static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
if (v->profile == PROFILE_ADVANCED)
{
- v->level = get_bits(gb, 3);
- if(v->level >= 5)
- {
- av_log(avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
- }
- v->chromaformat = get_bits(gb, 2);
- if (v->chromaformat != 1)
- {
- av_log(avctx, AV_LOG_ERROR,
- "Only 4:2:0 chroma format supported\n");
- return -1;
- }
+ return decode_sequence_header_adv(v, gb);
}
else
{
@@ -1138,23 +1167,20 @@ static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
"LOOPFILTER shell not be enabled in simple profile\n");
}
- if (v->profile < PROFILE_ADVANCED)
+ v->res_x8 = get_bits(gb, 1); //reserved
+ if (v->res_x8)
{
- v->res_x8 = get_bits(gb, 1); //reserved
- if (v->res_x8)
- {
- av_log(avctx, AV_LOG_ERROR,
- "1 for reserved RES_X8 is forbidden\n");
- //return -1;
- }
- v->multires = get_bits(gb, 1);
- v->res_fasttx = get_bits(gb, 1);
- if (!v->res_fasttx)
- {
- av_log(avctx, AV_LOG_ERROR,
- "0 for reserved RES_FASTTX is forbidden\n");
- //return -1;
- }
+ av_log(avctx, AV_LOG_ERROR,
+ "1 for reserved RES_X8 is forbidden\n");
+ //return -1;
+ }
+ v->multires = get_bits(gb, 1);
+ v->res_fasttx = get_bits(gb, 1);
+ if (!v->res_fasttx)
+ {
+ av_log(avctx, AV_LOG_ERROR,
+ "0 for reserved RES_FASTTX is forbidden\n");
+ //return -1;
}
v->fastuvmc = get_bits(gb, 1); //common
@@ -1174,44 +1200,38 @@ static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
v->dquant = get_bits(gb, 2); //common
v->vstransform = get_bits(gb, 1); //common
- if (v->profile < PROFILE_ADVANCED)
+ v->res_transtab = get_bits(gb, 1);
+ if (v->res_transtab)
{
- v->res_transtab = get_bits(gb, 1);
- if (v->res_transtab)
- {
- av_log(avctx, AV_LOG_ERROR,
- "1 for reserved RES_TRANSTAB is forbidden\n");
- return -1;
- }
+ av_log(avctx, AV_LOG_ERROR,
+ "1 for reserved RES_TRANSTAB is forbidden\n");
+ return -1;
}
v->overlap = get_bits(gb, 1); //common
- if (v->profile < PROFILE_ADVANCED)
+ v->s.resync_marker = get_bits(gb, 1);
+ v->rangered = get_bits(gb, 1);
+ if (v->rangered && v->profile == PROFILE_SIMPLE)
{
- v->s.resync_marker = get_bits(gb, 1);
- v->rangered = get_bits(gb, 1);
- if (v->rangered && v->profile == PROFILE_SIMPLE)
- {
- av_log(avctx, AV_LOG_INFO,
- "RANGERED should be set to 0 in simple profile\n");
- }
+ av_log(avctx, AV_LOG_INFO,
+ "RANGERED should be set to 0 in simple profile\n");
}
v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
v->quantizer_mode = get_bits(gb, 2); //common
- if (v->profile < PROFILE_ADVANCED)
+ v->finterpflag = get_bits(gb, 1); //common
+ v->res_rtm_flag = get_bits(gb, 1); //reserved
+ if (!v->res_rtm_flag)
{
- v->finterpflag = get_bits(gb, 1); //common
- v->res_rtm_flag = get_bits(gb, 1); //reserved
- if (!v->res_rtm_flag)
- {
- av_log(avctx, AV_LOG_ERROR,
- "0 for reserved RES_RTM_FLAG is forbidden\n");
- //return -1;
- }
- av_log(avctx, AV_LOG_DEBUG,
+// av_log(avctx, AV_LOG_ERROR,
+// "0 for reserved RES_RTM_FLAG is forbidden\n");
+ av_log(avctx, AV_LOG_ERROR,
+ "Old WMV3 version detected, only I-frames will be decoded\n");
+ //return -1;
+ }
+ av_log(avctx, AV_LOG_DEBUG,
"Profile %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
"LoopFilter=%i, MultiRes=%i, FastUVMC=%i, Extended MV=%i\n"
"Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
@@ -1221,11 +1241,128 @@ static int decode_sequence_header(AVCodecContext *avctx, GetBitContext *gb)
v->rangered, v->vstransform, v->overlap, v->s.resync_marker,
v->dquant, v->quantizer_mode, avctx->max_b_frames
);
- return 0;
+ return 0;
+}
+
+static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
+{
+ v->res_rtm_flag = 1;
+ v->level = get_bits(gb, 3);
+ if(v->level >= 5)
+ {
+ av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
}
- return -1;
+ v->chromaformat = get_bits(gb, 2);
+ if (v->chromaformat != 1)
+ {
+ av_log(v->s.avctx, AV_LOG_ERROR,
+ "Only 4:2:0 chroma format supported\n");
+ return -1;
+ }
+
+ // (fps-2)/4 (->30)
+ v->frmrtq_postproc = get_bits(gb, 3); //common
+ // (bitrate-32kbps)/64kbps
+ v->bitrtq_postproc = get_bits(gb, 5); //common
+ v->postprocflag = get_bits(gb, 1); //common
+
+ v->s.avctx->coded_width = (get_bits(gb, 12) + 1) << 1;
+ v->s.avctx->coded_height = (get_bits(gb, 12) + 1) << 1;
+ v->broadcast = get_bits1(gb);
+ v->interlace = get_bits1(gb);
+ v->tfcntrflag = get_bits1(gb);
+ v->finterpflag = get_bits1(gb);
+ get_bits1(gb); // reserved
+ v->psf = get_bits1(gb);
+ if(v->psf) { //PsF, 6.1.13
+ av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
+ return -1;
+ }
+ if(get_bits1(gb)) { //Display Info - decoding is not affected by it
+ int w, h, ar = 0;
+ av_log(v->s.avctx, AV_LOG_INFO, "Display extended info:\n");
+ w = get_bits(gb, 14);
+ h = get_bits(gb, 14);
+ av_log(v->s.avctx, AV_LOG_INFO, "Display dimensions: %ix%i\n", w, h);
+ //TODO: store aspect ratio in AVCodecContext
+ if(get_bits1(gb))
+ ar = get_bits(gb, 4);
+ if(ar == 15) {
+ w = get_bits(gb, 8);
+ h = get_bits(gb, 8);
+ }
+
+ if(get_bits1(gb)){ //framerate stuff
+ if(get_bits1(gb)) {
+ get_bits(gb, 16);
+ } else {
+ get_bits(gb, 8);
+ get_bits(gb, 4);
+ }
+ }
+
+ if(get_bits1(gb)){
+ v->color_prim = get_bits(gb, 8);
+ v->transfer_char = get_bits(gb, 8);
+ v->matrix_coef = get_bits(gb, 8);
+ }
+ }
+
+ v->hrd_param_flag = get_bits1(gb);
+ if(v->hrd_param_flag) {
+ int i;
+ v->hrd_num_leaky_buckets = get_bits(gb, 5);
+ get_bits(gb, 4); //bitrate exponent
+ get_bits(gb, 4); //buffer size exponent
+ for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
+ get_bits(gb, 16); //hrd_rate[n]
+ get_bits(gb, 16); //hrd_buffer[n]
+ }
+ }
+ return 0;
}
+static int decode_entry_point(AVCodecContext *avctx, GetBitContext *gb)
+{
+ VC1Context *v = avctx->priv_data;
+ int i;
+
+ av_log(avctx, AV_LOG_DEBUG, "Entry point: %08X\n", show_bits_long(gb, 32));
+ get_bits1(gb); // broken link
+    avctx->max_b_frames = 1 - get_bits1(gb); // 'closed entry' also signals possible B-frames
+ v->panscanflag = get_bits1(gb);
+ get_bits1(gb); // refdist flag
+ v->s.loop_filter = get_bits1(gb);
+ v->fastuvmc = get_bits1(gb);
+ v->extended_mv = get_bits1(gb);
+ v->dquant = get_bits(gb, 2);
+ v->vstransform = get_bits1(gb);
+ v->overlap = get_bits1(gb);
+ v->quantizer_mode = get_bits(gb, 2);
+
+ if(v->hrd_param_flag){
+ for(i = 0; i < v->hrd_num_leaky_buckets; i++) {
+ get_bits(gb, 8); //hrd_full[n]
+ }
+ }
+
+ if(get_bits1(gb)){
+ avctx->coded_width = (get_bits(gb, 12)+1)<<1;
+ avctx->coded_height = (get_bits(gb, 12)+1)<<1;
+ }
+ if(v->extended_mv)
+ v->extended_dmv = get_bits1(gb);
+ if(get_bits1(gb)) {
+ av_log(avctx, AV_LOG_ERROR, "Luma scaling is not supported, expect wrong picture\n");
+ skip_bits(gb, 3); // Y range, ignored for now
+ }
+ if(get_bits1(gb)) {
+ av_log(avctx, AV_LOG_ERROR, "Chroma scaling is not supported, expect wrong picture\n");
+ skip_bits(gb, 3); // UV range, ignored for now
+ }
+
+ return 0;
+}
static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
{
@@ -1243,18 +1380,19 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
} else v->s.pict_type = P_TYPE;
} else v->s.pict_type = v->s.pict_type ? P_TYPE : I_TYPE;
- if(v->s.pict_type == I_TYPE)
- get_bits(gb, 7); // skip buffer fullness
+ v->bi_type = 0;
if(v->s.pict_type == B_TYPE) {
v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
v->bfraction = vc1_bfraction_lut[v->bfraction];
- if(v->bfraction == -1) {
+ if(v->bfraction == 0) {
v->s.pict_type = BI_TYPE;
}
}
+ if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
+ get_bits(gb, 7); // skip buffer fullness
/* calculate RND */
- if(v->s.pict_type == I_TYPE)
+ if(v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
v->rnd = 1;
if(v->s.pict_type == P_TYPE)
v->rnd ^= 1;
@@ -1292,7 +1430,8 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
//av_log(v->s.avctx, AV_LOG_INFO, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
// (v->s.pict_type == P_TYPE) ? 'P' : ((v->s.pict_type == I_TYPE) ? 'I' : 'B'), pqindex, v->pq, v->halfpq, v->rangeredfrm);
- //TODO: complete parsing for P/B/BI frames
+ if(v->s.pict_type == I_TYPE || v->s.pict_type == P_TYPE) v->use_ic = 0;
+
switch(v->s.pict_type) {
case P_TYPE:
if (v->pq < 5) v->tt_index = 0;
@@ -1307,6 +1446,7 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
v->lumscale = get_bits(gb, 6);
v->lumshift = get_bits(gb, 6);
+ v->use_ic = 1;
/* fill lookup tables for intensity compensation */
if(!v->lumscale) {
scale = -64;
@@ -1428,6 +1568,261 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
/* DC Syntax */
v->s.dc_table_index = get_bits(gb, 1);
+ if(v->s.pict_type == BI_TYPE) {
+ v->s.pict_type = B_TYPE;
+ v->bi_type = 1;
+ }
+ return 0;
+}
+
+static int vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
+{
+ int fcm;
+ int pqindex, lowquant;
+ int status;
+
+ v->p_frame_skipped = 0;
+
+ if(v->interlace)
+ fcm = decode012(gb);
+ switch(get_prefix(gb, 0, 4)) {
+ case 0:
+ v->s.pict_type = P_TYPE;
+ break;
+ case 1:
+ v->s.pict_type = B_TYPE;
+ break;
+ case 2:
+ v->s.pict_type = I_TYPE;
+ break;
+ case 3:
+ v->s.pict_type = BI_TYPE;
+ break;
+ case 4:
+ v->s.pict_type = P_TYPE; // skipped pic
+ v->p_frame_skipped = 1;
+ return 0;
+ }
+ if(v->tfcntrflag)
+ get_bits(gb, 8);
+ if(v->broadcast) {
+ if(!v->interlace || v->panscanflag) {
+ get_bits(gb, 2);
+ } else {
+ get_bits1(gb);
+ get_bits1(gb);
+ }
+ }
+ if(v->panscanflag) {
+ //...
+ }
+ v->rnd = get_bits1(gb);
+ if(v->interlace)
+ v->uvsamp = get_bits1(gb);
+ if(v->finterpflag) v->interpfrm = get_bits(gb, 1);
+ if(v->s.pict_type == B_TYPE) {
+ v->bfraction = get_vlc2(gb, vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
+ v->bfraction = vc1_bfraction_lut[v->bfraction];
+ if(v->bfraction == 0) {
+ v->s.pict_type = BI_TYPE; /* XXX: should not happen here */
+ }
+ }
+ pqindex = get_bits(gb, 5);
+ v->pqindex = pqindex;
+ if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
+ v->pq = pquant_table[0][pqindex];
+ else
+ v->pq = pquant_table[1][pqindex];
+
+ v->pquantizer = 1;
+ if (v->quantizer_mode == QUANT_FRAME_IMPLICIT)
+ v->pquantizer = pqindex < 9;
+ if (v->quantizer_mode == QUANT_NON_UNIFORM)
+ v->pquantizer = 0;
+ v->pqindex = pqindex;
+ if (pqindex < 9) v->halfpq = get_bits(gb, 1);
+ else v->halfpq = 0;
+ if (v->quantizer_mode == QUANT_FRAME_EXPLICIT)
+ v->pquantizer = get_bits(gb, 1);
+
+ switch(v->s.pict_type) {
+ case I_TYPE:
+ case BI_TYPE:
+ status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ v->condover = CONDOVER_NONE;
+ if(v->overlap && v->pq <= 8) {
+ v->condover = decode012(gb);
+ if(v->condover == CONDOVER_SELECT) {
+ status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ }
+ }
+ break;
+ case P_TYPE:
+ if(v->postprocflag)
+ v->postproc = get_bits1(gb);
+ if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
+ else v->mvrange = 0;
+ v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
+ v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
+ v->range_x = 1 << (v->k_x - 1);
+ v->range_y = 1 << (v->k_y - 1);
+
+ if (v->pq < 5) v->tt_index = 0;
+ else if(v->pq < 13) v->tt_index = 1;
+ else v->tt_index = 2;
+
+ lowquant = (v->pq > 12) ? 0 : 1;
+ v->mv_mode = mv_pmode_table[lowquant][get_prefix(gb, 1, 4)];
+ if (v->mv_mode == MV_PMODE_INTENSITY_COMP)
+ {
+ int scale, shift, i;
+ v->mv_mode2 = mv_pmode_table2[lowquant][get_prefix(gb, 1, 3)];
+ v->lumscale = get_bits(gb, 6);
+ v->lumshift = get_bits(gb, 6);
+ /* fill lookup tables for intensity compensation */
+ if(!v->lumscale) {
+ scale = -64;
+ shift = (255 - v->lumshift * 2) << 6;
+ if(v->lumshift > 31)
+ shift += 128 << 6;
+ } else {
+ scale = v->lumscale + 32;
+ if(v->lumshift > 31)
+ shift = (v->lumshift - 64) << 6;
+ else
+ shift = v->lumshift << 6;
+ }
+ for(i = 0; i < 256; i++) {
+ v->luty[i] = clip_uint8((scale * i + shift + 32) >> 6);
+ v->lutuv[i] = clip_uint8((scale * (i - 128) + 128*64 + 32) >> 6);
+ }
+ }
+ if(v->mv_mode == MV_PMODE_1MV_HPEL || v->mv_mode == MV_PMODE_1MV_HPEL_BILIN)
+ v->s.quarter_sample = 0;
+ else if(v->mv_mode == MV_PMODE_INTENSITY_COMP) {
+ if(v->mv_mode2 == MV_PMODE_1MV_HPEL || v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN)
+ v->s.quarter_sample = 0;
+ else
+ v->s.quarter_sample = 1;
+ } else
+ v->s.quarter_sample = 1;
+ v->s.mspel = !(v->mv_mode == MV_PMODE_1MV_HPEL_BILIN || (v->mv_mode == MV_PMODE_INTENSITY_COMP && v->mv_mode2 == MV_PMODE_1MV_HPEL_BILIN));
+
+ if ((v->mv_mode == MV_PMODE_INTENSITY_COMP &&
+ v->mv_mode2 == MV_PMODE_MIXED_MV)
+ || v->mv_mode == MV_PMODE_MIXED_MV)
+ {
+ status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ } else {
+ v->mv_type_is_raw = 0;
+ memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
+ }
+ status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+
+ /* Hopefully this is correct for P frames */
+ v->s.mv_table_index = get_bits(gb, 2); //but using vc1_ tables
+ v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
+ if (v->dquant)
+ {
+ av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+ vop_dquant_decoding(v);
+ }
+
+ v->ttfrm = 0; //FIXME Is that so ?
+ if (v->vstransform)
+ {
+ v->ttmbf = get_bits(gb, 1);
+ if (v->ttmbf)
+ {
+ v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
+ }
+ } else {
+ v->ttmbf = 1;
+ v->ttfrm = TT_8X8;
+ }
+ break;
+ case B_TYPE:
+ if(v->postprocflag)
+ v->postproc = get_bits1(gb);
+ if (v->extended_mv) v->mvrange = get_prefix(gb, 0, 3);
+ else v->mvrange = 0;
+ v->k_x = v->mvrange + 9 + (v->mvrange >> 1); //k_x can be 9 10 12 13
+ v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
+ v->range_x = 1 << (v->k_x - 1);
+ v->range_y = 1 << (v->k_y - 1);
+
+ if (v->pq < 5) v->tt_index = 0;
+ else if(v->pq < 13) v->tt_index = 1;
+ else v->tt_index = 2;
+
+ lowquant = (v->pq > 12) ? 0 : 1;
+ v->mv_mode = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
+ v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
+ v->s.mspel = v->s.quarter_sample;
+
+ status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+ status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+ if (status < 0) return -1;
+ av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+ "Imode: %i, Invert: %i\n", status>>1, status&1);
+
+ v->s.mv_table_index = get_bits(gb, 2);
+ v->cbpcy_vlc = &vc1_cbpcy_p_vlc[get_bits(gb, 2)];
+
+ if (v->dquant)
+ {
+ av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+ vop_dquant_decoding(v);
+ }
+
+ v->ttfrm = 0;
+ if (v->vstransform)
+ {
+ v->ttmbf = get_bits(gb, 1);
+ if (v->ttmbf)
+ {
+ v->ttfrm = ttfrm_to_tt[get_bits(gb, 2)];
+ }
+ } else {
+ v->ttmbf = 1;
+ v->ttfrm = TT_8X8;
+ }
+ break;
+ }
+
+ /* AC Syntax */
+ v->c_ac_table_index = decode012(gb);
+ if (v->s.pict_type == I_TYPE || v->s.pict_type == BI_TYPE)
+ {
+ v->y_ac_table_index = decode012(gb);
+ }
+ /* DC Syntax */
+ v->s.dc_table_index = get_bits(gb, 1);
+ if (v->s.pict_type == I_TYPE && v->dquant) {
+ av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+ vop_dquant_decoding(v);
+ }
+
+ v->bi_type = 0;
+ if(v->s.pict_type == BI_TYPE) {
+ v->s.pict_type = B_TYPE;
+ v->bi_type = 1;
+ }
return 0;
}
@@ -1435,15 +1830,12 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
/**
* @defgroup block VC-1 Block-level functions
* @see 7.1.4, p91 and 8.1.1.7, p(1)04
- * @todo TODO: Integrate to MpegEncContext facilities
* @{
*/
/**
* @def GET_MQUANT
* @brief Get macroblock-level quantizer scale
- * @warning XXX: qdiff to the frame quant, not previous quant ?
- * @fixme XXX: Don't know how to initialize mquant otherwise in last case
*/
#define GET_MQUANT() \
if (v->dquantfrm) \
@@ -1484,7 +1876,6 @@ static int vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
* @see MVDATA decoding from 8.3.5.2, p(1)20
* @param _dmv_x Horizontal differential for decoded MV
* @param _dmv_y Vertical differential for decoded MV
- * @todo TODO: Use MpegEncContext arrays to store them
*/
#define GET_MVDATA(_dmv_x, _dmv_y) \
index = 1 + get_vlc2(gb, vc1_mv_diff_vlc[s->mv_table_index].table,\
@@ -1615,9 +2006,9 @@ static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, i
/* Calculate hybrid prediction as specified in 8.3.5.3.5 */
if((!s->first_slice_line || (n==2 || n==3)) && (s->mb_x || (n==1 || n==3))) {
if(is_intra[xy - wrap])
- sum = ABS(px) + ABS(py);
+ sum = FFABS(px) + FFABS(py);
else
- sum = ABS(px - A[0]) + ABS(py - A[1]);
+ sum = FFABS(px - A[0]) + FFABS(py - A[1]);
if(sum > 32) {
if(get_bits1(&s->gb)) {
px = A[0];
@@ -1628,9 +2019,9 @@ static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, i
}
} else {
if(is_intra[xy - 1])
- sum = ABS(px) + ABS(py);
+ sum = FFABS(px) + FFABS(py);
else
- sum = ABS(px - C[0]) + ABS(py - C[1]);
+ sum = FFABS(px - C[0]) + FFABS(py - C[1]);
if(sum > 32) {
if(get_bits1(&s->gb)) {
px = A[0];
@@ -1655,44 +2046,347 @@ static inline void vc1_pred_mv(MpegEncContext *s, int n, int dmv_x, int dmv_y, i
}
}
+/** Motion compensation for direct or interpolated blocks in B-frames
+ */
+static void vc1_interp_mc(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+ DSPContext *dsp = &v->s.dsp;
+ uint8_t *srcY, *srcU, *srcV;
+ int dxy, uvdxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
+
+ if(!v->s.next_picture.data[0])return;
+
+ mx = s->mv[1][0][0];
+ my = s->mv[1][0][1];
+ uvmx = (mx + ((mx & 3) == 3)) >> 1;
+ uvmy = (my + ((my & 3) == 3)) >> 1;
+ srcY = s->next_picture.data[0];
+ srcU = s->next_picture.data[1];
+ srcV = s->next_picture.data[2];
+
+ src_x = s->mb_x * 16 + (mx >> 2);
+ src_y = s->mb_y * 16 + (my >> 2);
+ uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
+ uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
+
+ src_x = clip( src_x, -16, s->mb_width * 16);
+ src_y = clip( src_y, -16, s->mb_height * 16);
+ uvsrc_x = clip(uvsrc_x, -8, s->mb_width * 8);
+ uvsrc_y = clip(uvsrc_y, -8, s->mb_height * 8);
+
+ srcY += src_y * s->linesize + src_x;
+ srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
+ srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
+
+ /* for grayscale we should not try to read from unknown area */
+ if(s->flags & CODEC_FLAG_GRAY) {
+ srcU = s->edge_emu_buffer + 18 * s->linesize;
+ srcV = s->edge_emu_buffer + 18 * s->linesize;
+ }
+
+ if(v->rangeredfrm
+ || (unsigned)src_x > s->h_edge_pos - (mx&3) - 16
+ || (unsigned)src_y > s->v_edge_pos - (my&3) - 16){
+ uint8_t *uvbuf= s->edge_emu_buffer + 19 * s->linesize;
+
+ srcY -= s->mspel * (1 + s->linesize);
+ ff_emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 17+s->mspel*2, 17+s->mspel*2,
+ src_x - s->mspel, src_y - s->mspel, s->h_edge_pos, s->v_edge_pos);
+ srcY = s->edge_emu_buffer;
+ ff_emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8+1, 8+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ ff_emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8+1, 8+1,
+ uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, s->v_edge_pos >> 1);
+ srcU = uvbuf;
+ srcV = uvbuf + 16;
+ /* if we deal with range reduction we need to scale source blocks */
+ if(v->rangeredfrm) {
+ int i, j;
+ uint8_t *src, *src2;
+
+ src = srcY;
+ for(j = 0; j < 17 + s->mspel*2; j++) {
+ for(i = 0; i < 17 + s->mspel*2; i++) src[i] = ((src[i] - 128) >> 1) + 128;
+ src += s->linesize;
+ }
+ src = srcU; src2 = srcV;
+ for(j = 0; j < 9; j++) {
+ for(i = 0; i < 9; i++) {
+ src[i] = ((src[i] - 128) >> 1) + 128;
+ src2[i] = ((src2[i] - 128) >> 1) + 128;
+ }
+ src += s->uvlinesize;
+ src2 += s->uvlinesize;
+ }
+ }
+ srcY += s->mspel * (1 + s->linesize);
+ }
+
+ if(v->fastuvmc) {
+ uvmx = uvmx + ((uvmx<0)?-(uvmx&1):(uvmx&1));
+ uvmy = uvmy + ((uvmy<0)?-(uvmy&1):(uvmy&1));
+ }
+
+ mx >>= 1;
+ my >>= 1;
+ dxy = ((my & 1) << 1) | (mx & 1);
+
+ dsp->avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
+
+ if(s->flags & CODEC_FLAG_GRAY) return;
+    /* Chroma MC always uses qpel bilinear */
+ uvdxy = ((uvmy & 3) << 2) | (uvmx & 3);
+ uvmx = (uvmx&3)<<1;
+ uvmy = (uvmy&3)<<1;
+ dsp->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
+ dsp->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
+}
+
+static always_inline int scale_mv(int value, int bfrac, int inv, int qs)
+{
+ int n = bfrac;
+
+#if B_FRACTION_DEN==256
+ if(inv)
+ n -= 256;
+ if(!qs)
+ return 2 * ((value * n + 255) >> 9);
+ return (value * n + 128) >> 8;
+#else
+ if(inv)
+ n -= B_FRACTION_DEN;
+ if(!qs)
+ return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
+ return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
+#endif
+}
+
/** Reconstruct motion vector for B-frame and do motion compensation
*/
static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mode)
{
+ if(v->use_ic) {
+ v->mv_mode2 = v->mv_mode;
+ v->mv_mode = MV_PMODE_INTENSITY_COMP;
+ }
+ if(direct) {
+ vc1_mc_1mv(v, 0);
+ vc1_interp_mc(v);
+ if(v->use_ic) v->mv_mode = v->mv_mode2;
+ return;
+ }
+ if(mode == BMV_TYPE_INTERPOLATED) {
+ vc1_mc_1mv(v, 0);
+ vc1_interp_mc(v);
+ if(v->use_ic) v->mv_mode = v->mv_mode2;
+ return;
+ }
+
+ if(v->use_ic && (mode == BMV_TYPE_BACKWARD)) v->mv_mode = v->mv_mode2;
+ vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
+ if(v->use_ic) v->mv_mode = v->mv_mode2;
+}
+
+static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
+{
MpegEncContext *s = &v->s;
- int mx[4], my[4], mv_x, mv_y;
- int i;
+ int xy, wrap, off = 0;
+ int16_t *A, *B, *C;
+ int px, py;
+ int sum;
+ int r_x, r_y;
+ const uint8_t *is_intra = v->mb_type[0];
+ r_x = v->range_x;
+ r_y = v->range_y;
/* scale MV difference to be quad-pel */
dmv_x[0] <<= 1 - s->quarter_sample;
dmv_y[0] <<= 1 - s->quarter_sample;
dmv_x[1] <<= 1 - s->quarter_sample;
dmv_y[1] <<= 1 - s->quarter_sample;
- if(direct || mode == BMV_TYPE_INTERPOLATED) {
- /* TODO */
+ wrap = s->b8_stride;
+ xy = s->block_index[0];
+
+ if(s->mb_intra) {
+ s->current_picture.motion_val[0][xy][0] =
+ s->current_picture.motion_val[0][xy][1] =
+ s->current_picture.motion_val[1][xy][0] =
+ s->current_picture.motion_val[1][xy][1] = 0;
return;
}
+ s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
+ s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
+ s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
+ s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
+ if(direct) {
+ s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
+ return;
+ }
+
+ if((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
+ C = s->current_picture.motion_val[0][xy - 2];
+ A = s->current_picture.motion_val[0][xy - wrap*2];
+ off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
+ B = s->current_picture.motion_val[0][xy - wrap*2 + off];
- if(mode == BMV_TYPE_BACKWARD) {
- for(i = 0; i < 4; i++) {
- mx[i] = s->last_picture.motion_val[0][s->block_index[i]][0];
- my[i] = s->last_picture.motion_val[0][s->block_index[i]][1];
+ if(!s->first_slice_line) { // predictor A is not out of bounds
+ if(s->mb_width == 1) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = mid_pred(A[0], B[0], C[0]);
+ py = mid_pred(A[1], B[1], C[1]);
+ }
+ } else if(s->mb_x) { // predictor C is not out of bounds
+ px = C[0];
+ py = C[1];
+ } else {
+ px = py = 0;
}
- } else {
- for(i = 0; i < 4; i++) {
- mx[i] = s->next_picture.motion_val[0][s->block_index[i]][0];
- my[i] = s->next_picture.motion_val[0][s->block_index[i]][1];
+ /* Pullback MV as specified in 8.3.5.3.4 */
+ {
+ int qx, qy, X, Y;
+ if(v->profile < PROFILE_ADVANCED) {
+ qx = (s->mb_x << 5);
+ qy = (s->mb_y << 5);
+ X = (s->mb_width << 5) - 4;
+ Y = (s->mb_height << 5) - 4;
+ if(qx + px < -28) px = -28 - qx;
+ if(qy + py < -28) py = -28 - qy;
+ if(qx + px > X) px = X - qx;
+ if(qy + py > Y) py = Y - qy;
+ } else {
+ qx = (s->mb_x << 6);
+ qy = (s->mb_y << 6);
+ X = (s->mb_width << 6) - 4;
+ Y = (s->mb_height << 6) - 4;
+ if(qx + px < -60) px = -60 - qx;
+ if(qy + py < -60) py = -60 - qy;
+ if(qx + px > X) px = X - qx;
+ if(qy + py > Y) py = Y - qy;
+ }
}
+ /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
+ if(0 && !s->first_slice_line && s->mb_x) {
+ if(is_intra[xy - wrap])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - A[0]) + FFABS(py - A[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ } else {
+ if(is_intra[xy - 2])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - C[0]) + FFABS(py - C[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ }
+ }
+ }
+ /* store MV using signed modulus of MV range defined in 4.11 */
+ s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
}
+ if((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
+ C = s->current_picture.motion_val[1][xy - 2];
+ A = s->current_picture.motion_val[1][xy - wrap*2];
+ off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
+ B = s->current_picture.motion_val[1][xy - wrap*2 + off];
+
+ if(!s->first_slice_line) { // predictor A is not out of bounds
+ if(s->mb_width == 1) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = mid_pred(A[0], B[0], C[0]);
+ py = mid_pred(A[1], B[1], C[1]);
+ }
+ } else if(s->mb_x) { // predictor C is not out of bounds
+ px = C[0];
+ py = C[1];
+ } else {
+ px = py = 0;
+ }
+ /* Pullback MV as specified in 8.3.5.3.4 */
+ {
+ int qx, qy, X, Y;
+ if(v->profile < PROFILE_ADVANCED) {
+ qx = (s->mb_x << 5);
+ qy = (s->mb_y << 5);
+ X = (s->mb_width << 5) - 4;
+ Y = (s->mb_height << 5) - 4;
+ if(qx + px < -28) px = -28 - qx;
+ if(qy + py < -28) py = -28 - qy;
+ if(qx + px > X) px = X - qx;
+ if(qy + py > Y) py = Y - qy;
+ } else {
+ qx = (s->mb_x << 6);
+ qy = (s->mb_y << 6);
+ X = (s->mb_width << 6) - 4;
+ Y = (s->mb_height << 6) - 4;
+ if(qx + px < -60) px = -60 - qx;
+ if(qy + py < -60) py = -60 - qy;
+ if(qx + px > X) px = X - qx;
+ if(qy + py > Y) py = Y - qy;
+ }
+ }
+ /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
+ if(0 && !s->first_slice_line && s->mb_x) {
+ if(is_intra[xy - wrap])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - A[0]) + FFABS(py - A[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ } else {
+ if(is_intra[xy - 2])
+ sum = FFABS(px) + FFABS(py);
+ else
+ sum = FFABS(px - C[0]) + FFABS(py - C[1]);
+ if(sum > 32) {
+ if(get_bits1(&s->gb)) {
+ px = A[0];
+ py = A[1];
+ } else {
+ px = C[0];
+ py = C[1];
+ }
+ }
+ }
+ }
+ /* store MV using signed modulus of MV range defined in 4.11 */
- /* XXX: not right but how to determine 4-MV intra/inter in another frame? */
- mv_x = median4(mx[0], mx[1], mx[2], mx[3]);
- mv_y = median4(my[0], my[1], my[2], my[3]);
- s->mv[0][0][0] = mv_x;
- s->mv[0][0][1] = mv_y;
-
- vc1_mc_1mv(v, (mode == BMV_TYPE_FORWARD));
+ s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
+ s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
+ }
+ s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
+ s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
+ s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
+ s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
/** Get predicted DC value for I-frames only
@@ -1833,7 +2527,6 @@ static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
/**
* @defgroup std_mb VC1 Macroblock-level functions in Simple/Main Profiles
* @see 7.1.4, p91 and 8.1.1.7, p(1)04
- * @todo TODO: Integrate to MpegEncContext facilities
* @{
*/
@@ -2094,6 +2787,202 @@ not_coded:
return 0;
}
+/** Decode intra block in intra frames - should be faster than decode_intra_block
+ * @param v VC1Context
+ * @param block block to decode
+ * @param coded are AC coeffs present or not
+ * @param codingset set of VLC to decode data
+ */
+static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int coded, int codingset, int mquant)
+{
+ GetBitContext *gb = &v->s.gb;
+ MpegEncContext *s = &v->s;
+ int dc_pred_dir = 0; /* Direction of the DC prediction used */
+ int run_diff, i;
+ int16_t *dc_val;
+ int16_t *ac_val, *ac_val2;
+ int dcdiff;
+ int a_avail = v->a_avail, c_avail = v->c_avail;
+ int use_pred = s->ac_pred;
+ int scale;
+ int q1, q2 = 0;
+ int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+
+ /* Get DC differential */
+ if (n < 4) {
+ dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ } else {
+ dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
+ }
+ if (dcdiff < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
+ return -1;
+ }
+ if (dcdiff)
+ {
+ if (dcdiff == 119 /* ESC index value */)
+ {
+ /* TODO: Optimize */
+ if (mquant == 1) dcdiff = get_bits(gb, 10);
+ else if (mquant == 2) dcdiff = get_bits(gb, 9);
+ else dcdiff = get_bits(gb, 8);
+ }
+ else
+ {
+ if (mquant == 1)
+ dcdiff = (dcdiff<<2) + get_bits(gb, 2) - 3;
+ else if (mquant == 2)
+ dcdiff = (dcdiff<<1) + get_bits(gb, 1) - 1;
+ }
+ if (get_bits(gb, 1))
+ dcdiff = -dcdiff;
+ }
+
+ /* Prediction */
+ dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
+ *dc_val = dcdiff;
+
+ /* Store the quantized DC coeff, used for prediction */
+ if (n < 4) {
+ block[0] = dcdiff * s->y_dc_scale;
+ } else {
+ block[0] = dcdiff * s->c_dc_scale;
+ }
+ /* Skip ? */
+ run_diff = 0;
+ i = 0;
+
+ //AC Decoding
+ i = 1;
+
+ /* check if AC is needed at all and adjust direction if needed */
+ if(!a_avail) dc_pred_dir = 1;
+ if(!c_avail) dc_pred_dir = 0;
+ if(!a_avail && !c_avail) use_pred = 0;
+ ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
+ ac_val2 = ac_val;
+
+ scale = mquant * 2 + v->halfpq;
+
+ if(dc_pred_dir) //left
+ ac_val -= 16;
+ else //top
+ ac_val -= 16 * s->block_wrap[n];
+
+ q1 = s->current_picture.qscale_table[mb_pos];
+ if(dc_pred_dir && c_avail) q2 = s->current_picture.qscale_table[mb_pos - 1];
+ if(!dc_pred_dir && a_avail) q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
+ if(n && n<4) q2 = q1;
+
+ if(coded) {
+ int last = 0, skip, value;
+ const int8_t *zz_table;
+ int k;
+
+ if(v->s.ac_pred) {
+ if(!dc_pred_dir)
+ zz_table = vc1_horizontal_zz;
+ else
+ zz_table = vc1_vertical_zz;
+ } else
+ zz_table = vc1_normal_zz;
+
+ while (!last) {
+ vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
+ i += skip;
+ if(i > 63)
+ break;
+ block[zz_table[i++]] = value;
+ }
+
+ /* apply AC prediction if needed */
+ if(use_pred) {
+ /* scale predictors if needed*/
+ if(q2 && q1!=q2) {
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++)
+ block[k << 3] += (ac_val[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ } else { //top
+ for(k = 1; k < 8; k++)
+ block[k] += (ac_val[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ }
+ } else {
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++)
+ block[k << 3] += ac_val[k];
+ } else { //top
+ for(k = 1; k < 8; k++)
+ block[k] += ac_val[k + 8];
+ }
+ }
+ }
+ /* save AC coeffs for further prediction */
+ for(k = 1; k < 8; k++) {
+ ac_val2[k] = block[k << 3];
+ ac_val2[k + 8] = block[k];
+ }
+
+ /* scale AC coeffs */
+ for(k = 1; k < 64; k++)
+ if(block[k]) {
+ block[k] *= scale;
+ if(!v->pquantizer)
+ block[k] += (block[k] < 0) ? -mquant : mquant;
+ }
+
+ if(use_pred) i = 63;
+ } else { // no AC coeffs
+ int k;
+
+ memset(ac_val2, 0, 16 * 2);
+ if(dc_pred_dir) {//left
+ if(use_pred) {
+ memcpy(ac_val2, ac_val, 8 * 2);
+ if(q2 && q1!=q2) {
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ for(k = 1; k < 8; k++)
+ ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ }
+ }
+ } else {//top
+ if(use_pred) {
+ memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
+ if(q2 && q1!=q2) {
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
+ for(k = 1; k < 8; k++)
+ ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
+ }
+ }
+ }
+
+ /* apply AC prediction if needed */
+ if(use_pred) {
+ if(dc_pred_dir) { //left
+ for(k = 1; k < 8; k++) {
+ block[k << 3] = ac_val2[k] * scale;
+ if(!v->pquantizer && block[k << 3])
+ block[k << 3] += (block[k << 3] < 0) ? -mquant : mquant;
+ }
+ } else { //top
+ for(k = 1; k < 8; k++) {
+ block[k] = ac_val2[k + 8] * scale;
+ if(!v->pquantizer && block[k])
+ block[k] += (block[k] < 0) ? -mquant : mquant;
+ }
+ }
+ i = 63;
+ }
+ }
+ s->block_last_index[n] = i;
+
+ return 0;
+}
+
/** Decode intra block in inter frames - more generic version than vc1_decode_i_block
* @param v VC1Context
* @param block block to decode
@@ -2209,8 +3098,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
if(use_pred) {
/* scale predictors if needed*/
if(q2 && q1!=q2) {
- q1 = q1 * 2 - 1;
- q2 = q2 * 2 - 1;
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
if(dc_pred_dir) { //left
for(k = 1; k < 8; k++)
@@ -2252,8 +3141,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
if(use_pred) {
memcpy(ac_val2, ac_val, 8 * 2);
if(q2 && q1!=q2) {
- q1 = q1 * 2 - 1;
- q2 = q2 * 2 - 1;
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
for(k = 1; k < 8; k++)
ac_val2[k] = (ac_val2[k] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
@@ -2262,8 +3151,8 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
if(use_pred) {
memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
if(q2 && q1!=q2) {
- q1 = q1 * 2 - 1;
- q2 = q2 * 2 - 1;
+ q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
+ q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
for(k = 1; k < 8; k++)
ac_val2[k + 8] = (ac_val2[k + 8] * q2 * vc1_dqscale[q1 - 1] + 0x20000) >> 18;
}
@@ -2372,7 +3261,10 @@ static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquan
i += skip;
if(i > 31)
break;
- idx = vc1_simple_progressive_8x4_zz[i++];
+ if(v->profile < PROFILE_ADVANCED)
+ idx = vc1_simple_progressive_8x4_zz[i++];
+ else
+ idx = vc1_adv_progressive_8x4_zz[i++];
block[idx + off] = value * scale;
if(!v->pquantizer)
block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
@@ -2391,7 +3283,10 @@ static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquan
i += skip;
if(i > 31)
break;
- idx = vc1_simple_progressive_4x8_zz[i++];
+ if(v->profile < PROFILE_ADVANCED)
+ idx = vc1_simple_progressive_4x8_zz[i++];
+ else
+ idx = vc1_adv_progressive_4x8_zz[i++];
block[idx + off] = value * scale;
if(!v->pquantizer)
block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
@@ -2406,8 +3301,6 @@ static int vc1_decode_p_block(VC1Context *v, DCTELEM block[64], int n, int mquan
/** Decode one P-frame MB (in Simple/Main profile)
- * @todo TODO: Extend to AP
- * @fixme FIXME: DC value for inter blocks not set
*/
static int vc1_decode_p_mb(VC1Context *v)
{
@@ -2449,6 +3342,10 @@ static int vc1_decode_p_mb(VC1Context *v)
{
GET_MVDATA(dmv_x, dmv_y);
+ if (s->mb_intra) {
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+ }
s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
vc1_pred_mv(s, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
@@ -2498,12 +3395,11 @@ static int vc1_decode_p_mb(VC1Context *v)
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
for(j = 0; j < 64; j++) s->block[i][j] += 128;
s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
- /* TODO: proper loop filtering */
if(v->pq >= 9 && v->overlap) {
- if(v->a_avail)
- s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
if(v->c_avail)
- s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
+ s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+ if(v->a_avail)
+ s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
}
} else if(val) {
vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
@@ -2602,12 +3498,11 @@ static int vc1_decode_p_mb(VC1Context *v)
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
for(j = 0; j < 64; j++) s->block[i][j] += 128;
s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i&4)?s->uvlinesize:s->linesize);
- /* TODO: proper loop filtering */
if(v->pq >= 9 && v->overlap) {
- if(v->a_avail)
- s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
if(v->c_avail)
- s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
+ s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
+ if(v->a_avail)
+ s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
}
} else if(is_coded[i]) {
status = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
@@ -2650,7 +3545,7 @@ static void vc1_decode_b_mb(VC1Context *v)
GetBitContext *gb = &s->gb;
int i, j;
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
- int cbp; /* cbp decoding stuff */
+ int cbp = 0; /* cbp decoding stuff */
int mqdiff, mquant; /* MB quantization */
int ttmb = v->ttfrm; /* MB Transform type */
@@ -2663,7 +3558,7 @@ static void vc1_decode_b_mb(VC1Context *v)
int dst_idx, off;
int skipped, direct;
int dmv_x[2], dmv_y[2];
- int bmvtype = BMV_TYPE_BACKWARD; /* XXX: is it so? */
+ int bmvtype = BMV_TYPE_BACKWARD;
mquant = v->pq; /* Loosy initialization */
s->mb_intra = 0;
@@ -2688,6 +3583,8 @@ static void vc1_decode_b_mb(VC1Context *v)
if (!direct) {
if (!skipped) {
GET_MVDATA(dmv_x[0], dmv_y[0]);
+ dmv_x[1] = dmv_x[0];
+ dmv_y[1] = dmv_y[0];
}
if(skipped || !s->mb_intra) {
bmvtype = decode012(gb);
@@ -2700,24 +3597,34 @@ static void vc1_decode_b_mb(VC1Context *v)
break;
case 2:
bmvtype = BMV_TYPE_INTERPOLATED;
+ dmv_x[0] = dmv_y[0] = 0;
}
}
}
+ for(i = 0; i < 6; i++)
+ v->mb_type[0][s->block_index[i]] = s->mb_intra;
if (skipped) {
+ if(direct) bmvtype = BMV_TYPE_INTERPOLATED;
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
return;
}
if (direct) {
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
GET_MQUANT();
+ s->mb_intra = 0;
+ mb_has_coeffs = 0;
s->current_picture.qscale_table[mb_pos] = mquant;
- if(!v->ttmbf && !s->mb_intra && mb_has_coeffs)
+ if(!v->ttmbf)
ttmb = get_vlc2(gb, vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
+ dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
} else {
if(!mb_has_coeffs && !s->mb_intra) {
/* no coded blocks - effectively skipped */
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
return;
}
@@ -2726,17 +3633,21 @@ static void vc1_decode_b_mb(VC1Context *v)
s->current_picture.qscale_table[mb_pos] = mquant;
s->ac_pred = get_bits1(gb);
cbp = 0;
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
} else {
if(bmvtype == BMV_TYPE_INTERPOLATED) {
- GET_MVDATA(dmv_x[1], dmv_y[1]);
+ GET_MVDATA(dmv_x[0], dmv_y[0]);
if(!mb_has_coeffs) {
/* interpolated skipped block */
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
return;
}
}
- if(!s->mb_intra)
+ vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
+ if(!s->mb_intra) {
vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
+ }
if(s->mb_intra)
s->ac_pred = get_bits1(gb);
cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
@@ -2768,13 +3679,6 @@ static void vc1_decode_b_mb(VC1Context *v)
if(v->rangeredfrm) for(j = 0; j < 64; j++) s->block[i][j] <<= 1;
for(j = 0; j < 64; j++) s->block[i][j] += 128;
s->dsp.put_pixels_clamped(s->block[i], s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2));
- /* TODO: proper loop filtering */
- if(v->pq >= 9 && v->overlap) {
- if(v->a_avail)
- s->dsp.vc1_v_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? ((i&1)>>1) : (s->mb_y&1));
- if(v->c_avail)
- s->dsp.vc1_h_overlap(s->dest[dst_idx] + off, s->linesize >> ((i & 4) >> 2), (i<4) ? (i&1) : (s->mb_x&1));
- }
} else if(val) {
vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block);
if(!v->ttmbf && ttmb < 8) ttmb = -1;
@@ -2837,6 +3741,8 @@ static void vc1_decode_i_blocks(VC1Context *v)
mb_pos = s->mb_x + s->mb_y * s->mb_width;
s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
s->current_picture.qscale_table[mb_pos] = v->pq;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
// do actual MB decoding and displaying
cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
@@ -2861,27 +3767,156 @@ static void vc1_decode_i_blocks(VC1Context *v)
}
vc1_put_block(v, s->block);
- if(v->pq >= 9 && v->overlap) { /* XXX: do proper overlapping insted of loop filter */
+ if(v->pq >= 9 && v->overlap) {
+ if(s->mb_x) {
+ s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
+ s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+ if(!(s->flags & CODEC_FLAG_GRAY)) {
+ s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
+ s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
+ }
+ }
+ s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
+ s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
if(!s->first_slice_line) {
- s->dsp.vc1_v_overlap(s->dest[0], s->linesize, 0);
- s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize, 0);
+ s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
if(!(s->flags & CODEC_FLAG_GRAY)) {
- s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize, s->mb_y&1);
- s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize, s->mb_y&1);
+ s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
+ s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
}
}
- s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 1);
- s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
+ }
+
+ if(get_bits_count(&s->gb) > v->bits) {
+ av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n", get_bits_count(&s->gb), v->bits);
+ return;
+ }
+ }
+ ff_draw_horiz_band(s, s->mb_y * 16, 16);
+ s->first_slice_line = 0;
+ }
+}
+
+/** Decode blocks of I-frame for advanced profile
+ */
+static void vc1_decode_i_blocks_adv(VC1Context *v)
+{
+ int k, j;
+ MpegEncContext *s = &v->s;
+ int cbp, val;
+ uint8_t *coded_val;
+ int mb_pos;
+ int mquant = v->pq;
+ int mqdiff;
+ int overlap;
+ GetBitContext *gb = &s->gb;
+
+ /* select codingmode used for VLC tables selection */
+ switch(v->y_ac_table_index){
+ case 0:
+ v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
+ break;
+ case 1:
+ v->codingset = CS_HIGH_MOT_INTRA;
+ break;
+ case 2:
+ v->codingset = CS_MID_RATE_INTRA;
+ break;
+ }
+
+ switch(v->c_ac_table_index){
+ case 0:
+ v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
+ break;
+ case 1:
+ v->codingset2 = CS_HIGH_MOT_INTER;
+ break;
+ case 2:
+ v->codingset2 = CS_MID_RATE_INTER;
+ break;
+ }
+
+ //do frame decode
+ s->mb_x = s->mb_y = 0;
+ s->mb_intra = 1;
+ s->first_slice_line = 1;
+ ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
+ for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
+ for(s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
+ ff_init_block_index(s);
+ ff_update_block_index(s);
+ s->dsp.clear_blocks(s->block[0]);
+ mb_pos = s->mb_x + s->mb_y * s->mb_stride;
+ s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
+ s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
+ s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
+
+ // do actual MB decoding and displaying
+ cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
+ if(v->acpred_is_raw)
+ v->s.ac_pred = get_bits(&v->s.gb, 1);
+ else
+ v->s.ac_pred = v->acpred_plane[mb_pos];
+
+ if(v->condover == CONDOVER_SELECT) {
+ if(v->overflg_is_raw)
+ overlap = get_bits(&v->s.gb, 1);
+ else
+ overlap = v->over_flags_plane[mb_pos];
+ } else
+ overlap = (v->condover == CONDOVER_ALL);
+
+ GET_MQUANT();
+
+ s->current_picture.qscale_table[mb_pos] = mquant;
+ /* Set DC scale - y and c use the same */
+ s->y_dc_scale = s->y_dc_scale_table[mquant];
+ s->c_dc_scale = s->c_dc_scale_table[mquant];
+
+ for(k = 0; k < 6; k++) {
+ val = ((cbp >> (5 - k)) & 1);
+
+ if (k < 4) {
+ int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
+ val = val ^ pred;
+ *coded_val = val;
+ }
+ cbp |= val << (5 - k);
+
+ v->a_avail = !s->first_slice_line || (k==2 || k==3);
+ v->c_avail = !!s->mb_x || (k==1 || k==3);
+
+ vc1_decode_i_block_adv(v, s->block[k], k, val, (k<4)? v->codingset : v->codingset2, mquant);
+
+ s->dsp.vc1_inv_trans_8x8(s->block[k]);
+ for(j = 0; j < 64; j++) s->block[k][j] += 128;
+ }
+
+ vc1_put_block(v, s->block);
+ if(overlap) {
if(s->mb_x) {
- s->dsp.vc1_h_overlap(s->dest[0], s->linesize, 0);
- s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize, 0);
+ s->dsp.vc1_h_overlap(s->dest[0], s->linesize);
+ s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+ if(!(s->flags & CODEC_FLAG_GRAY)) {
+ s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
+ s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
+ }
+ }
+ s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
+ s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
+ if(!s->first_slice_line) {
+ s->dsp.vc1_v_overlap(s->dest[0], s->linesize);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
if(!(s->flags & CODEC_FLAG_GRAY)) {
- s->dsp.vc1_h_overlap(s->dest[1], s->uvlinesize, s->mb_x&1);
- s->dsp.vc1_h_overlap(s->dest[2], s->uvlinesize, s->mb_x&1);
+ s->dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
+ s->dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
}
}
- s->dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize, 1);
- s->dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize, 1);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
+ s->dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
}
if(get_bits_count(&s->gb) > v->bits) {
@@ -2990,6 +4025,25 @@ static void vc1_decode_b_blocks(VC1Context *v)
}
}
+static void vc1_decode_skip_blocks(VC1Context *v)
+{
+ MpegEncContext *s = &v->s;
+
+ ff_er_add_slice(s, 0, 0, s->mb_width - 1, s->mb_height - 1, (AC_END|DC_END|MV_END));
+ s->first_slice_line = 1;
+ for(s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
+ s->mb_x = 0;
+ ff_init_block_index(s);
+ ff_update_block_index(s);
+ memcpy(s->dest[0], s->last_picture.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
+ memcpy(s->dest[1], s->last_picture.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+ memcpy(s->dest[2], s->last_picture.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
+ ff_draw_horiz_band(s, s->mb_y * 16, 16);
+ s->first_slice_line = 0;
+ }
+ s->pict_type = P_TYPE;
+}
+
static void vc1_decode_blocks(VC1Context *v)
{
@@ -2997,13 +4051,22 @@ static void vc1_decode_blocks(VC1Context *v)
switch(v->s.pict_type) {
case I_TYPE:
- vc1_decode_i_blocks(v);
+ if(v->profile == PROFILE_ADVANCED)
+ vc1_decode_i_blocks_adv(v);
+ else
+ vc1_decode_i_blocks(v);
break;
case P_TYPE:
- vc1_decode_p_blocks(v);
+ if(v->p_frame_skipped)
+ vc1_decode_skip_blocks(v);
+ else
+ vc1_decode_p_blocks(v);
break;
case B_TYPE:
- vc1_decode_b_blocks(v);
+ if(v->bi_type)
+ vc1_decode_i_blocks(v);
+ else
+ vc1_decode_b_blocks(v);
break;
}
}
@@ -3058,8 +4121,48 @@ static int vc1_decode_init(AVCodecContext *avctx)
{
av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
}
+ } else { // VC1/WVC1
+ int edata_size = avctx->extradata_size;
+ uint8_t *edata = avctx->extradata;
+
+ if(avctx->extradata_size < 16) {
+ av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", edata_size);
+ return -1;
+ }
+ while(edata_size > 8) {
+ // test if we've found header
+ if(BE_32(edata) == 0x0000010F) {
+ edata += 4;
+ edata_size -= 4;
+ break;
+ }
+ edata_size--;
+ edata++;
+ }
+
+ init_get_bits(&gb, edata, edata_size*8);
+
+ if (decode_sequence_header(avctx, &gb) < 0)
+ return -1;
+
+ while(edata_size > 8) {
+ // test if we've found entry point
+ if(BE_32(edata) == 0x0000010E) {
+ edata += 4;
+ edata_size -= 4;
+ break;
+ }
+ edata_size--;
+ edata++;
+ }
+
+ init_get_bits(&gb, edata, edata_size*8);
+
+ if (decode_entry_point(avctx, &gb) < 0)
+ return -1;
}
avctx->has_b_frames= !!(avctx->max_b_frames);
+ s->low_delay = !avctx->has_b_frames;
s->mb_width = (avctx->coded_width+15)>>4;
s->mb_height = (avctx->coded_height+15)>>4;
@@ -3067,6 +4170,8 @@ static int vc1_decode_init(AVCodecContext *avctx)
/* Allocate mb bitplanes */
v->mv_type_mb_plane = av_malloc(s->mb_stride * s->mb_height);
v->direct_mb_plane = av_malloc(s->mb_stride * s->mb_height);
+ v->acpred_plane = av_malloc(s->mb_stride * s->mb_height);
+ v->over_flags_plane = av_malloc(s->mb_stride * s->mb_height);
/* allocate block type info in that way so it could be used with s->block_index[] */
v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
@@ -3089,7 +4194,6 @@ static int vc1_decode_init(AVCodecContext *avctx)
/** Decode a VC1/WMV3 frame
* @todo TODO: Handle VC-1 IDUs (Transport level?)
- * @warning Initial try at using MpegEncContext stuff
*/
static int vc1_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
@@ -3098,6 +4202,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
VC1Context *v = avctx->priv_data;
MpegEncContext *s = &v->s;
AVFrame *pict = data;
+ uint8_t *buf2 = NULL;
/* no supplementary picture */
if (buf_size == 0) {
@@ -3118,29 +4223,61 @@ static int vc1_decode_frame(AVCodecContext *avctx,
s->current_picture_ptr= &s->picture[i];
}
- avctx->has_b_frames= !s->low_delay;
-
- init_get_bits(&s->gb, buf, buf_size*8);
+ //for advanced profile we need to unescape the buffer
+ if (avctx->codec_id == CODEC_ID_VC1) {
+ int i, buf_size2;
+ buf2 = av_malloc(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ buf_size2 = 0;
+ for(i = 0; i < buf_size; i++) {
+ if(buf[i] == 3 && i >= 2 && !buf[i-1] && !buf[i-2] && i < buf_size-1 && buf[i+1] < 4) {
+ buf2[buf_size2++] = buf[i+1];
+ i++;
+ } else
+ buf2[buf_size2++] = buf[i];
+ }
+ init_get_bits(&s->gb, buf2, buf_size2*8);
+ } else
+ init_get_bits(&s->gb, buf, buf_size*8);
// do parse frame header
- if(vc1_parse_frame_header(v, &s->gb) == -1)
- return -1;
+ if(v->profile < PROFILE_ADVANCED) {
+ if(vc1_parse_frame_header(v, &s->gb) == -1) {
+ av_free(buf2);
+ return -1;
+ }
+ } else {
+ if(vc1_parse_frame_header_adv(v, &s->gb) == -1) {
+ av_free(buf2);
+ return -1;
+ }
+ }
-// if(s->pict_type != I_TYPE && s->pict_type != P_TYPE)return -1;
+ if(s->pict_type != I_TYPE && !v->res_rtm_flag){
+ av_free(buf2);
+ return -1;
+ }
// for hurry_up==5
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == I_TYPE;
/* skip B-frames if we don't have reference frames */
- if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)) return -1;//buf_size;
+ if(s->last_picture_ptr==NULL && (s->pict_type==B_TYPE || s->dropable)){
+ av_free(buf2);
+ return -1;//buf_size;
+ }
/* skip b frames if we are in a hurry */
if(avctx->hurry_up && s->pict_type==B_TYPE) return -1;//buf_size;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
|| (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
- || avctx->skip_frame >= AVDISCARD_ALL)
+ || avctx->skip_frame >= AVDISCARD_ALL) {
+ av_free(buf2);
return buf_size;
+ }
/* skip everything if we are in a hurry>=5 */
- if(avctx->hurry_up>=5) return -1;//buf_size;
+ if(avctx->hurry_up>=5) {
+ av_free(buf2);
+ return -1;//buf_size;
+ }
if(s->next_p_frame_damaged){
if(s->pict_type==B_TYPE)
@@ -3149,8 +4286,10 @@ static int vc1_decode_frame(AVCodecContext *avctx,
s->next_p_frame_damaged=0;
}
- if(MPV_frame_start(s, avctx) < 0)
+ if(MPV_frame_start(s, avctx) < 0) {
+ av_free(buf2);
return -1;
+ }
ff_er_frame_start(s);
@@ -3180,6 +4319,7 @@ assert(s->current_picture.pict_type == s->pict_type);
    /* we subtract 1 because it is added in utils.c */
avctx->frame_number = s->picture_number - 1;
+ av_free(buf2);
return buf_size;
}
@@ -3196,6 +4336,8 @@ static int vc1_decode_end(AVCodecContext *avctx)
MPV_common_end(&v->s);
av_freep(&v->mv_type_mb_plane);
av_freep(&v->direct_mb_plane);
+ av_freep(&v->acpred_plane);
+ av_freep(&v->over_flags_plane);
av_freep(&v->mb_type_base);
return 0;
}
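
A stand-alone sketch of the escape-byte removal that the reworked vc1_decode_frame() performs on advanced-profile (VC1/WVC1) buffers before bit parsing; the helper name is illustrative and not part of the patch, but the byte rule matches the loop above: a 0x03 that follows two zero bytes and precedes a byte smaller than 4 is an emulation-prevention byte and is dropped.

#include <stdint.h>
#include <stddef.h>

static size_t vc1_unescape_sketch(const uint8_t *src, size_t size, uint8_t *dst)
{
    size_t i, n = 0;
    for (i = 0; i < size; i++) {
        if (i >= 2 && i + 1 < size &&
            src[i] == 0x03 && !src[i - 1] && !src[i - 2] && src[i + 1] < 4) {
            dst[n++] = src[++i];   /* drop the emulation-prevention 0x03, keep the byte after it */
        } else {
            dst[n++] = src[i];
        }
    }
    return n;   /* number of unescaped bytes written to dst */
}
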
diff --git a/src/libffmpeg/libavcodec/vc1acdata.h b/contrib/ffmpeg/libavcodec/vc1acdata.h
index ffcc39d64..a6acecd78 100644
--- a/src/libffmpeg/libavcodec/vc1acdata.h
+++ b/contrib/ffmpeg/libavcodec/vc1acdata.h
@@ -1,3 +1,24 @@
+/*
+ * VC-1 and WMV3 decoder
+ * copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#define AC_MODES 8
static const int vc1_ac_sizes[AC_MODES] = {
diff --git a/src/libffmpeg/libavcodec/vc1data.h b/contrib/ffmpeg/libavcodec/vc1data.h
index 9f9e21b4a..70e88b525 100644
--- a/src/libffmpeg/libavcodec/vc1data.h
+++ b/contrib/ffmpeg/libavcodec/vc1data.h
@@ -1,3 +1,25 @@
+/*
+ * VC-1 and WMV3 decoder
+ * copyright (c) 2006 Konstantin Shishkov
+ * (c) 2005 anonymous, Alex Beregszaszi, Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file vc1data.h
* VC-1 tables.
@@ -6,6 +28,7 @@
#ifndef VC1DATA_H
#define VC1DATA_H
+#if 0 //original bfraction from vc9data.h, not conforming to standard
/* Denominator used for vc1_bfraction_lut */
#define B_FRACTION_DEN 840
@@ -19,7 +42,23 @@ const int16_t vc1_bfraction_lut[23] = {
525 /*5/8*/, 735 /*7/8*/,
-1 /*inv.*/, 0 /*BI fm*/
};
-const uint8_t vc1_bfraction_bits[23] = {
+#else
+/* Denominator used for vc1_bfraction_lut */
+#define B_FRACTION_DEN 256
+
+/* pre-computed scales for all bfractions and base=256 */
+static const int16_t vc1_bfraction_lut[23] = {
+ 128 /*1/2*/, 85 /*1/3*/, 170 /*2/3*/, 64 /*1/4*/,
+ 192 /*3/4*/, 51 /*1/5*/, 102 /*2/5*/,
+ 153 /*3/5*/, 204 /*4/5*/, 43 /*1/6*/, 215 /*5/6*/,
+ 37 /*1/7*/, 74 /*2/7*/, 111 /*3/7*/, 148 /*4/7*/,
+ 185 /*5/7*/, 222 /*6/7*/, 32 /*1/8*/, 96 /*3/8*/,
+ 160 /*5/8*/, 224 /*7/8*/,
+ -1 /*inv.*/, 0 /*BI fm*/
+};
+#endif
+
+static const uint8_t vc1_bfraction_bits[23] = {
3, 3, 3, 3,
3, 3, 3,
7, 7, 7, 7,
@@ -28,7 +67,7 @@ const uint8_t vc1_bfraction_bits[23] = {
7, 7,
7, 7
};
-const uint8_t vc1_bfraction_codes[23] = {
+static const uint8_t vc1_bfraction_codes[23] = {
0, 1, 2, 3,
4, 5, 6,
112, 113, 114, 115,
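
The rebased table above stores B-fraction scale factors against a denominator of 256, so applying one to a motion-vector component is a fixed-point multiply and shift. How vc1.c rounds when it consumes these scales lies outside this hunk, so the helper below only illustrates the base-256 convention and is not the decoder's code path.

static int scale_by_bfraction(int mv, int scale)   /* scale: a non-negative vc1_bfraction_lut entry */
{
    return (mv * scale + 128) >> 8;   /* mv * scale/256, rounded to nearest */
}
/* e.g. scale_by_bfraction(100, 128) == 50: half of the co-located vector for a 1/2 B-fraction */
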
diff --git a/src/libffmpeg/libavcodec/vc1dsp.c b/contrib/ffmpeg/libavcodec/vc1dsp.c
index 16fe31b90..9139ffb28 100644
--- a/src/libffmpeg/libavcodec/vc1dsp.c
+++ b/contrib/ffmpeg/libavcodec/vc1dsp.c
@@ -2,18 +2,20 @@
* VC-1 and WMV3 decoder - DSP functions
* Copyright (c) 2006 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -27,43 +29,53 @@
#include "dsputil.h"
-/** Apply overlap transform to vertical edge
+/** Apply overlap transform to horizontal edge
*/
-static void vc1_v_overlap_c(uint8_t* src, int stride, int rnd)
+static void vc1_v_overlap_c(uint8_t* src, int stride)
{
int i;
int a, b, c, d;
+ int d1, d2;
+ int rnd = 1;
for(i = 0; i < 8; i++) {
a = src[-2*stride];
b = src[-stride];
c = src[0];
d = src[stride];
+ d1 = (a - d + 3 + rnd) >> 3;
+ d2 = (a - d + b - c + 4 - rnd) >> 3;
- src[-2*stride] = clip_uint8((7*a + d + 4 - rnd) >> 3);
- src[-stride] = clip_uint8((-a + 7*b + c + d + 3 + rnd) >> 3);
- src[0] = clip_uint8((a + b + 7*c - d + 4 - rnd) >> 3);
- src[stride] = clip_uint8((a + 7*d + 3 + rnd) >> 3);
+ src[-2*stride] = a - d1;
+ src[-stride] = b - d2;
+ src[0] = c + d2;
+ src[stride] = d + d1;
src++;
+ rnd = !rnd;
}
}
-/** Apply overlap transform to horizontal edge
+/** Apply overlap transform to vertical edge
*/
-static void vc1_h_overlap_c(uint8_t* src, int stride, int rnd)
+static void vc1_h_overlap_c(uint8_t* src, int stride)
{
int i;
int a, b, c, d;
+ int d1, d2;
+ int rnd = 1;
for(i = 0; i < 8; i++) {
a = src[-2];
b = src[-1];
c = src[0];
d = src[1];
+ d1 = (a - d + 3 + rnd) >> 3;
+ d2 = (a - d + b - c + 4 - rnd) >> 3;
- src[-2] = clip_uint8((7*a + d + 4 - rnd) >> 3);
- src[-1] = clip_uint8((-a + 7*b + c + d + 3 + rnd) >> 3);
- src[0] = clip_uint8((a + b + 7*c - d + 4 - rnd) >> 3);
- src[1] = clip_uint8((a + 7*d + 3 + rnd) >> 3);
+ src[-2] = a - d1;
+ src[-1] = b - d2;
+ src[0] = c + d2;
+ src[1] = d + d1;
src += stride;
+ rnd = !rnd;
}
}
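
Restated outside the diff for comparison with the removed clip_uint8() filter, the new smoothing is the lifting form below, applied to the four pixels straddling a block edge, with the rounding bias rnd alternating 1,0,1,0,... along the edge exactly as in vc1_v_overlap_c()/vc1_h_overlap_c() above (function name illustrative, not part of the patch).

static void overlap_smooth_4(int p[4], int rnd)
{
    int a = p[0], b = p[1], c = p[2], d = p[3];
    int d1 = (a - d + 3 + rnd) >> 3;
    int d2 = (a - d + b - c + 4 - rnd) >> 3;
    p[0] = a - d1;   /* outer pixel on one side of the edge */
    p[1] = b - d2;   /* inner pixel on that side */
    p[2] = c + d2;   /* inner pixel on the other side */
    p[3] = d + d1;   /* outer pixel on the other side */
}
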
diff --git a/src/libffmpeg/libavcodec/vcr1.c b/contrib/ffmpeg/libavcodec/vcr1.c
index 6012752eb..62bf12320 100644
--- a/src/libffmpeg/libavcodec/vcr1.c
+++ b/contrib/ffmpeg/libavcodec/vcr1.c
@@ -2,18 +2,20 @@
* ATI VCR1 codec
* Copyright (c) 2003 Michael Niedermayer
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavcodec/vmdav.c b/contrib/ffmpeg/libavcodec/vmdav.c
index b850a09f9..a9937144e 100644
--- a/src/libffmpeg/libavcodec/vmdav.c
+++ b/contrib/ffmpeg/libavcodec/vmdav.c
@@ -2,18 +2,20 @@
* Sierra VMD Audio & Video Decoders
* Copyright (C) 2004 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -92,7 +94,7 @@ static void lz_unpack(unsigned char *src, unsigned char *dest, int dest_len)
d_end = d + dest_len;
dataleft = LE_32(s);
s += 4;
- memset(queue, QUEUE_SIZE, 0x20);
+ memset(queue, 0x20, QUEUE_SIZE);
if (LE_32(s) == 0x56781234) {
s += 4;
qpos = 0x111;
@@ -482,10 +484,13 @@ static int vmdaudio_loadsound(VmdAudioContext *s, unsigned char *data,
} else {
if (s->bits == 16)
vmdaudio_decode_audio(s, data, buf, 1);
- else
+ else {
/* copy the data but convert it to signed */
- for (i = 0; i < s->block_align; i++)
- data[i * 2 + 1] = buf[i] + 0x80;
+ for (i = 0; i < s->block_align; i++){
+ *data++ = buf[i] + 0x80;
+ *data++ = buf[i] + 0x80;
+ }
+ }
}
} else {
bytes_decoded = s->block_align * 2;
@@ -498,8 +503,10 @@ static int vmdaudio_loadsound(VmdAudioContext *s, unsigned char *data,
vmdaudio_decode_audio(s, data, buf, 0);
} else {
/* copy the data but convert it to signed */
- for (i = 0; i < s->block_align; i++)
- data[i * 2 + 1] = buf[i] + 0x80;
+ for (i = 0; i < s->block_align; i++){
+ *data++ = buf[i] + 0x80;
+ *data++ = buf[i] + 0x80;
+ }
}
}
}
@@ -512,12 +519,10 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx,
uint8_t *buf, int buf_size)
{
VmdAudioContext *s = (VmdAudioContext *)avctx->priv_data;
- unsigned int sound_flags;
unsigned char *output_samples = (unsigned char *)data;
/* point to the start of the encoded data */
unsigned char *p = buf + 16;
- unsigned char *p_end = buf + buf_size;
if (buf_size < 16)
return buf_size;
@@ -526,24 +531,10 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx,
/* the chunk contains audio */
*data_size = vmdaudio_loadsound(s, output_samples, p, 0);
} else if (buf[6] == 2) {
- /* the chunk contains audio and silence mixed together */
- sound_flags = LE_32(p);
+ /* the chunk may contain audio */
p += 4;
-
- /* do something with extrabufs here? */
-
- while (p < p_end) {
- if (sound_flags & 0x01)
- /* silence */
- *data_size += vmdaudio_loadsound(s, output_samples, p, 1);
- else {
- /* audio */
- *data_size += vmdaudio_loadsound(s, output_samples, p, 0);
- p += s->block_align;
- }
- output_samples += (s->block_align * s->bits / 8);
- sound_flags >>= 1;
- }
+ *data_size = vmdaudio_loadsound(s, output_samples, p, (buf_size == 16));
+ output_samples += (s->block_align * s->bits / 8);
} else if (buf[6] == 3) {
/* silent chunk */
*data_size = vmdaudio_loadsound(s, output_samples, p, 1);
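
The "+ 0x80" in the copy loops above is the usual 8-bit bias flip: adding 0x80 modulo 256 toggles the top bit, turning an unsigned sample with silence at 0x80 into a two's-complement signed sample with silence at 0. A minimal sketch of just that conversion (hypothetical helper, not part of the patch):

#include <stdint.h>

static int8_t u8_to_s8(uint8_t v)
{
    return (int8_t)(uint8_t)(v + 0x80);   /* 0x80 -> 0, 0x00 -> -128, 0xFF -> 127 */
}
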
diff --git a/contrib/ffmpeg/libavcodec/vmnc.c b/contrib/ffmpeg/libavcodec/vmnc.c
new file mode 100644
index 000000000..7b5f567e7
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vmnc.c
@@ -0,0 +1,525 @@
+/*
+ * VMware Screen Codec (VMnc) decoder
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/**
+ * @file vmnc.c
+ * VMware Screen Codec (VMnc) decoder
+ * As Alex Beregszaszi discovered, this is effectively an RFB data dump
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "common.h"
+#include "avcodec.h"
+
+enum EncTypes {
+ MAGIC_WMVd = 0x574D5664,
+ MAGIC_WMVe,
+ MAGIC_WMVf,
+ MAGIC_WMVg,
+ MAGIC_WMVh,
+ MAGIC_WMVi,
+ MAGIC_WMVj
+};
+
+enum HexTile_Flags {
+ HT_RAW = 1, // tile is raw
+ HT_BKG = 2, // background color is present
+ HT_FG = 4, // foreground color is present
+ HT_SUB = 8, // subrects are present
+ HT_CLR = 16 // each subrect has own color
+};
+
+/*
+ * Decoder context
+ */
+typedef struct VmncContext {
+ AVCodecContext *avctx;
+ AVFrame pic;
+
+ int bpp;
+ int bpp2;
+ int bigendian;
+ uint8_t pal[768];
+ int width, height;
+
+ /* cursor data */
+ int cur_w, cur_h;
+ int cur_x, cur_y;
+ int cur_hx, cur_hy;
+ uint8_t* curbits, *curmask;
+ uint8_t* screendta;
+} VmncContext;
+
+/* read pixel value from stream */
+static always_inline int vmnc_get_pixel(uint8_t* buf, int bpp, int be) {
+ switch(bpp * 2 + be) {
+ case 2:
+ case 3: return *buf;
+ case 4: return LE_16(buf);
+ case 5: return BE_16(buf);
+ case 8: return LE_32(buf);
+ case 9: return BE_32(buf);
+ default: return 0;
+ }
+}
+
+static void load_cursor(VmncContext *c, uint8_t *src)
+{
+ int i, j, p;
+ const int bpp = c->bpp2;
+ uint8_t *dst8 = c->curbits;
+ uint16_t *dst16 = (uint16_t*)c->curbits;
+ uint32_t *dst32 = (uint32_t*)c->curbits;
+
+ for(j = 0; j < c->cur_h; j++) {
+ for(i = 0; i < c->cur_w; i++) {
+ p = vmnc_get_pixel(src, bpp, c->bigendian);
+ src += bpp;
+ if(bpp == 1) *dst8++ = p;
+ if(bpp == 2) *dst16++ = p;
+ if(bpp == 4) *dst32++ = p;
+ }
+ }
+ dst8 = c->curmask;
+ dst16 = (uint16_t*)c->curmask;
+ dst32 = (uint32_t*)c->curmask;
+ for(j = 0; j < c->cur_h; j++) {
+ for(i = 0; i < c->cur_w; i++) {
+ p = vmnc_get_pixel(src, bpp, c->bigendian);
+ src += bpp;
+ if(bpp == 1) *dst8++ = p;
+ if(bpp == 2) *dst16++ = p;
+ if(bpp == 4) *dst32++ = p;
+ }
+ }
+}
+
+static void put_cursor(uint8_t *dst, int stride, VmncContext *c, int dx, int dy)
+{
+ int i, j;
+ int w, h, x, y;
+ w = c->cur_w;
+ if(c->width < c->cur_x + c->cur_w) w = c->width - c->cur_x;
+ h = c->cur_h;
+ if(c->height < c->cur_y + c->cur_h) h = c->height - c->cur_y;
+ x = c->cur_x;
+ y = c->cur_y;
+ if(x < 0) {
+ w += x;
+ x = 0;
+ }
+ if(y < 0) {
+ h += y;
+ y = 0;
+ }
+
+ if((w < 1) || (h < 1)) return;
+ dst += x * c->bpp2 + y * stride;
+
+ if(c->bpp2 == 1) {
+ uint8_t* cd = c->curbits, *msk = c->curmask;
+ for(j = 0; j < h; j++) {
+ for(i = 0; i < w; i++)
+ dst[i] = (dst[i] & cd[i]) ^ msk[i];
+ msk += c->cur_w;
+ cd += c->cur_w;
+ dst += stride;
+ }
+ } else if(c->bpp2 == 2) {
+ uint16_t* cd = (uint16_t*)c->curbits, *msk = (uint16_t*)c->curmask;
+ uint16_t* dst2;
+ for(j = 0; j < h; j++) {
+ dst2 = (uint16_t*)dst;
+ for(i = 0; i < w; i++)
+ dst2[i] = (dst2[i] & cd[i]) ^ msk[i];
+ msk += c->cur_w;
+ cd += c->cur_w;
+ dst += stride;
+ }
+ } else if(c->bpp2 == 4) {
+ uint32_t* cd = (uint32_t*)c->curbits, *msk = (uint32_t*)c->curmask;
+ uint32_t* dst2;
+ for(j = 0; j < h; j++) {
+ dst2 = (uint32_t*)dst;
+ for(i = 0; i < w; i++)
+ dst2[i] = (dst2[i] & cd[i]) ^ msk[i];
+ msk += c->cur_w;
+ cd += c->cur_w;
+ dst += stride;
+ }
+ }
+}
+
+/* fill rectangle with given colour */
+static always_inline void paint_rect(uint8_t *dst, int dx, int dy, int w, int h, int color, int bpp, int stride)
+{
+ int i, j;
+ dst += dx * bpp + dy * stride;
+ if(bpp == 1){
+ for(j = 0; j < h; j++) {
+ memset(dst, color, w);
+ dst += stride;
+ }
+ }else if(bpp == 2){
+ uint16_t* dst2;
+ for(j = 0; j < h; j++) {
+ dst2 = (uint16_t*)dst;
+ for(i = 0; i < w; i++) {
+ *dst2++ = color;
+ }
+ dst += stride;
+ }
+ }else if(bpp == 4){
+ uint32_t* dst2;
+ for(j = 0; j < h; j++) {
+ dst2 = (uint32_t*)dst;
+ for(i = 0; i < w; i++) {
+ dst2[i] = color;
+ }
+ dst += stride;
+ }
+ }
+}
+
+static always_inline void paint_raw(uint8_t *dst, int w, int h, uint8_t* src, int bpp, int be, int stride)
+{
+ int i, j, p;
+ for(j = 0; j < h; j++) {
+ for(i = 0; i < w; i++) {
+ p = vmnc_get_pixel(src, bpp, be);
+ src += bpp;
+ switch(bpp){
+ case 1:
+ dst[i] = p;
+ break;
+ case 2:
+ ((uint16_t*)dst)[i] = p;
+ break;
+ case 4:
+ ((uint32_t*)dst)[i] = p;
+ break;
+ }
+ }
+ dst += stride;
+ }
+}
+
+static int decode_hextile(VmncContext *c, uint8_t* dst, uint8_t* src, int ssize, int w, int h, int stride)
+{
+ int i, j, k;
+ int bg = 0, fg = 0, rects, color, flags, xy, wh;
+ const int bpp = c->bpp2;
+ uint8_t *dst2;
+ int bw = 16, bh = 16;
+ uint8_t *ssrc=src;
+
+ for(j = 0; j < h; j += 16) {
+ dst2 = dst;
+ bw = 16;
+ if(j + 16 > h) bh = h - j;
+ for(i = 0; i < w; i += 16, dst2 += 16 * bpp) {
+ if(src - ssrc >= ssize) {
+ av_log(c->avctx, AV_LOG_ERROR, "Premature end of data!\n");
+ return -1;
+ }
+ if(i + 16 > w) bw = w - i;
+ flags = *src++;
+ if(flags & HT_RAW) {
+ if(src - ssrc > ssize - bw * bh * bpp) {
+ av_log(c->avctx, AV_LOG_ERROR, "Premature end of data!\n");
+ return -1;
+ }
+ paint_raw(dst2, bw, bh, src, bpp, c->bigendian, stride);
+ src += bw * bh * bpp;
+ } else {
+ if(flags & HT_BKG) {
+ bg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp;
+ }
+ if(flags & HT_FG) {
+ fg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp;
+ }
+ rects = 0;
+ if(flags & HT_SUB)
+ rects = *src++;
+ color = !!(flags & HT_CLR);
+
+ paint_rect(dst2, 0, 0, bw, bh, bg, bpp, stride);
+
+ if(src - ssrc > ssize - rects * (color * bpp + 2)) {
+ av_log(c->avctx, AV_LOG_ERROR, "Premature end of data!\n");
+ return -1;
+ }
+ for(k = 0; k < rects; k++) {
+ if(color) {
+ fg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp;
+ }
+ xy = *src++;
+ wh = *src++;
+ paint_rect(dst2, xy >> 4, xy & 0xF, (wh>>4)+1, (wh & 0xF)+1, fg, bpp, stride);
+ }
+ }
+ }
+ dst += stride * 16;
+ }
+ return src - ssrc;
+}
+
+static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, uint8_t *buf, int buf_size)
+{
+ VmncContext * const c = (VmncContext *)avctx->priv_data;
+ uint8_t *outptr;
+ uint8_t *src = buf;
+ int dx, dy, w, h, depth, enc, chunks, res, size_left;
+
+ c->pic.reference = 1;
+ c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
+ if(avctx->reget_buffer(avctx, &c->pic) < 0){
+ av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
+ return -1;
+ }
+
+ c->pic.key_frame = 0;
+ c->pic.pict_type = FF_P_TYPE;
+
+ //restore screen after cursor
+ if(c->screendta) {
+ int i;
+ w = c->cur_w;
+ if(c->width < c->cur_x + w) w = c->width - c->cur_x;
+ h = c->cur_h;
+ if(c->height < c->cur_y + h) h = c->height - c->cur_y;
+ dx = c->cur_x;
+ if(dx < 0) {
+ w += dx;
+ dx = 0;
+ }
+ dy = c->cur_y;
+ if(dy < 0) {
+ h += dy;
+ dy = 0;
+ }
+ if((w > 0) && (h > 0)) {
+ outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
+ for(i = 0; i < h; i++) {
+ memcpy(outptr, c->screendta + i * c->cur_w * c->bpp2, w * c->bpp2);
+ outptr += c->pic.linesize[0];
+ }
+ }
+ }
+ src += 2;
+ chunks = BE_16(src); src += 2;
+ while(chunks--) {
+ dx = BE_16(src); src += 2;
+ dy = BE_16(src); src += 2;
+ w = BE_16(src); src += 2;
+ h = BE_16(src); src += 2;
+ enc = BE_32(src); src += 4;
+ outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
+ size_left = buf_size - (src - buf);
+ switch(enc) {
+ case MAGIC_WMVd: // cursor
+ if(size_left < 2 + w * h * c->bpp2 * 2) {
+ av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", 2 + w * h * c->bpp2 * 2, size_left);
+ return -1;
+ }
+ src += 2;
+ c->cur_w = w;
+ c->cur_h = h;
+ c->cur_hx = dx;
+ c->cur_hy = dy;
+ if((c->cur_hx > c->cur_w) || (c->cur_hy > c->cur_h)) {
+ av_log(avctx, AV_LOG_ERROR, "Cursor hot spot is not in image: %ix%i of %ix%i cursor size\n", c->cur_hx, c->cur_hy, c->cur_w, c->cur_h);
+ c->cur_hx = c->cur_hy = 0;
+ }
+ c->curbits = av_realloc(c->curbits, c->cur_w * c->cur_h * c->bpp2);
+ c->curmask = av_realloc(c->curmask, c->cur_w * c->cur_h * c->bpp2);
+ c->screendta = av_realloc(c->screendta, c->cur_w * c->cur_h * c->bpp2);
+ load_cursor(c, src);
+ src += w * h * c->bpp2 * 2;
+ break;
+ case MAGIC_WMVe: // unknown
+ src += 2;
+ break;
+ case MAGIC_WMVf: // update cursor position
+ c->cur_x = dx - c->cur_hx;
+ c->cur_y = dy - c->cur_hy;
+ break;
+ case MAGIC_WMVg: // unknown
+ src += 10;
+ break;
+ case MAGIC_WMVh: // unknown
+ src += 4;
+ break;
+ case MAGIC_WMVi: // ServerInitialization struct
+ c->pic.key_frame = 1;
+ c->pic.pict_type = FF_I_TYPE;
+ depth = *src++;
+ if(depth != c->bpp) {
+ av_log(avctx, AV_LOG_INFO, "Depth mismatch. Container %i bpp, Frame data: %i bpp\n", c->bpp, depth);
+ }
+ src++;
+ c->bigendian = *src++;
+ if(c->bigendian & (~1)) {
+ av_log(avctx, AV_LOG_INFO, "Invalid header: bigendian flag = %i\n", c->bigendian);
+ return -1;
+ }
+ //skip the rest of pixel format data
+ src += 13;
+ break;
+ case MAGIC_WMVj: // unknown
+ src += 2;
+ break;
+ case 0x00000000: // raw rectangle data
+ if((dx + w > c->width) || (dy + h > c->height)) {
+ av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
+ return -1;
+ }
+ if(size_left < w * h * c->bpp2) {
+ av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", w * h * c->bpp2, size_left);
+ return -1;
+ }
+ paint_raw(outptr, w, h, src, c->bpp2, c->bigendian, c->pic.linesize[0]);
+ src += w * h * c->bpp2;
+ break;
+ case 0x00000005: // HexTile encoded rectangle
+ if((dx + w > c->width) || (dy + h > c->height)) {
+ av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
+ return -1;
+ }
+ res = decode_hextile(c, outptr, src, size_left, w, h, c->pic.linesize[0]);
+ if(res < 0)
+ return -1;
+ src += res;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Unsupported block type 0x%08X\n", enc);
+ chunks = 0; // leave chunks decoding loop
+ }
+ }
+ if(c->screendta){
+ int i;
+ //save screen data before painting cursor
+ w = c->cur_w;
+ if(c->width < c->cur_x + w) w = c->width - c->cur_x;
+ h = c->cur_h;
+ if(c->height < c->cur_y + h) h = c->height - c->cur_y;
+ dx = c->cur_x;
+ if(dx < 0) {
+ w += dx;
+ dx = 0;
+ }
+ dy = c->cur_y;
+ if(dy < 0) {
+ h += dy;
+ dy = 0;
+ }
+ if((w > 0) && (h > 0)) {
+ outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
+ for(i = 0; i < h; i++) {
+ memcpy(c->screendta + i * c->cur_w * c->bpp2, outptr, w * c->bpp2);
+ outptr += c->pic.linesize[0];
+ }
+ outptr = c->pic.data[0];
+ put_cursor(outptr, c->pic.linesize[0], c, c->cur_x, c->cur_y);
+ }
+ }
+ *data_size = sizeof(AVFrame);
+ *(AVFrame*)data = c->pic;
+
+ /* always report that the buffer was completely consumed */
+ return buf_size;
+}
+
+
+
+/*
+ *
+ * Init VMnc decoder
+ *
+ */
+static int decode_init(AVCodecContext *avctx)
+{
+ VmncContext * const c = (VmncContext *)avctx->priv_data;
+
+ c->avctx = avctx;
+ avctx->has_b_frames = 0;
+
+ c->pic.data[0] = NULL;
+ c->width = avctx->width;
+ c->height = avctx->height;
+
+ if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
+ return 1;
+ }
+ c->bpp = avctx->bits_per_sample;
+ c->bpp2 = c->bpp/8;
+
+ switch(c->bpp){
+ case 8:
+ avctx->pix_fmt = PIX_FMT_PAL8;
+ break;
+ case 16:
+ avctx->pix_fmt = PIX_FMT_RGB555;
+ break;
+ case 32:
+ avctx->pix_fmt = PIX_FMT_RGB32;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Unsupported bitdepth %i\n", c->bpp);
+ }
+
+ return 0;
+}
+
+
+
+/*
+ *
+ * Uninit VMnc decoder
+ *
+ */
+static int decode_end(AVCodecContext *avctx)
+{
+ VmncContext * const c = (VmncContext *)avctx->priv_data;
+
+ if (c->pic.data[0])
+ avctx->release_buffer(avctx, &c->pic);
+
+ av_free(c->curbits);
+ av_free(c->curmask);
+ av_free(c->screendta);
+ return 0;
+}
+
+AVCodec vmnc_decoder = {
+ "VMware video",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VMNC,
+ sizeof(VmncContext),
+ decode_init,
+ NULL,
+ decode_end,
+ decode_frame
+};
+
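
In decode_hextile() above, each coloured subrectangle is described by two bytes: one packs the x/y offset inside the 16x16 tile into its high/low nibbles, the next packs width-1/height-1 the same way, so a subrect can never leave its tile. A small stand-alone sketch of that unpacking (names illustrative, not part of the patch):

#include <stdint.h>

struct subrect { int x, y, w, h; };

static struct subrect unpack_subrect(uint8_t xy, uint8_t wh)
{
    struct subrect r;
    r.x = xy >> 4;          /* high nibble: horizontal offset in the tile */
    r.y = xy & 0xF;         /* low nibble: vertical offset */
    r.w = (wh >> 4) + 1;    /* stored as width - 1 */
    r.h = (wh & 0xF) + 1;   /* stored as height - 1 */
    return r;
}
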
diff --git a/src/libffmpeg/libavcodec/vorbis.c b/contrib/ffmpeg/libavcodec/vorbis.c
index de3688c91..ca8d0a956 100644
--- a/src/libffmpeg/libavcodec/vorbis.c
+++ b/contrib/ffmpeg/libavcodec/vorbis.c
@@ -3,18 +3,20 @@
* Vorbis I decoder
* @author Denes Balatoni ( dbalatoni programozo hu )
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -43,25 +45,127 @@
#undef NDEBUG
#include <assert.h>
-/* Helper functions */
-
-/**
- * reads 0-32 bits when using the ALT_BITSTREAM_READER_LE bitstream reader
- */
-static unsigned int get_bits_long_le(GetBitContext *s, int n){
- if(n<=17) return get_bits(s, n);
- else{
- int ret= get_bits(s, 16);
- return ret | (get_bits(s, n-16) << 16);
- }
-}
+typedef struct {
+ uint_fast8_t dimensions;
+ uint_fast8_t lookup_type;
+ uint_fast8_t maxdepth;
+ VLC vlc;
+ float *codevectors;
+ unsigned int nb_bits;
+} vorbis_codebook;
+
+typedef union vorbis_floor_u vorbis_floor_data;
+typedef struct vorbis_floor0_s vorbis_floor0;
+typedef struct vorbis_floor1_s vorbis_floor1;
+struct vorbis_context_s;
+typedef
+uint_fast8_t (* vorbis_floor_decode_func)
+ (struct vorbis_context_s *, vorbis_floor_data *, float *);
+typedef struct {
+ uint_fast8_t floor_type;
+ vorbis_floor_decode_func decode;
+ union vorbis_floor_u
+ {
+ struct vorbis_floor0_s
+ {
+ uint_fast8_t order;
+ uint_fast16_t rate;
+ uint_fast16_t bark_map_size;
+ int_fast32_t * map[2];
+ uint_fast32_t map_size[2];
+ uint_fast8_t amplitude_bits;
+ uint_fast8_t amplitude_offset;
+ uint_fast8_t num_books;
+ uint_fast8_t * book_list;
+ float * lsp;
+ } t0;
+ struct vorbis_floor1_s
+ {
+ uint_fast8_t partitions;
+ uint_fast8_t maximum_class;
+ uint_fast8_t partition_class[32];
+ uint_fast8_t class_dimensions[16];
+ uint_fast8_t class_subclasses[16];
+ uint_fast8_t class_masterbook[16];
+ int_fast16_t subclass_books[16][8];
+ uint_fast8_t multiplier;
+ uint_fast16_t x_list_dim;
+ floor1_entry_t * list;
+ } t1;
+ } data;
+} vorbis_floor;
+
+typedef struct {
+ uint_fast16_t type;
+ uint_fast32_t begin;
+ uint_fast32_t end;
+ uint_fast32_t partition_size;
+ uint_fast8_t classifications;
+ uint_fast8_t classbook;
+ int_fast16_t books[64][8];
+ uint_fast8_t maxpass;
+} vorbis_residue;
+
+typedef struct {
+ uint_fast8_t submaps;
+ uint_fast16_t coupling_steps;
+ uint_fast8_t *magnitude;
+ uint_fast8_t *angle;
+ uint_fast8_t *mux;
+ uint_fast8_t submap_floor[16];
+ uint_fast8_t submap_residue[16];
+} vorbis_mapping;
+
+typedef struct {
+ uint_fast8_t blockflag;
+ uint_fast16_t windowtype;
+ uint_fast16_t transformtype;
+ uint_fast8_t mapping;
+} vorbis_mode;
+
+typedef struct vorbis_context_s {
+ AVCodecContext *avccontext;
+ GetBitContext gb;
+ DSPContext dsp;
+
+ MDCTContext mdct[2];
+ uint_fast8_t first_frame;
+ uint_fast32_t version;
+ uint_fast8_t audio_channels;
+ uint_fast32_t audio_samplerate;
+ uint_fast32_t bitrate_maximum;
+ uint_fast32_t bitrate_nominal;
+ uint_fast32_t bitrate_minimum;
+ uint_fast32_t blocksize[2];
+ const float * win[2];
+ uint_fast16_t codebook_count;
+ vorbis_codebook *codebooks;
+ uint_fast8_t floor_count;
+ vorbis_floor *floors;
+ uint_fast8_t residue_count;
+ vorbis_residue *residues;
+ uint_fast8_t mapping_count;
+ vorbis_mapping *mappings;
+ uint_fast8_t mode_count;
+ vorbis_mode *modes;
+ uint_fast8_t mode_number; // mode number for the current packet
+ float *channel_residues;
+ float *channel_floors;
+ float *saved;
+ uint_fast16_t saved_start;
+ float *ret;
+ float *buf;
+ float *buf_tmp;
+ uint_fast32_t add_bias; // for float->int conversion
+ uint_fast32_t exp_bias;
+} vorbis_context;
-#define ilog(i) av_log2(2*(i))
+/* Helper functions */
#define BARK(x) \
(13.1f*atan(0.00074f*(x))+2.24f*atan(1.85e-8f*(x)*(x))+1e-4f*(x))
-static unsigned int nth_root(unsigned int x, unsigned int n) { // x^(1/n)
+unsigned int ff_vorbis_nth_root(unsigned int x, unsigned int n) { // x^(1/n)
unsigned int ret=0, i, j;
do {
@@ -82,7 +186,7 @@ static float vorbisfloat2float(uint_fast32_t val) {
// Generate vlc codes from vorbis huffman code lengths
-static int vorbis_len2vlc(vorbis_context *vc, uint_fast8_t *bits, uint_fast32_t *codes, uint_fast32_t num) {
+int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, uint_fast32_t num) {
uint_fast32_t exit_at_level[33]={404,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
@@ -105,12 +209,12 @@ static int vorbis_len2vlc(vorbis_context *vc, uint_fast8_t *bits, uint_fast32_t
}
#ifdef V_DEBUG
- av_log(vc->avccontext, AV_LOG_INFO, " %d. of %d code len %d code %d - ", p, num, bits[p], codes[p]);
+ av_log(NULL, AV_LOG_INFO, " %d. of %d code len %d code %d - ", p, num, bits[p], codes[p]);
init_get_bits(&gb, (uint_fast8_t *)&codes[p], bits[p]);
for(i=0;i<bits[p];++i) {
- av_log(vc->avccontext, AV_LOG_INFO, "%s", get_bits1(&gb) ? "1" : "0");
+ av_log(NULL, AV_LOG_INFO, "%s", get_bits1(&gb) ? "1" : "0");
}
- av_log(vc->avccontext, AV_LOG_INFO, "\n");
+ av_log(NULL, AV_LOG_INFO, "\n");
#endif
++p;
@@ -131,21 +235,53 @@ static int vorbis_len2vlc(vorbis_context *vc, uint_fast8_t *bits, uint_fast32_t
codes[p]=code;
#ifdef V_DEBUG
- av_log(vc->avccontext, AV_LOG_INFO, " %d. code len %d code %d - ", p, bits[p], codes[p]);
+ av_log(NULL, AV_LOG_INFO, " %d. code len %d code %d - ", p, bits[p], codes[p]);
init_get_bits(&gb, (uint_fast8_t *)&codes[p], bits[p]);
for(i=0;i<bits[p];++i) {
- av_log(vc->avccontext, AV_LOG_INFO, "%s", get_bits1(&gb) ? "1" : "0");
+ av_log(NULL, AV_LOG_INFO, "%s", get_bits1(&gb) ? "1" : "0");
}
- av_log(vc->avccontext, AV_LOG_INFO, "\n");
+ av_log(NULL, AV_LOG_INFO, "\n");
#endif
}
- //FIXME no exits should be left (underspecified tree - ie. unused valid vlcs - not allowed by SPEC)
+ //no exits should be left (an underspecified tree, i.e. unused valid VLCs, is not allowed by SPEC)
+ for (p=1; p<33; p++)
+ if (exit_at_level[p]) return 1;
return 0;
}
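
For readers following the refactor: ff_vorbis_len2vlc() assigns codewords to the code-length list from the setup header, tracking the next free code at every tree depth, and with this change it rejects under-specified trees (leftover exits). Below is a minimal standalone sketch of the same idea using the usual canonical-code construction and hypothetical helper names rather than the exit_at_level bookkeeping (the bit ordering also differs from what the decoder's LE reader expects); it only illustrates what a length list determines and when it is invalid.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical illustration: build canonical prefix codes from code
     * lengths, and reject length sets whose Kraft sum is not exactly 1
     * (the over-/under-specified case the patch now returns 1 for). */
    static int lengths_to_codes(const uint8_t *bits, uint32_t *codes, int num)
    {
        uint32_t next[33]  = { 0 };   /* first free code at each length */
        uint32_t count[33] = { 0 };   /* how many codes of each length  */
        uint64_t kraft = 0;
        int i, L;

        for (i = 0; i < num; i++)
            count[bits[i]]++;
        count[0] = 0;                 /* length 0 means "entry unused"  */

        for (L = 1; L <= 32; L++)
            next[L] = (next[L - 1] + count[L - 1]) << 1;

        for (i = 0; i < num; i++)
            if (bits[i])
                kraft += 1ULL << (32 - bits[i]);
        if (kraft != 1ULL << 32)
            return 1;                 /* tree not fully specified       */

        for (i = 0; i < num; i++)
            codes[i] = bits[i] ? next[bits[i]]++ : 0;
        return 0;
    }

    int main(void)
    {
        const uint8_t bits[4] = { 1, 2, 3, 3 };   /* a valid length set */
        uint32_t codes[4];
        if (!lengths_to_codes(bits, codes, 4))
            for (int i = 0; i < 4; i++)
                printf("entry %d: len %d code %u\n", i, bits[i], codes[i]);
        return 0;                     /* prints codes 0, 10, 110, 111   */
    }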
+void ff_vorbis_ready_floor1_list(floor1_entry_t * list, int values) {
+ int i;
+ list[0].sort = 0;
+ list[1].sort = 1;
+ for (i = 2; i < values; i++) {
+ int j;
+ list[i].low = 0;
+ list[i].high = 1;
+ list[i].sort = i;
+ for (j = 2; j < i; j++) {
+ int tmp = list[j].x;
+ if (tmp < list[i].x) {
+ if (tmp > list[list[i].low].x) list[i].low = j;
+ } else {
+ if (tmp < list[list[i].high].x) list[i].high = j;
+ }
+ }
+ }
+ for (i = 0; i < values - 1; i++) {
+ int j;
+ for (j = i + 1; j < values; j++) {
+ if (list[list[i].sort].x > list[list[j].sort].x) {
+ int tmp = list[i].sort;
+ list[i].sort = list[j].sort;
+ list[j].sort = tmp;
+ }
+ }
+ }
+}
+
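
For reference, ff_vorbis_ready_floor1_list() precomputes what the three separate arrays used to hold: for every floor1 X value beyond the first two, .low and .high are the indices of the closest smaller and closest larger X among the preceding entries, and .sort lists the entries in ascending X order. As a toy illustration (assumed values, not from any real stream), X = {0, 128, 32, 96} yields low/high = {0,1} for entry 2, low/high = {2,1} for entry 3, and sort = {0, 2, 3, 1}.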
// Free all allocated memory -----------------------------------------
static void vorbis_free(vorbis_context *vc) {
@@ -161,8 +297,8 @@ static void vorbis_free(vorbis_context *vc) {
av_freep(&vc->residues);
av_freep(&vc->modes);
- ff_mdct_end(&vc->mdct0);
- ff_mdct_end(&vc->mdct1);
+ ff_mdct_end(&vc->mdct[0]);
+ ff_mdct_end(&vc->mdct[1]);
for(i=0;i<vc->codebook_count;++i) {
av_free(vc->codebooks[i].codevectors);
@@ -178,10 +314,7 @@ static void vorbis_free(vorbis_context *vc) {
av_free(vc->floors[i].data.t0.lsp);
}
else {
- av_free(vc->floors[i].data.t1.x_list);
- av_free(vc->floors[i].data.t1.x_list_order);
- av_free(vc->floors[i].data.t1.low_neighbour);
- av_free(vc->floors[i].data.t1.high_neighbour);
+ av_free(vc->floors[i].data.t1.list);
}
}
av_freep(&vc->floors);
@@ -192,6 +325,11 @@ static void vorbis_free(vorbis_context *vc) {
av_free(vc->mappings[i].mux);
}
av_freep(&vc->mappings);
+
+ if(vc->exp_bias){
+ av_freep(&vc->win[0]);
+ av_freep(&vc->win[1]);
+ }
}
// Parse setup header -------------------------------------------------
@@ -200,8 +338,8 @@ static void vorbis_free(vorbis_context *vc) {
static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc) {
uint_fast16_t cb;
- uint_fast8_t *tmp_vlc_bits;
- uint_fast32_t *tmp_vlc_codes;
+ uint8_t *tmp_vlc_bits;
+ uint32_t *tmp_vlc_codes;
GetBitContext *gb=&vc->gb;
vc->codebook_count=get_bits(gb,8)+1;
@@ -209,8 +347,8 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc) {
AV_DEBUG(" Codebooks: %d \n", vc->codebook_count);
vc->codebooks=(vorbis_codebook *)av_mallocz(vc->codebook_count * sizeof(vorbis_codebook));
- tmp_vlc_bits=(uint_fast8_t *)av_mallocz(V_MAX_VLCS * sizeof(uint_fast8_t));
- tmp_vlc_codes=(uint_fast32_t *)av_mallocz(V_MAX_VLCS * sizeof(uint_fast32_t));
+ tmp_vlc_bits=(uint8_t *)av_mallocz(V_MAX_VLCS * sizeof(uint8_t));
+ tmp_vlc_codes=(uint32_t *)av_mallocz(V_MAX_VLCS * sizeof(uint32_t));
for(cb=0;cb<vc->codebook_count;++cb) {
vorbis_codebook *codebook_setup=&vc->codebooks[cb];
@@ -303,11 +441,11 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc) {
if (codebook_setup->lookup_type==1) {
uint_fast16_t i, j, k;
- uint_fast16_t codebook_lookup_values=nth_root(entries, codebook_setup->dimensions);
+ uint_fast16_t codebook_lookup_values=ff_vorbis_nth_root(entries, codebook_setup->dimensions);
uint_fast16_t codebook_multiplicands[codebook_lookup_values];
- float codebook_minimum_value=vorbisfloat2float(get_bits_long_le(gb, 32));
- float codebook_delta_value=vorbisfloat2float(get_bits_long_le(gb, 32));
+ float codebook_minimum_value=vorbisfloat2float(get_bits_long(gb, 32));
+ float codebook_delta_value=vorbisfloat2float(get_bits_long(gb, 32));
uint_fast8_t codebook_value_bits=get_bits(gb, 4)+1;
uint_fast8_t codebook_sequence_p=get_bits1(gb);
@@ -367,7 +505,7 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc) {
}
// Initialize VLC table
- if (vorbis_len2vlc(vc, tmp_vlc_bits, tmp_vlc_codes, entries)) {
+ if (ff_vorbis_len2vlc(tmp_vlc_bits, tmp_vlc_codes, entries)) {
av_log(vc->avccontext, AV_LOG_ERROR, " Invalid code lengths while generating vlcs. \n");
goto error;
}
@@ -488,57 +626,23 @@ static int vorbis_parse_setup_hdr_floors(vorbis_context *vc) {
floor_setup->data.t1.x_list_dim+=floor_setup->data.t1.class_dimensions[floor_setup->data.t1.partition_class[j]];
}
- floor_setup->data.t1.x_list=(uint_fast16_t *)av_mallocz(floor_setup->data.t1.x_list_dim * sizeof(uint_fast16_t));
- floor_setup->data.t1.x_list_order=(uint_fast16_t *)av_mallocz(floor_setup->data.t1.x_list_dim * sizeof(uint_fast16_t));
- floor_setup->data.t1.low_neighbour=(uint_fast16_t *)av_mallocz(floor_setup->data.t1.x_list_dim * sizeof(uint_fast16_t));
- floor_setup->data.t1.high_neighbour=(uint_fast16_t *)av_mallocz(floor_setup->data.t1.x_list_dim * sizeof(uint_fast16_t));
+ floor_setup->data.t1.list=(floor1_entry_t *)av_mallocz(floor_setup->data.t1.x_list_dim * sizeof(floor1_entry_t));
rangebits=get_bits(gb, 4);
- floor_setup->data.t1.x_list[0] = 0;
- floor_setup->data.t1.x_list[1] = (1<<rangebits);
+ floor_setup->data.t1.list[0].x = 0;
+ floor_setup->data.t1.list[1].x = (1<<rangebits);
for(j=0;j<floor_setup->data.t1.partitions;++j) {
for(k=0;k<floor_setup->data.t1.class_dimensions[floor_setup->data.t1.partition_class[j]];++k,++floor1_values) {
- floor_setup->data.t1.x_list[floor1_values]=get_bits(gb, rangebits);
+ floor_setup->data.t1.list[floor1_values].x=get_bits(gb, rangebits);
- AV_DEBUG(" %d. floor1 Y coord. %d \n", floor1_values, floor_setup->data.t1.x_list[floor1_values]);
+ AV_DEBUG(" %d. floor1 X coord. %d \n", floor1_values, floor_setup->data.t1.list[floor1_values].x);
}
}
// Precalculate order of x coordinates - needed for decode
-
- for(k=0;k<floor_setup->data.t1.x_list_dim;++k) {
- floor_setup->data.t1.x_list_order[k]=k;
- }
-
- for(k=0;k<floor_setup->data.t1.x_list_dim-1;++k) { // FIXME optimize sorting ?
- for(j=k+1;j<floor_setup->data.t1.x_list_dim;++j) {
- if(floor_setup->data.t1.x_list[floor_setup->data.t1.x_list_order[k]]>floor_setup->data.t1.x_list[floor_setup->data.t1.x_list_order[j]]) {
- uint_fast16_t tmp=floor_setup->data.t1.x_list_order[k];
- floor_setup->data.t1.x_list_order[k]=floor_setup->data.t1.x_list_order[j];
- floor_setup->data.t1.x_list_order[j]=tmp;
- }
- }
- }
-
-// Precalculate low and high neighbours
-
- for(k=2;k<floor_setup->data.t1.x_list_dim;++k) {
- floor_setup->data.t1.low_neighbour[k]=0;
- floor_setup->data.t1.high_neighbour[k]=1; // correct according to SPEC requirements
-
- for (j=0;j<k;++j) {
- if ((floor_setup->data.t1.x_list[j]<floor_setup->data.t1.x_list[k]) &&
- (floor_setup->data.t1.x_list[j]>floor_setup->data.t1.x_list[floor_setup->data.t1.low_neighbour[k]])) {
- floor_setup->data.t1.low_neighbour[k]=j;
- }
- if ((floor_setup->data.t1.x_list[j]>floor_setup->data.t1.x_list[k]) &&
- (floor_setup->data.t1.x_list[j]<floor_setup->data.t1.x_list[floor_setup->data.t1.high_neighbour[k]])) {
- floor_setup->data.t1.high_neighbour[k]=j;
- }
- }
- }
+ ff_vorbis_ready_floor1_list(floor_setup->data.t1.list, floor_setup->data.t1.x_list_dim);
}
else if(floor_setup->floor_type==0) {
uint_fast8_t max_codebook_dim=0;
@@ -757,7 +861,7 @@ static void create_map( vorbis_context * vc, uint_fast8_t floor_number )
for (blockflag=0;blockflag<2;++blockflag)
{
- n=(blockflag ? vc->blocksize_1 : vc->blocksize_0) / 2;
+ n=vc->blocksize[blockflag]/2;
floors[floor_number].data.t0.map[blockflag]=
av_malloc((n+1) * sizeof(int_fast32_t)); // n+sentinel
@@ -855,7 +959,6 @@ static int vorbis_parse_setup_hdr(vorbis_context *vc) {
static int vorbis_parse_id_hdr(vorbis_context *vc){
GetBitContext *gb=&vc->gb;
uint_fast8_t bl0, bl1;
- const float *vwin[8]={ vwin64, vwin128, vwin256, vwin512, vwin1024, vwin2048, vwin4096, vwin8192 };
if ((get_bits(gb, 8)!='v') || (get_bits(gb, 8)!='o') ||
(get_bits(gb, 8)!='r') || (get_bits(gb, 8)!='b') ||
@@ -864,53 +967,63 @@ static int vorbis_parse_id_hdr(vorbis_context *vc){
return 1;
}
- vc->version=get_bits_long_le(gb, 32); //FIXME check 0
+ vc->version=get_bits_long(gb, 32); //FIXME check 0
vc->audio_channels=get_bits(gb, 8); //FIXME check >0
- vc->audio_samplerate=get_bits_long_le(gb, 32); //FIXME check >0
- vc->bitrate_maximum=get_bits_long_le(gb, 32);
- vc->bitrate_nominal=get_bits_long_le(gb, 32);
- vc->bitrate_minimum=get_bits_long_le(gb, 32);
+ vc->audio_samplerate=get_bits_long(gb, 32); //FIXME check >0
+ vc->bitrate_maximum=get_bits_long(gb, 32);
+ vc->bitrate_nominal=get_bits_long(gb, 32);
+ vc->bitrate_minimum=get_bits_long(gb, 32);
bl0=get_bits(gb, 4);
bl1=get_bits(gb, 4);
- vc->blocksize_0=(1<<bl0);
- vc->blocksize_1=(1<<bl1);
+ vc->blocksize[0]=(1<<bl0);
+ vc->blocksize[1]=(1<<bl1);
if (bl0>13 || bl0<6 || bl1>13 || bl1<6 || bl1<bl0) {
av_log(vc->avccontext, AV_LOG_ERROR, " Vorbis id header packet corrupt (illegal blocksize). \n");
return 3;
}
// output format int16
- if (vc->blocksize_1/2 * vc->audio_channels * 2 >
+ if (vc->blocksize[1]/2 * vc->audio_channels * 2 >
AVCODEC_MAX_AUDIO_FRAME_SIZE) {
av_log(vc->avccontext, AV_LOG_ERROR, "Vorbis channel count makes "
"output packets too large.\n");
return 4;
}
- vc->swin=vwin[bl0-6];
- vc->lwin=vwin[bl1-6];
+ vc->win[0]=ff_vorbis_vwin[bl0-6];
+ vc->win[1]=ff_vorbis_vwin[bl1-6];
+
+ if(vc->exp_bias){
+ int i, j;
+ for(j=0; j<2; j++){
+ float *win = av_malloc(vc->blocksize[j]/2 * sizeof(float));
+ for(i=0; i<vc->blocksize[j]/2; i++)
+ win[i] = vc->win[j][i] * (1<<15);
+ vc->win[j] = win;
+ }
+ }
if ((get_bits1(gb)) == 0) {
av_log(vc->avccontext, AV_LOG_ERROR, " Vorbis id header packet corrupt (framing flag not set). \n");
return 2;
}
- vc->channel_residues=(float *)av_malloc((vc->blocksize_1/2)*vc->audio_channels * sizeof(float));
- vc->channel_floors=(float *)av_malloc((vc->blocksize_1/2)*vc->audio_channels * sizeof(float));
- vc->saved=(float *)av_malloc((vc->blocksize_1/2)*vc->audio_channels * sizeof(float));
- vc->ret=(float *)av_malloc((vc->blocksize_1/2)*vc->audio_channels * sizeof(float));
- vc->buf=(float *)av_malloc(vc->blocksize_1 * sizeof(float));
- vc->buf_tmp=(float *)av_malloc(vc->blocksize_1 * sizeof(float));
+ vc->channel_residues=(float *)av_malloc((vc->blocksize[1]/2)*vc->audio_channels * sizeof(float));
+ vc->channel_floors=(float *)av_malloc((vc->blocksize[1]/2)*vc->audio_channels * sizeof(float));
+ vc->saved=(float *)av_malloc((vc->blocksize[1]/2)*vc->audio_channels * sizeof(float));
+ vc->ret=(float *)av_malloc((vc->blocksize[1]/2)*vc->audio_channels * sizeof(float));
+ vc->buf=(float *)av_malloc(vc->blocksize[1] * sizeof(float));
+ vc->buf_tmp=(float *)av_malloc(vc->blocksize[1] * sizeof(float));
vc->saved_start=0;
- ff_mdct_init(&vc->mdct0, bl0, 1);
- ff_mdct_init(&vc->mdct1, bl1, 1);
+ ff_mdct_init(&vc->mdct[0], bl0, 1);
+ ff_mdct_init(&vc->mdct[1], bl1, 1);
AV_DEBUG(" vorbis version %d \n audio_channels %d \n audio_samplerate %d \n bitrate_max %d \n bitrate_nom %d \n bitrate_min %d \n blk_0 %d blk_1 %d \n ",
- vc->version, vc->audio_channels, vc->audio_samplerate, vc->bitrate_maximum, vc->bitrate_nominal, vc->bitrate_minimum, vc->blocksize_0, vc->blocksize_1);
+ vc->version, vc->audio_channels, vc->audio_samplerate, vc->bitrate_maximum, vc->bitrate_nominal, vc->bitrate_minimum, vc->blocksize[0], vc->blocksize[1]);
/*
- BLK=vc->blocksize_0;
+ BLK=vc->blocksize[0];
for(i=0;i<BLK/2;++i) {
- vc->swin[i]=sin(0.5*3.14159265358*(sin(((float)i+0.5)/(float)BLK*3.14159265358))*(sin(((float)i+0.5)/(float)BLK*3.14159265358)));
+ vc->win[0][i]=sin(0.5*3.14159265358*(sin(((float)i+0.5)/(float)BLK*3.14159265358))*(sin(((float)i+0.5)/(float)BLK*3.14159265358)));
}
*/
@@ -929,6 +1042,15 @@ static int vorbis_decode_init(AVCodecContext *avccontext) {
int i, j, hdr_type;
vc->avccontext = avccontext;
+ dsputil_init(&vc->dsp, avccontext);
+
+ if(vc->dsp.float_to_int16 == ff_float_to_int16_c) {
+ vc->add_bias = 385;
+ vc->exp_bias = 0;
+ } else {
+ vc->add_bias = 0;
+ vc->exp_bias = 15<<23;
+ }
if (!headers_len) {
av_log(avccontext, AV_LOG_ERROR, "Extradata corrupt.\n");
@@ -1110,6 +1232,50 @@ static uint_fast8_t vorbis_floor0_decode(vorbis_context *vc,
return 0;
}
+
+static void render_line(int x0, int y0, int x1, int y1, float * buf, int n) {
+ int dy = y1 - y0;
+ int adx = x1 - x0;
+ int ady = FFABS(dy);
+ int base = dy / adx;
+ int x = x0;
+ int y = y0;
+ int err = 0;
+ int sy;
+ if (dy < 0) sy = base - 1;
+ else sy = base + 1;
+ ady = ady - FFABS(base) * adx;
+ if (x >= n) return;
+ buf[x] = ff_vorbis_floor1_inverse_db_table[y];
+ for (x = x0 + 1; x < x1; x++) {
+ if (x >= n) return;
+ err += ady;
+ if (err >= adx) {
+ err -= adx;
+ y += sy;
+ } else {
+ y += base;
+ }
+ buf[x] = ff_vorbis_floor1_inverse_db_table[y];
+ }
+}
+
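render_line() is the floor1 curve renderer from the Vorbis I spec: an integer Bresenham-style walk where base = dy/adx is the truncated per-step slope and the err accumulator decides when to step by sy instead of base. A self-contained sketch of the same walk follows (hypothetical names, illustration only; the real function stores ff_vorbis_floor1_inverse_db_table[y] into the output buffer rather than printing).

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative only: integer line from (x0,y0) up to but not including
     * x1, as used by Vorbis floor1 curve synthesis. */
    static void draw_line(int x0, int y0, int x1, int y1)
    {
        int dy   = y1 - y0;
        int adx  = x1 - x0;
        int ady  = abs(dy);
        int base = dy / adx;                      /* truncated slope per step  */
        int sy   = dy < 0 ? base - 1 : base + 1;  /* slope when err spills     */
        int y    = y0, err = 0;

        ady -= abs(base) * adx;                   /* remaining fractional part */
        printf("x=%d y=%d\n", x0, y);
        for (int x = x0 + 1; x < x1; x++) {
            err += ady;
            if (err >= adx) {
                err -= adx;
                y += sy;
            } else {
                y += base;
            }
            printf("x=%d y=%d\n", x, y);
        }
    }

    int main(void)
    {
        draw_line(0, 10, 8, 3);                   /* a falling floor segment   */
        return 0;
    }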
+void ff_vorbis_floor1_render_list(floor1_entry_t * list, int values, uint_fast16_t * y_list, int * flag, int multiplier, float * out, int samples) {
+ int lx, ly, i;
+ lx = 0;
+ ly = y_list[0] * multiplier;
+ for (i = 1; i < values; i++) {
+ int pos = list[i].sort;
+ if (flag[pos]) {
+ render_line(lx, ly, list[pos].x, y_list[pos] * multiplier, out, samples);
+ lx = list[pos].x;
+ ly = y_list[pos] * multiplier;
+ }
+ if (lx >= samples) break;
+ }
+ if (lx < samples) render_line(lx, ly, samples, ly, out, samples);
+}
+
static uint_fast8_t vorbis_floor1_decode(vorbis_context *vc, vorbis_floor_data *vfu, float *vec) {
vorbis_floor1 * vf=&vfu->t1;
GetBitContext *gb=&vc->gb;
@@ -1117,7 +1283,7 @@ static uint_fast8_t vorbis_floor1_decode(vorbis_context *vc, vorbis_floor_data *
uint_fast16_t range=range_v[vf->multiplier-1];
uint_fast16_t floor1_Y[vf->x_list_dim];
uint_fast16_t floor1_Y_final[vf->x_list_dim];
- uint_fast8_t floor1_flag[vf->x_list_dim];
+ int floor1_flag[vf->x_list_dim];
uint_fast8_t class_;
uint_fast8_t cdim;
uint_fast8_t cbits;
@@ -1126,10 +1292,8 @@ static uint_fast8_t vorbis_floor1_decode(vorbis_context *vc, vorbis_floor_data *
int_fast16_t book;
uint_fast16_t offset;
uint_fast16_t i,j;
- uint_fast16_t *floor_x_sort=vf->x_list_order;
/*u*/int_fast16_t adx, ady, off, predicted; // WTF ? dy/adx= (unsigned)dy/adx ?
int_fast16_t dy, err;
- uint_fast16_t lx,hx, ly, hy=0;
if (!get_bits1(gb)) return 1; // silence
@@ -1162,14 +1326,14 @@ static uint_fast8_t vorbis_floor1_decode(vorbis_context *vc, vorbis_floor_data *
AV_DEBUG("book %d Cbits %d cval %d bits:%d \n", book, cbits, cval, get_bits_count(gb));
cval=cval>>cbits;
- if (book>0) {
+ if (book>-1) {
floor1_Y[offset+j]=get_vlc2(gb, vc->codebooks[book].vlc.table,
vc->codebooks[book].nb_bits, 3);
} else {
floor1_Y[offset+j]=0;
}
- AV_DEBUG(" floor(%d) = %d \n", vf->x_list[offset+j], floor1_Y[offset+j]);
+ AV_DEBUG(" floor(%d) = %d \n", vf->list[offset+j].x, floor1_Y[offset+j]);
}
offset+=cdim;
}
@@ -1186,13 +1350,13 @@ static uint_fast8_t vorbis_floor1_decode(vorbis_context *vc, vorbis_floor_data *
uint_fast16_t high_neigh_offs;
uint_fast16_t low_neigh_offs;
- low_neigh_offs=vf->low_neighbour[i];
- high_neigh_offs=vf->high_neighbour[i];
+ low_neigh_offs=vf->list[i].low;
+ high_neigh_offs=vf->list[i].high;
dy=floor1_Y_final[high_neigh_offs]-floor1_Y_final[low_neigh_offs]; // render_point begin
- adx=vf->x_list[high_neigh_offs]-vf->x_list[low_neigh_offs];
- ady= ABS(dy);
- err=ady*(vf->x_list[i]-vf->x_list[low_neigh_offs]);
- off=err/adx;
+ adx=vf->list[high_neigh_offs].x-vf->list[low_neigh_offs].x;
+ ady= FFABS(dy);
+ err=ady*(vf->list[i].x-vf->list[low_neigh_offs].x);
+ off=(int16_t)err/(int16_t)adx;
if (dy<0) {
predicted=floor1_Y_final[low_neigh_offs]-off;
} else {
@@ -1229,85 +1393,12 @@ static uint_fast8_t vorbis_floor1_decode(vorbis_context *vc, vorbis_floor_data *
floor1_Y_final[i]=predicted;
}
- AV_DEBUG(" Decoded floor(%d) = %d / val %d \n", vf->x_list[i], floor1_Y_final[i], val);
+ AV_DEBUG(" Decoded floor(%d) = %d / val %d \n", vf->list[i].x, floor1_Y_final[i], val);
}
// Curve synth - connect the calculated dots and convert from dB scale FIXME optimize ?
- hx=0;
- lx=0;
- ly=floor1_Y_final[0]*vf->multiplier; // conforms to SPEC
-
- vec[0]=floor1_inverse_db_table[ly];
-
- for(i=1;i<vf->x_list_dim;++i) {
- AV_DEBUG(" Looking at post %d \n", i);
-
- if (floor1_flag[floor_x_sort[i]]) { // SPEC mispelled
- int_fast16_t x, y, dy, base, sy; // if uncommented: dy = -32 adx = 2 base = 2blablabla ?????
-
- hy=floor1_Y_final[floor_x_sort[i]]*vf->multiplier;
- hx=vf->x_list[floor_x_sort[i]];
-
- dy=hy-ly;
- adx=hx-lx;
- ady= (dy<0) ? -dy:dy;//ABS(dy);
- base=dy/adx;
-
- AV_DEBUG(" dy %d adx %d base %d = %d \n", dy, adx, base, dy/adx);
-
- x=lx;
- y=ly;
- err=0;
- if (dy<0) {
- sy=base-1;
- } else {
- sy=base+1;
- }
- ady=ady-(base<0 ? -base : base)*adx;
- vec[x]=floor1_inverse_db_table[y];
-
- AV_DEBUG(" vec[ %d ] = %d \n", x, y);
-
- for(x=lx+1;(x<hx) && (x<vf->x_list[1]);++x) {
- err+=ady;
- if (err>=adx) {
- err-=adx;
- y+=sy;
- } else {
- y+=base;
- }
- vec[x]=floor1_inverse_db_table[y];
-
- AV_DEBUG(" vec[ %d ] = %d \n", x, y);
- }
-
-/* for(j=1;j<hx-lx+1;++j) { // iterating render_point
- dy=hy-ly;
- adx=hx-lx;
- ady= dy<0 ? -dy : dy;
- err=ady*j;
- off=err/adx;
- if (dy<0) {
- predicted=ly-off;
- } else {
- predicted=ly+off;
- }
- if (lx+j < vf->x_list[1]) {
- vec[lx+j]=floor1_inverse_db_table[predicted];
- }
- }*/
-
- lx=hx;
- ly=hy;
- }
- }
-
- if (hx<vf->x_list[1]) {
- for(i=hx;i<vf->x_list[1];++i) {
- vec[i]=floor1_inverse_db_table[hy];
- }
- }
+ ff_vorbis_floor1_render_list(vf->list, vf->x_list_dim, floor1_Y_final, floor1_flag, vf->multiplier, vec, vf->list[1].x);
AV_DEBUG(" Floor decoded\n");
@@ -1347,6 +1438,7 @@ static int vorbis_residue_decode(vorbis_context *vc, vorbis_residue *vr, uint_fa
voffset=vr->begin;
for(partition_count=0;partition_count<ptns_to_read;) { // SPEC error
if (!pass) {
+ uint_fast32_t inverse_class = ff_inverse[vr->classifications];
for(j_times_ptns_to_read=0, j=0;j<ch_used;++j) {
if (!do_not_decode[j]) {
uint_fast32_t temp=get_vlc2(gb, vc->codebooks[vr->classbook].vlc.table,
@@ -1358,7 +1450,7 @@ static int vorbis_residue_decode(vorbis_context *vc, vorbis_residue *vr, uint_fa
for(i=0;i<c_p_c;++i) {
uint_fast32_t temp2;
- temp2=(((uint_fast64_t)temp) * inverse[vr->classifications])>>32;
+ temp2=(((uint_fast64_t)temp) * inverse_class)>>32;
if (partition_count+c_p_c-1-i < ptns_to_read) {
classifs[j_times_ptns_to_read+partition_count+c_p_c-1-i]=temp-temp2*vr->classifications;
}
@@ -1378,15 +1470,17 @@ static int vorbis_residue_decode(vorbis_context *vc, vorbis_residue *vr, uint_fa
if (vqbook>=0) {
uint_fast16_t coffs;
- uint_fast16_t step=vr->partition_size/vc->codebooks[vqbook].dimensions;
+ unsigned dim= vc->codebooks[vqbook].dimensions; // not uint_fast8_t: 64bit is slower here on amd64
+ uint_fast16_t step= dim==1 ? vr->partition_size
+ : FASTDIV(vr->partition_size, dim);
vorbis_codebook codebook= vc->codebooks[vqbook];
if (vr->type==0) {
voffs=voffset+j*vlen;
for(k=0;k<step;++k) {
- coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * codebook.dimensions;
- for(l=0;l<codebook.dimensions;++l) {
+ coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
+ for(l=0;l<dim;++l) {
vec[voffs+k+l*step]+=codebook.codevectors[coffs+l]; // FPMATH
}
}
@@ -1394,20 +1488,27 @@ static int vorbis_residue_decode(vorbis_context *vc, vorbis_residue *vr, uint_fa
else if (vr->type==1) {
voffs=voffset+j*vlen;
for(k=0;k<step;++k) {
- coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * codebook.dimensions;
- for(l=0;l<codebook.dimensions;++l, ++voffs) {
+ coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
+ for(l=0;l<dim;++l, ++voffs) {
vec[voffs]+=codebook.codevectors[coffs+l]; // FPMATH
AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d \n", pass, voffs, vec[voffs], codebook.codevectors[coffs+l], coffs);
}
}
}
- else if (vr->type==2 && ch==2 && (voffset&1)==0 && (codebook.dimensions&1)==0) { // most frequent case optimized
+ else if (vr->type==2 && ch==2 && (voffset&1)==0 && (dim&1)==0) { // most frequent case optimized
voffs=voffset>>1;
+ if(dim==2) {
+ for(k=0;k<step;++k) {
+ coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * 2;
+ vec[voffs+k ]+=codebook.codevectors[coffs ]; // FPMATH
+ vec[voffs+k+vlen]+=codebook.codevectors[coffs+1]; // FPMATH
+ }
+ } else
for(k=0;k<step;++k) {
- coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * codebook.dimensions;
- for(l=0;l<codebook.dimensions;l+=2, voffs++) {
+ coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
+ for(l=0;l<dim;l+=2, voffs++) {
vec[voffs ]+=codebook.codevectors[coffs+l ]; // FPMATH
vec[voffs+vlen]+=codebook.codevectors[coffs+l+1]; // FPMATH
@@ -1420,8 +1521,8 @@ static int vorbis_residue_decode(vorbis_context *vc, vorbis_residue *vr, uint_fa
voffs=voffset;
for(k=0;k<step;++k) {
- coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * codebook.dimensions;
- for(l=0;l<codebook.dimensions;++l, ++voffs) {
+ coffs=get_vlc2(gb, codebook.vlc.table, codebook.nb_bits, 3) * dim;
+ for(l=0;l<dim;++l, ++voffs) {
vec[voffs/ch+(voffs%ch)*vlen]+=codebook.codevectors[coffs+l]; // FPMATH FIXME use if and counter instead of / and %
AV_DEBUG(" pass %d offs: %d curr: %f change: %f cv offs.: %d+%d \n", pass, voffset/ch+(voffs%ch)*vlen, vec[voffset/ch+(voffs%ch)*vlen], codebook.codevectors[coffs+l], coffs, l);
@@ -1443,8 +1544,32 @@ static int vorbis_residue_decode(vorbis_context *vc, vorbis_residue *vr, uint_fa
return 0;
}
+void vorbis_inverse_coupling(float *mag, float *ang, int blocksize)
+{
+ int i;
+ for(i=0; i<blocksize; i++)
+ {
+ if (mag[i]>0.0) {
+ if (ang[i]>0.0) {
+ ang[i]=mag[i]-ang[i];
+ } else {
+ float temp=ang[i];
+ ang[i]=mag[i];
+ mag[i]+=temp;
+ }
+ } else {
+ if (ang[i]>0.0) {
+ ang[i]+=mag[i];
+ } else {
+ float temp=ang[i];
+ ang[i]=mag[i];
+ mag[i]-=temp;
+ }
+ }
+ }
+}
+
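vorbis_inverse_coupling() (no longer folded into the packet parser, so a DSPContext backend can substitute an optimized version) undoes the square-polar channel coupling: the magnitude channel keeps the dominant value and the angle channel is rewritten into the second channel's value. The same mapping, written out case by case as a stand-alone sketch with made-up sample values:

    #include <stdio.h>

    /* Sketch of Vorbis square-polar inverse coupling; same math as the
     * exported function above, shown here only to spell out the four cases. */
    static void inverse_coupling(float *mag, float *ang, int n)
    {
        for (int i = 0; i < n; i++) {
            float m = mag[i], a = ang[i];
            if (m > 0.0f) {
                if (a > 0.0f) { ang[i] = m - a; }              /* mag stays     */
                else          { ang[i] = m;  mag[i] = m + a; } /* swap + offset */
            } else {
                if (a > 0.0f) { ang[i] = m + a; }              /* mag stays     */
                else          { ang[i] = m;  mag[i] = m - a; } /* swap + offset */
            }
        }
    }

    int main(void)
    {
        float mag[2] = {  0.50f, -0.75f };   /* coupled (magnitude) values */
        float ang[2] = {  0.25f, -0.25f };   /* coupled (angle) values     */
        inverse_coupling(mag, ang, 2);
        for (int i = 0; i < 2; i++)
            printf("ch0=%g ch1=%g\n", mag[i], ang[i]);
        return 0;
    }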
// Decode the audio packet using the functions above
-#define BIAS 385
static int vorbis_parse_audio_packet(vorbis_context *vc) {
GetBitContext *gb=&vc->gb;
@@ -1462,6 +1587,7 @@ static int vorbis_parse_audio_packet(vorbis_context *vc) {
uint_fast8_t res_num=0;
int_fast16_t retlen=0;
uint_fast16_t saved_start=0;
+ float fadd_bias = vc->add_bias;
if (get_bits1(gb)) {
av_log(vc->avccontext, AV_LOG_ERROR, "Not a Vorbis I audio packet.\n");
@@ -1483,7 +1609,7 @@ static int vorbis_parse_audio_packet(vorbis_context *vc) {
next_window=get_bits1(gb);
}
- blocksize=vc->modes[mode_number].blockflag ? vc->blocksize_1 : vc->blocksize_0;
+ blocksize=vc->blocksize[vc->modes[mode_number].blockflag];
memset(ch_res_ptr, 0, sizeof(float)*vc->audio_channels*blocksize/2); //FIXME can this be removed ?
memset(ch_floor_ptr, 0, sizeof(float)*vc->audio_channels*blocksize/2); //FIXME can this be removed ?
@@ -1541,36 +1667,14 @@ static int vorbis_parse_audio_packet(vorbis_context *vc) {
mag=vc->channel_residues+res_chan[mapping->magnitude[i]]*blocksize/2;
ang=vc->channel_residues+res_chan[mapping->angle[i]]*blocksize/2;
- for(j=0;j<blocksize/2;++j) {
- float temp;
- if (mag[j]>0.0) {
- if (ang[j]>0.0) {
- ang[j]=mag[j]-ang[j];
- } else {
- temp=ang[j];
- ang[j]=mag[j];
- mag[j]+=temp;
- }
- } else {
- if (ang[j]>0.0) {
- ang[j]+=mag[j];
- } else {
- temp=ang[j];
- ang[j]=mag[j];
- mag[j]-=temp;
- }
- }
- }
+ vc->dsp.vorbis_inverse_coupling(mag, ang, blocksize/2);
}
// Dotproduct
for(j=0, ch_floor_ptr=vc->channel_floors;j<vc->audio_channels;++j,ch_floor_ptr+=blocksize/2) {
ch_res_ptr=vc->channel_residues+res_chan[j]*blocksize/2;
-
- for(i=0;i<blocksize/2;++i) {
- ch_floor_ptr[i]*=ch_res_ptr[i]; //FPMATH
- }
+ vc->dsp.vector_fmul(ch_floor_ptr, ch_res_ptr, blocksize/2);
}
// MDCT, overlap/add, save data for next overlapping FPMATH
@@ -1578,10 +1682,10 @@ static int vorbis_parse_audio_packet(vorbis_context *vc) {
for(j=0;j<vc->audio_channels;++j) {
uint_fast8_t step=vc->audio_channels;
uint_fast16_t k;
- float *saved=vc->saved+j*vc->blocksize_1/2;
+ float *saved=vc->saved+j*vc->blocksize[1]/2;
float *ret=vc->ret;
- const float *lwin=vc->lwin;
- const float *swin=vc->swin;
+ const float *lwin=vc->win[1];
+ const float *swin=vc->win[0];
float *buf=vc->buf;
float *buf_tmp=vc->buf_tmp;
@@ -1589,61 +1693,56 @@ static int vorbis_parse_audio_packet(vorbis_context *vc) {
saved_start=vc->saved_start;
- ff_imdct_calc(vc->modes[mode_number].blockflag ? &vc->mdct1 : &vc->mdct0, buf, ch_floor_ptr, buf_tmp);
+ vc->mdct[0].fft.imdct_calc(&vc->mdct[vc->modes[mode_number].blockflag], buf, ch_floor_ptr, buf_tmp);
+ //FIXME process channels together, to allow faster simd vector_fmul_add_add?
if (vc->modes[mode_number].blockflag) {
// -- overlap/add
if (previous_window) {
- for(k=j, i=0;i<vc->blocksize_1/2;++i, k+=step) {
- ret[k]=saved[i]+buf[i]*lwin[i]+BIAS;
- }
- retlen=vc->blocksize_1/2;
+ vc->dsp.vector_fmul_add_add(ret+j, buf, lwin, saved, vc->add_bias, vc->blocksize[1]/2, step);
+ retlen=vc->blocksize[1]/2;
} else {
- buf += (vc->blocksize_1-vc->blocksize_0)/4;
- for(k=j, i=0;i<vc->blocksize_0/2;++i, k+=step) {
- ret[k]=saved[i]+buf[i]*swin[i]+BIAS;
- }
- buf += vc->blocksize_0/2;
- for(i=0;i<(vc->blocksize_1-vc->blocksize_0)/4;++i, k+=step) {
- ret[k]=buf[i]+BIAS;
+ int len = (vc->blocksize[1]-vc->blocksize[0])/4;
+ buf += len;
+ vc->dsp.vector_fmul_add_add(ret+j, buf, swin, saved, vc->add_bias, vc->blocksize[0]/2, step);
+ k = vc->blocksize[0]/2*step + j;
+ buf += vc->blocksize[0]/2;
+ if(vc->exp_bias){
+ for(i=0; i<len; i++, k+=step)
+ ((uint32_t*)ret)[k] = ((uint32_t*)buf)[i] + vc->exp_bias; // ret[k]=buf[i]*(1<<bias)
+ } else {
+ for(i=0; i<len; i++, k+=step)
+ ret[k] = buf[i] + fadd_bias;
}
buf=vc->buf;
- retlen=vc->blocksize_0/2+(vc->blocksize_1-vc->blocksize_0)/4;
+ retlen=vc->blocksize[0]/2+len;
}
// -- save
if (next_window) {
- buf += vc->blocksize_1/2;
- lwin += vc->blocksize_1/2-1;
- for(i=0;i<vc->blocksize_1/2;++i) {
- saved[i]=buf[i]*lwin[-i];
- }
+ buf += vc->blocksize[1]/2;
+ vc->dsp.vector_fmul_reverse(saved, buf, lwin, vc->blocksize[1]/2);
saved_start=0;
} else {
- saved_start=(vc->blocksize_1-vc->blocksize_0)/4;
- buf += vc->blocksize_1/2;
- for(i=0;i<saved_start;++i) {
- saved[i]=buf[i];
- }
- swin += vc->blocksize_0/2-1;
- for(i=0;i<vc->blocksize_0/2;++i) {
- saved[saved_start+i]=buf[saved_start+i]*swin[-i];
- }
+ saved_start=(vc->blocksize[1]-vc->blocksize[0])/4;
+ buf += vc->blocksize[1]/2;
+ for(i=0; i<saved_start; i++)
+ ((uint32_t*)saved)[i] = ((uint32_t*)buf)[i] + vc->exp_bias;
+ vc->dsp.vector_fmul_reverse(saved+saved_start, buf+saved_start, swin, vc->blocksize[0]/2);
}
} else {
// --overlap/add
- for(k=j, i=0;i<saved_start;++i, k+=step) {
- ret[k]=saved[i]+BIAS;
- }
- for(i=0;i<vc->blocksize_0/2;++i, k+=step) {
- ret[k]=saved[saved_start+i]+buf[i]*swin[i]+BIAS;
+ if(vc->add_bias) {
+ for(k=j, i=0;i<saved_start;++i, k+=step)
+ ret[k] = saved[i] + fadd_bias;
+ } else {
+ for(k=j, i=0;i<saved_start;++i, k+=step)
+ ret[k] = saved[i];
}
- retlen=saved_start+vc->blocksize_0/2;
+ vc->dsp.vector_fmul_add_add(ret+k, buf, swin, saved+saved_start, vc->add_bias, vc->blocksize[0]/2, step);
+ retlen=saved_start+vc->blocksize[0]/2;
// -- save
- buf += vc->blocksize_0/2;
- swin += vc->blocksize_0/2-1;
- for(i=0;i<vc->blocksize_0/2;++i) {
- saved[i]=buf[i]*swin[-i];
- }
+ buf += vc->blocksize[0]/2;
+ vc->dsp.vector_fmul_reverse(saved, buf, swin, vc->blocksize[0]/2);
saved_start=0;
}
}
@@ -1661,7 +1760,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
vorbis_context *vc = avccontext->priv_data ;
GetBitContext *gb = &(vc->gb);
- int_fast16_t i, len;
+ int_fast16_t len;
if(!buf_size){
return 0;
@@ -1686,16 +1785,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext,
AV_DEBUG("parsed %d bytes %d bits, returned %d samples (*ch*bits) \n", get_bits_count(gb)/8, get_bits_count(gb)%8, len);
- for(i=0;i<len;++i) {
- int_fast32_t tmp= ((int32_t*)vc->ret)[i];
- if(tmp & 0xf0000){
-// tmp= (0x43c0ffff - tmp)>>31; //ask gcc devs why this is slower
- if(tmp > 0x43c0ffff) tmp= 0xFFFF;
- else tmp= 0;
- }
- ((int16_t*)data)[i]=tmp - 0x8000;
- }
-
+ vc->dsp.float_to_int16(data, vc->ret, len);
*data_size=len*2;
return buf_size ;
diff --git a/contrib/ffmpeg/libavcodec/vorbis.h b/contrib/ffmpeg/libavcodec/vorbis.h
new file mode 100644
index 000000000..cda909aa9
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vorbis.h
@@ -0,0 +1,43 @@
+/*
+ * copyright (c) 2006 Oded Shimon <ods15@ods15.dyndns.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef VORBIS_H
+#define VORBIS_H
+
+#include "avcodec.h"
+
+extern const float ff_vorbis_floor1_inverse_db_table[256];
+extern const float * ff_vorbis_vwin[8];
+
+typedef struct {
+ uint_fast16_t x;
+ uint_fast16_t sort;
+ uint_fast16_t low;
+ uint_fast16_t high;
+} floor1_entry_t;
+
+void ff_vorbis_ready_floor1_list(floor1_entry_t * list, int values);
+unsigned int ff_vorbis_nth_root(unsigned int x, unsigned int n); // x^(1/n)
+int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, uint_fast32_t num);
+void ff_vorbis_floor1_render_list(floor1_entry_t * list, int values, uint_fast16_t * y_list, int * flag, int multiplier, float * out, int samples);
+
+#define ilog(i) av_log2(2*(i))
+
+#endif
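
Note that ilog() here matches the Vorbis I definition: the number of bits needed to represent a non-negative integer, so ilog(0) = 0, ilog(1) = 1, ilog(7) = 3 and ilog(8) = 4, which av_log2(2*i) reproduces.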
diff --git a/src/libffmpeg/libavcodec/vorbis.h b/contrib/ffmpeg/libavcodec/vorbis_data.c
index c818207d9..5dc9c5f00 100644
--- a/src/libffmpeg/libavcodec/vorbis.h
+++ b/contrib/ffmpeg/libavcodec/vorbis_data.c
@@ -1,127 +1,24 @@
-#define ALT_BITSTREAM_READER_LE
-#include "avcodec.h"
-#include "bitstream.h"
-#include "dsputil.h"
-
-typedef struct {
- uint_fast8_t dimensions;
- uint_fast8_t lookup_type;
- uint_fast8_t maxdepth;
- VLC vlc;
- float *codevectors;
- unsigned int nb_bits;
-} vorbis_codebook;
-
-typedef union vorbis_floor_u vorbis_floor_data;
-typedef struct vorbis_floor0_s vorbis_floor0;
-typedef struct vorbis_floor1_s vorbis_floor1;
-struct vorbis_context_s;
-typedef
-uint_fast8_t (* vorbis_floor_decode_func)
- (struct vorbis_context_s *, vorbis_floor_data *, float *);
-typedef struct {
- uint_fast8_t floor_type;
- vorbis_floor_decode_func decode;
- union vorbis_floor_u
- {
- struct vorbis_floor0_s
- {
- uint_fast8_t order;
- uint_fast16_t rate;
- uint_fast16_t bark_map_size;
- int_fast32_t * map[2];
- uint_fast32_t map_size[2];
- uint_fast8_t amplitude_bits;
- uint_fast8_t amplitude_offset;
- uint_fast8_t num_books;
- uint_fast8_t * book_list;
- float * lsp;
- } t0;
- struct vorbis_floor1_s
- {
- uint_fast8_t partitions;
- uint_fast8_t maximum_class;
- uint_fast8_t partition_class[32];
- uint_fast8_t class_dimensions[16];
- uint_fast8_t class_subclasses[16];
- uint_fast8_t class_masterbook[16];
- int_fast16_t subclass_books[16][8];
- uint_fast8_t multiplier;
- uint_fast16_t x_list_dim;
- uint_fast16_t *x_list;
- uint_fast16_t *x_list_order;
- uint_fast16_t *low_neighbour;
- uint_fast16_t *high_neighbour;
- } t1;
- } data;
-} vorbis_floor;
-
-typedef struct {
- uint_fast16_t type;
- uint_fast32_t begin;
- uint_fast32_t end;
- uint_fast32_t partition_size;
- uint_fast8_t classifications;
- uint_fast8_t classbook;
- int_fast16_t books[64][8];
- uint_fast8_t maxpass;
-} vorbis_residue;
-
-typedef struct {
- uint_fast8_t submaps;
- uint_fast16_t coupling_steps;
- uint_fast8_t *magnitude;
- uint_fast8_t *angle;
- uint_fast8_t *mux;
- uint_fast8_t submap_floor[16];
- uint_fast8_t submap_residue[16];
-} vorbis_mapping;
-
-typedef struct {
- uint_fast8_t blockflag;
- uint_fast16_t windowtype;
- uint_fast16_t transformtype;
- uint_fast8_t mapping;
-} vorbis_mode;
-
-typedef struct vorbis_context_s {
- AVCodecContext *avccontext;
- GetBitContext gb;
-
- MDCTContext mdct0;
- MDCTContext mdct1;
- uint_fast8_t first_frame;
- uint_fast32_t version;
- uint_fast8_t audio_channels;
- uint_fast32_t audio_samplerate;
- uint_fast32_t bitrate_maximum;
- uint_fast32_t bitrate_nominal;
- uint_fast32_t bitrate_minimum;
- uint_fast32_t blocksize_0;
- uint_fast32_t blocksize_1;
- const float * swin;
- const float * lwin;
- uint_fast16_t codebook_count;
- vorbis_codebook *codebooks;
- uint_fast8_t floor_count;
- vorbis_floor *floors;
- uint_fast8_t residue_count;
- vorbis_residue *residues;
- uint_fast8_t mapping_count;
- vorbis_mapping *mappings;
- uint_fast8_t mode_count;
- vorbis_mode *modes;
- uint_fast8_t mode_number; // mode number for the current packet
- float *channel_residues;
- float *channel_floors;
- float *saved;
- uint_fast16_t saved_start;
- float *ret;
- float *buf;
- float *buf_tmp;
-} vorbis_context;
-
+/*
+ * copyright (c) 2005 Denes Balatoni ( dbalatoni programozo hu )
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "vorbis.h"
static const float vwin64[32] = {
0.0009460463F, 0.0085006468F, 0.0235352254F, 0.0458950567F,
@@ -2187,7 +2084,7 @@ static const float vwin8192[4096] = {
1.0000000000F, 1.0000000000F, 1.0000000000F, 1.0000000000F,
};
-static const float floor1_inverse_db_table[256]={
+const float ff_vorbis_floor1_inverse_db_table[256]={
1.0649863e-07F, 1.1341951e-07F, 1.2079015e-07F, 1.2863978e-07F,
1.3699951e-07F, 1.4590251e-07F, 1.5538408e-07F, 1.6548181e-07F,
1.7623575e-07F, 1.8768855e-07F, 1.9988561e-07F, 2.128753e-07F,
@@ -2254,3 +2151,5 @@ static const float floor1_inverse_db_table[256]={
0.82788260F, 0.88168307F, 0.9389798F, 1.F,
};
+const float * ff_vorbis_vwin[8] = { vwin64, vwin128, vwin256, vwin512, vwin1024, vwin2048, vwin4096, vwin8192 };
+
diff --git a/contrib/ffmpeg/libavcodec/vorbis_enc.c b/contrib/ffmpeg/libavcodec/vorbis_enc.c
new file mode 100644
index 000000000..636b0cfae
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vorbis_enc.c
@@ -0,0 +1,1087 @@
+/*
+ * copyright (c) 2006 Oded Shimon <ods15@ods15.dyndns.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file vorbis_enc.c
+ * Native Vorbis encoder.
+ * @author Oded Shimon <ods15@ods15.dyndns.org>
+ */
+
+#include <float.h>
+#include "avcodec.h"
+#include "dsputil.h"
+#include "vorbis.h"
+#include "vorbis_enc_data.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+typedef struct {
+ int nentries;
+ uint8_t * lens;
+ uint32_t * codewords;
+ int ndimentions;
+ float min;
+ float delta;
+ int seq_p;
+ int lookup;
+ int * quantlist;
+ float * dimentions;
+ float * pow2;
+} codebook_t;
+
+typedef struct {
+ int dim;
+ int subclass;
+ int masterbook;
+ int * books;
+} floor_class_t;
+
+typedef struct {
+ int partitions;
+ int * partition_to_class;
+ int nclasses;
+ floor_class_t * classes;
+ int multiplier;
+ int rangebits;
+ int values;
+ floor1_entry_t * list;
+} floor_t;
+
+typedef struct {
+ int type;
+ int begin;
+ int end;
+ int partition_size;
+ int classifications;
+ int classbook;
+ int8_t (*books)[8];
+ float (*maxes)[2];
+} residue_t;
+
+typedef struct {
+ int submaps;
+ int * mux;
+ int * floor;
+ int * residue;
+ int coupling_steps;
+ int * magnitude;
+ int * angle;
+} mapping_t;
+
+typedef struct {
+ int blockflag;
+ int mapping;
+} vorbis_mode_t;
+
+typedef struct {
+ int channels;
+ int sample_rate;
+ int log2_blocksize[2];
+ MDCTContext mdct[2];
+ const float * win[2];
+ int have_saved;
+ float * saved;
+ float * samples;
+ float * floor; // also used for tmp values for mdct
+ float * coeffs; // also used for residue after floor
+ float quality;
+
+ int ncodebooks;
+ codebook_t * codebooks;
+
+ int nfloors;
+ floor_t * floors;
+
+ int nresidues;
+ residue_t * residues;
+
+ int nmappings;
+ mapping_t * mappings;
+
+ int nmodes;
+ vorbis_mode_t * modes;
+} venc_context_t;
+
+typedef struct {
+ int total;
+ int total_pos;
+ int pos;
+ uint8_t * buf_ptr;
+} PutBitContext;
+
+static inline void init_put_bits(PutBitContext * pb, uint8_t * buf, int buffer_len) {
+ pb->total = buffer_len * 8;
+ pb->total_pos = 0;
+ pb->pos = 0;
+ pb->buf_ptr = buf;
+}
+
+static void put_bits(PutBitContext * pb, int bits, uint64_t val) {
+ if ((pb->total_pos += bits) >= pb->total) return;
+ if (!bits) return;
+ if (pb->pos) {
+ if (pb->pos > bits) {
+ *pb->buf_ptr |= val << (8 - pb->pos);
+ pb->pos -= bits;
+ bits = 0;
+ } else {
+ *pb->buf_ptr++ |= (val << (8 - pb->pos)) & 0xFF;
+ val >>= pb->pos;
+ bits -= pb->pos;
+ pb->pos = 0;
+ }
+ }
+ for (; bits >= 8; bits -= 8) {
+ *pb->buf_ptr++ = val & 0xFF;
+ val >>= 8;
+ }
+ if (bits) {
+ *pb->buf_ptr = val;
+ pb->pos = 8 - bits;
+ }
+}
+
+static inline void flush_put_bits(PutBitContext * pb) {
+}
+
+static inline int put_bits_count(PutBitContext * pb) {
+ return pb->total_pos;
+}
+
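The encoder carries its own tiny PutBitContext because Vorbis packs header fields least-significant-bit first within each byte, whereas FFmpeg's shared writer in bitstream.h is MSB-first by default. A hypothetical round-trip showing the bit order (demo code only, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Minimal LSB-first bit writer, same idea as the encoder's local
     * put_bits(); names here are made up for the demo. */
    typedef struct { uint8_t *buf; int bitpos; } LEWriter;

    static void le_put(LEWriter *w, int bits, uint32_t val)
    {
        for (int i = 0; i < bits; i++, w->bitpos++)
            if ((val >> i) & 1)
                w->buf[w->bitpos >> 3] |= 1 << (w->bitpos & 7);
    }

    int main(void)
    {
        uint8_t buf[4] = { 0 };
        LEWriter w = { buf, 0 };

        le_put(&w, 8, 1);          /* packet type byte                    */
        le_put(&w, 4, 7);          /* 4-bit field lands in the low nibble */
        le_put(&w, 4, 0xA);        /* next field fills the high nibble    */

        printf("%02x %02x\n", buf[0], buf[1]);   /* prints: 01 a7 */
        return 0;
    }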
+static inline void put_codeword(PutBitContext * pb, codebook_t * cb, int entry) {
+ assert(entry >= 0);
+ assert(entry < cb->nentries);
+ assert(cb->lens[entry]);
+ put_bits(pb, cb->lens[entry], cb->codewords[entry]);
+}
+
+static int cb_lookup_vals(int lookup, int dimentions, int entries) {
+ if (lookup == 1) return ff_vorbis_nth_root(entries, dimentions);
+ else if (lookup == 2) return dimentions * entries;
+ return 0;
+}
+
+static void ready_codebook(codebook_t * cb) {
+ int i;
+
+ ff_vorbis_len2vlc(cb->lens, cb->codewords, cb->nentries);
+
+ if (!cb->lookup)
+ cb->pow2 = cb->dimentions = NULL;
+ else {
+ int vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries);
+ cb->dimentions = av_malloc(sizeof(float) * cb->nentries * cb->ndimentions);
+ cb->pow2 = av_mallocz(sizeof(float) * cb->nentries);
+ for (i = 0; i < cb->nentries; i++) {
+ float last = 0;
+ int j;
+ int div = 1;
+ for (j = 0; j < cb->ndimentions; j++) {
+ int off;
+ if (cb->lookup == 1)
+ off = (i / div) % vals; // lookup type 1
+ else
+ off = i * cb->ndimentions + j; // lookup type 2
+
+ cb->dimentions[i * cb->ndimentions + j] = last + cb->min + cb->quantlist[off] * cb->delta;
+ if (cb->seq_p)
+ last = cb->dimentions[i * cb->ndimentions + j];
+ cb->pow2[i] += cb->dimentions[i * cb->ndimentions + j]*cb->dimentions[i * cb->ndimentions + j];
+ div *= vals;
+ }
+ cb->pow2[i] /= 2.;
+ }
+ }
+}
+
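ready_codebook() expands the packed quantizer list into per-entry vectors: lookup type 1 treats the quantlist as a lattice of cb_lookup_vals() = floor(entries^(1/dimensions)) multiplicands indexed by the mixed-radix digits of the entry number, while lookup type 2 stores dimensions*entries explicit values. With illustrative numbers only: a type 1 book with 16 entries and 4 dimensions needs just 2 quantized values, whereas type 2 would need 64.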
+static void ready_residue(residue_t * rc, venc_context_t * venc) {
+ int i;
+ assert(rc->type == 2);
+ rc->maxes = av_mallocz(sizeof(float[2]) * rc->classifications);
+ for (i = 0; i < rc->classifications; i++) {
+ int j;
+ codebook_t * cb;
+ for (j = 0; j < 8; j++)
+ if (rc->books[i][j] != -1) break;
+ if (j == 8) continue; // zero
+ cb = &venc->codebooks[rc->books[i][j]];
+ assert(cb->ndimentions >= 2);
+ assert(cb->lookup);
+
+ for (j = 0; j < cb->nentries; j++) {
+ float a;
+ if (!cb->lens[j]) continue;
+ a = fabs(cb->dimentions[j * cb->ndimentions]);
+ if (a > rc->maxes[i][0])
+ rc->maxes[i][0] = a;
+ a = fabs(cb->dimentions[j * cb->ndimentions + 1]);
+ if (a > rc->maxes[i][1])
+ rc->maxes[i][1] = a;
+ }
+ }
+ // small bias
+ for (i = 0; i < rc->classifications; i++) {
+ rc->maxes[i][0] += 0.8;
+ rc->maxes[i][1] += 0.8;
+ }
+}
+
+static void create_vorbis_context(venc_context_t * venc, AVCodecContext * avccontext) {
+ floor_t * fc;
+ residue_t * rc;
+ mapping_t * mc;
+ int i, book;
+
+ venc->channels = avccontext->channels;
+ venc->sample_rate = avccontext->sample_rate;
+ venc->log2_blocksize[0] = venc->log2_blocksize[1] = 11;
+
+ venc->ncodebooks = sizeof(cvectors)/sizeof(cvectors[0]);
+ venc->codebooks = av_malloc(sizeof(codebook_t) * venc->ncodebooks);
+
+ // codebook 0..14 - floor1 book, values 0..255
+ // codebook 15 residue masterbook
+ // codebook 16..29 residue
+ for (book = 0; book < venc->ncodebooks; book++) {
+ codebook_t * cb = &venc->codebooks[book];
+ int vals;
+ cb->ndimentions = cvectors[book].dim;
+ cb->nentries = cvectors[book].real_len;
+ cb->min = cvectors[book].min;
+ cb->delta = cvectors[book].delta;
+ cb->lookup = cvectors[book].lookup;
+ cb->seq_p = 0;
+
+ cb->lens = av_malloc(sizeof(uint8_t) * cb->nentries);
+ cb->codewords = av_malloc(sizeof(uint32_t) * cb->nentries);
+ memcpy(cb->lens, cvectors[book].clens, cvectors[book].len);
+ memset(cb->lens + cvectors[book].len, 0, cb->nentries - cvectors[book].len);
+
+ if (cb->lookup) {
+ vals = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries);
+ cb->quantlist = av_malloc(sizeof(int) * vals);
+ for (i = 0; i < vals; i++)
+ cb->quantlist[i] = cvectors[book].quant[i];
+ } else {
+ cb->quantlist = NULL;
+ }
+ ready_codebook(cb);
+ }
+
+ venc->nfloors = 1;
+ venc->floors = av_malloc(sizeof(floor_t) * venc->nfloors);
+
+ // just 1 floor
+ fc = &venc->floors[0];
+ fc->partitions = 8;
+ fc->partition_to_class = av_malloc(sizeof(int) * fc->partitions);
+ fc->nclasses = 0;
+ for (i = 0; i < fc->partitions; i++) {
+ static const int a[] = {0,1,2,2,3,3,4,4};
+ fc->partition_to_class[i] = a[i];
+ fc->nclasses = FFMAX(fc->nclasses, fc->partition_to_class[i]);
+ }
+ fc->nclasses++;
+ fc->classes = av_malloc(sizeof(floor_class_t) * fc->nclasses);
+ for (i = 0; i < fc->nclasses; i++) {
+ floor_class_t * c = &fc->classes[i];
+ int j, books;
+ c->dim = floor_classes[i].dim;
+ c->subclass = floor_classes[i].subclass;
+ c->masterbook = floor_classes[i].masterbook;
+ books = (1 << c->subclass);
+ c->books = av_malloc(sizeof(int) * books);
+ for (j = 0; j < books; j++)
+ c->books[j] = floor_classes[i].nbooks[j];
+ }
+ fc->multiplier = 2;
+ fc->rangebits = venc->log2_blocksize[0] - 1;
+
+ fc->values = 2;
+ for (i = 0; i < fc->partitions; i++)
+ fc->values += fc->classes[fc->partition_to_class[i]].dim;
+
+ fc->list = av_malloc(sizeof(floor1_entry_t) * fc->values);
+ fc->list[0].x = 0;
+ fc->list[1].x = 1 << fc->rangebits;
+ for (i = 2; i < fc->values; i++) {
+ static const int a[] = {
+ 93, 23,372, 6, 46,186,750, 14, 33, 65,
+ 130,260,556, 3, 10, 18, 28, 39, 55, 79,
+ 111,158,220,312,464,650,850
+ };
+ fc->list[i].x = a[i - 2];
+ }
+ ff_vorbis_ready_floor1_list(fc->list, fc->values);
+
+ venc->nresidues = 1;
+ venc->residues = av_malloc(sizeof(residue_t) * venc->nresidues);
+
+ // single residue
+ rc = &venc->residues[0];
+ rc->type = 2;
+ rc->begin = 0;
+ rc->end = 1600;
+ rc->partition_size = 32;
+ rc->classifications = 10;
+ rc->classbook = 15;
+ rc->books = av_malloc(sizeof(*rc->books) * rc->classifications);
+ {
+ static const int8_t a[10][8] = {
+ { -1, -1, -1, -1, -1, -1, -1, -1, },
+ { -1, -1, 16, -1, -1, -1, -1, -1, },
+ { -1, -1, 17, -1, -1, -1, -1, -1, },
+ { -1, -1, 18, -1, -1, -1, -1, -1, },
+ { -1, -1, 19, -1, -1, -1, -1, -1, },
+ { -1, -1, 20, -1, -1, -1, -1, -1, },
+ { -1, -1, 21, -1, -1, -1, -1, -1, },
+ { 22, 23, -1, -1, -1, -1, -1, -1, },
+ { 24, 25, -1, -1, -1, -1, -1, -1, },
+ { 26, 27, 28, -1, -1, -1, -1, -1, },
+ };
+ memcpy(rc->books, a, sizeof a);
+ }
+ ready_residue(rc, venc);
+
+ venc->nmappings = 1;
+ venc->mappings = av_malloc(sizeof(mapping_t) * venc->nmappings);
+
+ // single mapping
+ mc = &venc->mappings[0];
+ mc->submaps = 1;
+ mc->mux = av_malloc(sizeof(int) * venc->channels);
+ for (i = 0; i < venc->channels; i++)
+ mc->mux[i] = 0;
+ mc->floor = av_malloc(sizeof(int) * mc->submaps);
+ mc->residue = av_malloc(sizeof(int) * mc->submaps);
+ for (i = 0; i < mc->submaps; i++) {
+ mc->floor[i] = 0;
+ mc->residue[i] = 0;
+ }
+ mc->coupling_steps = venc->channels == 2 ? 1 : 0;
+ mc->magnitude = av_malloc(sizeof(int) * mc->coupling_steps);
+ mc->angle = av_malloc(sizeof(int) * mc->coupling_steps);
+ if (mc->coupling_steps) {
+ mc->magnitude[0] = 0;
+ mc->angle[0] = 1;
+ }
+
+ venc->nmodes = 1;
+ venc->modes = av_malloc(sizeof(vorbis_mode_t) * venc->nmodes);
+
+ // single mode
+ venc->modes[0].blockflag = 0;
+ venc->modes[0].mapping = 0;
+
+ venc->have_saved = 0;
+ venc->saved = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2);
+ venc->samples = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]));
+ venc->floor = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2);
+ venc->coeffs = av_malloc(sizeof(float) * venc->channels * (1 << venc->log2_blocksize[1]) / 2);
+
+ venc->win[0] = ff_vorbis_vwin[venc->log2_blocksize[0] - 6];
+ venc->win[1] = ff_vorbis_vwin[venc->log2_blocksize[1] - 6];
+
+ ff_mdct_init(&venc->mdct[0], venc->log2_blocksize[0], 0);
+ ff_mdct_init(&venc->mdct[1], venc->log2_blocksize[1], 0);
+}
+
+static void put_float(PutBitContext * pb, float f) {
+ int exp, mant;
+ uint32_t res = 0;
+ mant = (int)ldexp(frexp(f, &exp), 20);
+ exp += 788 - 20;
+ if (mant < 0) { res |= (1 << 31); mant = -mant; }
+ res |= mant | (exp << 21);
+ put_bits(pb, 32, res);
+}
+
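put_float() stores setup-header constants in the Vorbis float32 format: sign in bit 31, a 10-bit exponent in bits 21..30 and a 21-bit mantissa, with value = mantissa * 2^(exponent - 788); frexp()/ldexp() do the conversion, which is why 788 - 20 is added to the exponent. A hedged sketch of the matching unpack (the decoder's vorbisfloat2float() plays this role in the files above):

    #include <stdio.h>
    #include <stdint.h>
    #include <math.h>

    /* Sketch of Vorbis float32 unpacking: sign bit 31, 10-bit exponent,
     * 21-bit mantissa, exponent bias 788. Hypothetical helper name. */
    static float vorbis_float32_unpack(uint32_t x)
    {
        uint32_t mant = x & 0x1fffff;
        int      exp  = (x & 0x7fe00000) >> 21;
        float    v    = ldexpf((float)mant, exp - 788);
        return (x & 0x80000000) ? -v : v;
    }

    int main(void)
    {
        /* pack -0.5 by hand: frexp gives mantissa 0.5 and exponent 0 */
        uint32_t packed = 0x80000000                    /* sign     */
                        | ((0 + 788 - 20) << 21)        /* exponent */
                        | (uint32_t)(0.5 * (1 << 20));  /* mantissa */
        printf("%f\n", vorbis_float32_unpack(packed));  /* prints -0.500000 */
        return 0;
    }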
+static void put_codebook_header(PutBitContext * pb, codebook_t * cb) {
+ int i;
+ int ordered = 0;
+
+ put_bits(pb, 24, 0x564342); //magic
+ put_bits(pb, 16, cb->ndimentions);
+ put_bits(pb, 24, cb->nentries);
+
+ for (i = 1; i < cb->nentries; i++)
+ if (cb->lens[i] < cb->lens[i-1]) break;
+ if (i == cb->nentries)
+ ordered = 1;
+
+ put_bits(pb, 1, ordered);
+ if (ordered) {
+ int len = cb->lens[0];
+ put_bits(pb, 5, len - 1);
+ i = 0;
+ while (i < cb->nentries) {
+ int j;
+ for (j = 0; j+i < cb->nentries; j++)
+ if (cb->lens[j+i] != len) break;
+ put_bits(pb, ilog(cb->nentries - i), j);
+ i += j;
+ len++;
+ }
+ } else {
+ int sparse = 0;
+ for (i = 0; i < cb->nentries; i++)
+ if (!cb->lens[i]) break;
+ if (i != cb->nentries)
+ sparse = 1;
+ put_bits(pb, 1, sparse);
+
+ for (i = 0; i < cb->nentries; i++) {
+ if (sparse) put_bits(pb, 1, !!cb->lens[i]);
+ if (cb->lens[i]) put_bits(pb, 5, cb->lens[i] - 1);
+ }
+ }
+
+ put_bits(pb, 4, cb->lookup);
+ if (cb->lookup) {
+ int tmp = cb_lookup_vals(cb->lookup, cb->ndimentions, cb->nentries);
+ int bits = ilog(cb->quantlist[0]);
+
+ for (i = 1; i < tmp; i++)
+ bits = FFMAX(bits, ilog(cb->quantlist[i]));
+
+ put_float(pb, cb->min);
+ put_float(pb, cb->delta);
+
+ put_bits(pb, 4, bits - 1);
+ put_bits(pb, 1, cb->seq_p);
+
+ for (i = 0; i < tmp; i++)
+ put_bits(pb, bits, cb->quantlist[i]);
+ }
+}
+
+static void put_floor_header(PutBitContext * pb, floor_t * fc) {
+ int i;
+
+ put_bits(pb, 16, 1); // type, only floor1 is supported
+
+ put_bits(pb, 5, fc->partitions);
+
+ for (i = 0; i < fc->partitions; i++)
+ put_bits(pb, 4, fc->partition_to_class[i]);
+
+ for (i = 0; i < fc->nclasses; i++) {
+ int j, books;
+
+ put_bits(pb, 3, fc->classes[i].dim - 1);
+ put_bits(pb, 2, fc->classes[i].subclass);
+
+ if (fc->classes[i].subclass)
+ put_bits(pb, 8, fc->classes[i].masterbook);
+
+ books = (1 << fc->classes[i].subclass);
+
+ for (j = 0; j < books; j++)
+ put_bits(pb, 8, fc->classes[i].books[j] + 1);
+ }
+
+ put_bits(pb, 2, fc->multiplier - 1);
+ put_bits(pb, 4, fc->rangebits);
+
+ for (i = 2; i < fc->values; i++)
+ put_bits(pb, fc->rangebits, fc->list[i].x);
+}
+
+static void put_residue_header(PutBitContext * pb, residue_t * rc) {
+ int i;
+
+ put_bits(pb, 16, rc->type);
+
+ put_bits(pb, 24, rc->begin);
+ put_bits(pb, 24, rc->end);
+ put_bits(pb, 24, rc->partition_size - 1);
+ put_bits(pb, 6, rc->classifications - 1);
+ put_bits(pb, 8, rc->classbook);
+
+ for (i = 0; i < rc->classifications; i++) {
+ int j, tmp = 0;
+ for (j = 0; j < 8; j++)
+ tmp |= (rc->books[i][j] != -1) << j;
+
+ put_bits(pb, 3, tmp & 7);
+ put_bits(pb, 1, tmp > 7);
+
+ if (tmp > 7)
+ put_bits(pb, 5, tmp >> 3);
+ }
+
+ for (i = 0; i < rc->classifications; i++) {
+ int j;
+ for (j = 0; j < 8; j++)
+ if (rc->books[i][j] != -1)
+ put_bits(pb, 8, rc->books[i][j]);
+ }
+}
+
+static int put_main_header(venc_context_t * venc, uint8_t ** out) {
+ int i;
+ PutBitContext pb;
+ uint8_t buffer[50000] = {0}, * p = buffer;
+ int buffer_len = sizeof buffer;
+ int len, hlens[3];
+
+ // identification header
+ init_put_bits(&pb, p, buffer_len);
+ put_bits(&pb, 8, 1); //magic
+ for (i = 0; "vorbis"[i]; i++)
+ put_bits(&pb, 8, "vorbis"[i]);
+ put_bits(&pb, 32, 0); // version
+ put_bits(&pb, 8, venc->channels);
+ put_bits(&pb, 32, venc->sample_rate);
+ put_bits(&pb, 32, 0); // bitrate
+ put_bits(&pb, 32, 0); // bitrate
+ put_bits(&pb, 32, 0); // bitrate
+ put_bits(&pb, 4, venc->log2_blocksize[0]);
+ put_bits(&pb, 4, venc->log2_blocksize[1]);
+ put_bits(&pb, 1, 1); // framing
+
+ flush_put_bits(&pb);
+ hlens[0] = (put_bits_count(&pb) + 7) / 8;
+ buffer_len -= hlens[0];
+ p += hlens[0];
+
+ // comment header
+ init_put_bits(&pb, p, buffer_len);
+ put_bits(&pb, 8, 3); //magic
+ for (i = 0; "vorbis"[i]; i++)
+ put_bits(&pb, 8, "vorbis"[i]);
+ put_bits(&pb, 32, 0); // vendor length TODO
+ put_bits(&pb, 32, 0); // amount of comments
+ put_bits(&pb, 1, 1); // framing
+
+ flush_put_bits(&pb);
+ hlens[1] = (put_bits_count(&pb) + 7) / 8;
+ buffer_len -= hlens[1];
+ p += hlens[1];
+
+ // setup header
+ init_put_bits(&pb, p, buffer_len);
+ put_bits(&pb, 8, 5); //magic
+ for (i = 0; "vorbis"[i]; i++)
+ put_bits(&pb, 8, "vorbis"[i]);
+
+ // codebooks
+ put_bits(&pb, 8, venc->ncodebooks - 1);
+ for (i = 0; i < venc->ncodebooks; i++)
+ put_codebook_header(&pb, &venc->codebooks[i]);
+
+ // time domain, reserved, zero
+ put_bits(&pb, 6, 0);
+ put_bits(&pb, 16, 0);
+
+ // floors
+ put_bits(&pb, 6, venc->nfloors - 1);
+ for (i = 0; i < venc->nfloors; i++)
+ put_floor_header(&pb, &venc->floors[i]);
+
+ // residues
+ put_bits(&pb, 6, venc->nresidues - 1);
+ for (i = 0; i < venc->nresidues; i++)
+ put_residue_header(&pb, &venc->residues[i]);
+
+ // mappings
+ put_bits(&pb, 6, venc->nmappings - 1);
+ for (i = 0; i < venc->nmappings; i++) {
+ mapping_t * mc = &venc->mappings[i];
+ int j;
+ put_bits(&pb, 16, 0); // mapping type
+
+ put_bits(&pb, 1, mc->submaps > 1);
+ if (mc->submaps > 1)
+ put_bits(&pb, 4, mc->submaps - 1);
+
+ put_bits(&pb, 1, !!mc->coupling_steps);
+ if (mc->coupling_steps) {
+ put_bits(&pb, 8, mc->coupling_steps - 1);
+ for (j = 0; j < mc->coupling_steps; j++) {
+ put_bits(&pb, ilog(venc->channels - 1), mc->magnitude[j]);
+ put_bits(&pb, ilog(venc->channels - 1), mc->angle[j]);
+ }
+ }
+
+ put_bits(&pb, 2, 0); // reserved
+
+ if (mc->submaps > 1)
+ for (j = 0; j < venc->channels; j++)
+ put_bits(&pb, 4, mc->mux[j]);
+
+ for (j = 0; j < mc->submaps; j++) {
+ put_bits(&pb, 8, 0); // reserved time configuration
+ put_bits(&pb, 8, mc->floor[j]);
+ put_bits(&pb, 8, mc->residue[j]);
+ }
+ }
+
+ // modes
+ put_bits(&pb, 6, venc->nmodes - 1);
+ for (i = 0; i < venc->nmodes; i++) {
+ put_bits(&pb, 1, venc->modes[i].blockflag);
+ put_bits(&pb, 16, 0); // reserved window type
+ put_bits(&pb, 16, 0); // reserved transform type
+ put_bits(&pb, 8, venc->modes[i].mapping);
+ }
+
+ put_bits(&pb, 1, 1); // framing
+
+ flush_put_bits(&pb);
+ hlens[2] = (put_bits_count(&pb) + 7) / 8;
+
+ len = hlens[0] + hlens[1] + hlens[2];
+ p = *out = av_mallocz(64 + len + len/255);
+
+ *p++ = 2;
+ p += av_xiphlacing(p, hlens[0]);
+ p += av_xiphlacing(p, hlens[1]);
+ buffer_len = 0;
+ for (i = 0; i < 3; i++) {
+ memcpy(p, buffer + buffer_len, hlens[i]);
+ p += hlens[i];
+ buffer_len += hlens[i];
+ }
+
+ return p - *out;
+}
+
+static float get_floor_average(floor_t * fc, float * coeffs, int i) {
+ int begin = fc->list[fc->list[FFMAX(i-1, 0)].sort].x;
+ int end = fc->list[fc->list[FFMIN(i+1, fc->values - 1)].sort].x;
+ int j;
+ float average = 0;
+
+ for (j = begin; j < end; j++)
+ average += fabs(coeffs[j]);
+ return average / (end - begin);
+}
+
+static void floor_fit(venc_context_t * venc, floor_t * fc, float * coeffs, uint_fast16_t * posts, int samples) {
+ int range = 255 / fc->multiplier + 1;
+ int i;
+ float tot_average = 0.;
+ float averages[fc->values];
+ for (i = 0; i < fc->values; i++){
+ averages[i] = get_floor_average(fc, coeffs, i);
+ tot_average += averages[i];
+ }
+ tot_average /= fc->values;
+ tot_average /= venc->quality;
+
+ for (i = 0; i < fc->values; i++) {
+ int position = fc->list[fc->list[i].sort].x;
+ float average = averages[i];
+ int j;
+
+ average *= pow(tot_average / average, 0.5) * pow(1.25, position/200.); // MAGIC!
+ for (j = 0; j < range - 1; j++)
+ if (ff_vorbis_floor1_inverse_db_table[j * fc->multiplier] > average) break;
+ posts[fc->list[i].sort] = j;
+ }
+}
+
+static int render_point(int x0, int y0, int x1, int y1, int x) {
+ return y0 + (x - x0) * (y1 - y0) / (x1 - x0);
+}
+
+static void floor_encode(venc_context_t * venc, floor_t * fc, PutBitContext * pb, uint_fast16_t * posts, float * floor, int samples) {
+ int range = 255 / fc->multiplier + 1;
+ int coded[fc->values]; // first 2 values are unused
+ int i, counter;
+
+ put_bits(pb, 1, 1); // non zero
+ put_bits(pb, ilog(range - 1), posts[0]);
+ put_bits(pb, ilog(range - 1), posts[1]);
+ coded[0] = coded[1] = 1;
+
+ for (i = 2; i < fc->values; i++) {
+ int predicted = render_point(fc->list[fc->list[i].low].x,
+ posts[fc->list[i].low],
+ fc->list[fc->list[i].high].x,
+ posts[fc->list[i].high],
+ fc->list[i].x);
+ int highroom = range - predicted;
+ int lowroom = predicted;
+ int room = FFMIN(highroom, lowroom);
+ if (predicted == posts[i]) {
+ coded[i] = 0; // must be used later as flag!
+ continue;
+ } else {
+ if (!coded[fc->list[i].low ]) coded[fc->list[i].low ] = -1;
+ if (!coded[fc->list[i].high]) coded[fc->list[i].high] = -1;
+ }
+ if (posts[i] > predicted) {
+ if (posts[i] - predicted > room)
+ coded[i] = posts[i] - predicted + lowroom;
+ else
+ coded[i] = (posts[i] - predicted) << 1;
+ } else {
+ if (predicted - posts[i] > room)
+ coded[i] = predicted - posts[i] + highroom - 1;
+ else
+ coded[i] = ((predicted - posts[i]) << 1) - 1;
+ }
+ }
+
+ counter = 2;
+ for (i = 0; i < fc->partitions; i++) {
+ floor_class_t * c = &fc->classes[fc->partition_to_class[i]];
+ int k, cval = 0, csub = 1<<c->subclass;
+ if (c->subclass) {
+ codebook_t * book = &venc->codebooks[c->masterbook];
+ int cshift = 0;
+ for (k = 0; k < c->dim; k++) {
+ int l;
+ for (l = 0; l < csub; l++) {
+ int maxval = 1;
+ if (c->books[l] != -1)
+ maxval = venc->codebooks[c->books[l]].nentries;
+ // coded could be -1, but this still works, because that's 0
+ if (coded[counter + k] < maxval) break;
+ }
+ assert(l != csub);
+ cval |= l << cshift;
+ cshift += c->subclass;
+ }
+ put_codeword(pb, book, cval);
+ }
+ for (k = 0; k < c->dim; k++) {
+ int book = c->books[cval & (csub-1)];
+ int entry = coded[counter++];
+ cval >>= c->subclass;
+ if (book == -1) continue;
+ if (entry == -1) entry = 0;
+ put_codeword(pb, &venc->codebooks[book], entry);
+ }
+ }
+
+ ff_vorbis_floor1_render_list(fc->list, fc->values, posts, coded, fc->multiplier, floor, samples);
+}
+
+static float * put_vector(codebook_t * book, PutBitContext * pb, float * num) {
+ int i, entry = -1;
+ float distance = FLT_MAX;
+ assert(book->dimentions);
+ for (i = 0; i < book->nentries; i++) {
+ float * vec = book->dimentions + i * book->ndimentions, d = book->pow2[i];
+ int j;
+ if (!book->lens[i]) continue;
+ for (j = 0; j < book->ndimentions; j++)
+ d -= vec[j] * num[j];
+ if (distance > d) {
+ entry = i;
+ distance = d;
+ }
+ }
+ put_codeword(pb, book, entry);
+ return &book->dimentions[entry * book->ndimentions];
+}
+
+static void residue_encode(venc_context_t * venc, residue_t * rc, PutBitContext * pb, float * coeffs, int samples, int real_ch) {
+ int pass, i, j, p, k;
+ int psize = rc->partition_size;
+ int partitions = (rc->end - rc->begin) / psize;
+ int channels = (rc->type == 2) ? 1 : real_ch;
+ int classes[channels][partitions];
+ int classwords = venc->codebooks[rc->classbook].ndimentions;
+
+ assert(rc->type == 2);
+ assert(real_ch == 2);
+ for (p = 0; p < partitions; p++) {
+ float max1 = 0., max2 = 0.;
+ int s = rc->begin + p * psize;
+ for (k = s; k < s + psize; k += 2) {
+ max1 = FFMAX(max1, fabs(coeffs[ k / real_ch]));
+ max2 = FFMAX(max2, fabs(coeffs[samples + k / real_ch]));
+ }
+
+ for (i = 0; i < rc->classifications - 1; i++) {
+ if (max1 < rc->maxes[i][0] && max2 < rc->maxes[i][1]) break;
+ }
+ classes[0][p] = i;
+ }
+
+ for (pass = 0; pass < 8; pass++) {
+ p = 0;
+ while (p < partitions) {
+ if (pass == 0)
+ for (j = 0; j < channels; j++) {
+ codebook_t * book = &venc->codebooks[rc->classbook];
+ int entry = 0;
+ for (i = 0; i < classwords; i++) {
+ entry *= rc->classifications;
+ entry += classes[j][p + i];
+ }
+ put_codeword(pb, book, entry);
+ }
+ for (i = 0; i < classwords && p < partitions; i++, p++) {
+ for (j = 0; j < channels; j++) {
+ int nbook = rc->books[classes[j][p]][pass];
+ codebook_t * book = &venc->codebooks[nbook];
+ float * buf = coeffs + samples*j + rc->begin + p*psize;
+ if (nbook == -1) continue;
+
+ assert(rc->type == 0 || rc->type == 2);
+ assert(!(psize % book->ndimentions));
+
+ if (rc->type == 0) {
+ for (k = 0; k < psize; k += book->ndimentions) {
+ float * a = put_vector(book, pb, &buf[k]);
+ int l;
+ for (l = 0; l < book->ndimentions; l++)
+ buf[k + l] -= a[l];
+ }
+ } else {
+ int s = rc->begin + p * psize, a1, b1;
+ a1 = (s % real_ch) * samples;
+ b1 = s / real_ch;
+ s = real_ch * samples;
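+ // type 2 residue: build the vector from channel-interleaved
+ // coefficients, stepping a2 across the planar channel buffers and
+ // advancing the sample index b2 each time it wraps around.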
+ for (k = 0; k < psize; k += book->ndimentions) {
+ int dim, a2 = a1, b2 = b1;
+ float vec[book->ndimentions], * pv = vec;
+ for (dim = book->ndimentions; dim--; ) {
+ *pv++ = coeffs[a2 + b2];
+ if ((a2 += samples) == s) {
+ a2=0;
+ b2++;
+ }
+ }
+ pv = put_vector(book, pb, vec);
+ for (dim = book->ndimentions; dim--; ) {
+ coeffs[a1 + b1] -= *pv++;
+ if ((a1 += samples) == s) {
+ a1=0;
+ b1++;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static int apply_window_and_mdct(venc_context_t * venc, signed short * audio, int samples) {
+ int i, j, channel;
+ const float * win = venc->win[0];
+ int window_len = 1 << (venc->log2_blocksize[0] - 1);
+ float n = (float)(1 << venc->log2_blocksize[0]) / 4.;
+ // FIXME use dsp
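+ // The incoming samples are windowed twice: once, with the window reversed,
+ // for the second half of this frame's MDCT input, and once into venc->saved
+ // to become the first half of the next frame's input, giving the 50%
+ // overlap the MDCT needs.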
+
+ if (!venc->have_saved && !samples) return 0;
+
+ if (venc->have_saved) {
+ for (channel = 0; channel < venc->channels; channel++) {
+ memcpy(venc->samples + channel*window_len*2, venc->saved + channel*window_len, sizeof(float)*window_len);
+ }
+ } else {
+ for (channel = 0; channel < venc->channels; channel++) {
+ memset(venc->samples + channel*window_len*2, 0, sizeof(float)*window_len);
+ }
+ }
+
+ if (samples) {
+ for (channel = 0; channel < venc->channels; channel++) {
+ float * offset = venc->samples + channel*window_len*2 + window_len;
+ j = channel;
+ for (i = 0; i < samples; i++, j += venc->channels)
+ offset[i] = audio[j] / 32768. / n * win[window_len - i - 1];
+ }
+ } else {
+ for (channel = 0; channel < venc->channels; channel++) {
+ memset(venc->samples + channel*window_len*2 + window_len, 0, sizeof(float)*window_len);
+ }
+ }
+
+ for (channel = 0; channel < venc->channels; channel++) {
+ ff_mdct_calc(&venc->mdct[0], venc->coeffs + channel*window_len, venc->samples + channel*window_len*2, venc->floor/*tmp*/);
+ }
+
+ if (samples) {
+ for (channel = 0; channel < venc->channels; channel++) {
+ float * offset = venc->saved + channel*window_len;
+ j = channel;
+ for (i = 0; i < samples; i++, j += venc->channels)
+ offset[i] = audio[j] / 32768. / n * win[i];
+ }
+ venc->have_saved = 1;
+ } else {
+ venc->have_saved = 0;
+ }
+ return 1;
+}
+
+static int vorbis_encode_init(AVCodecContext * avccontext)
+{
+ venc_context_t * venc = avccontext->priv_data;
+
+ if (avccontext->channels != 2) {
+ av_log(avccontext, AV_LOG_ERROR, "Current FFmpeg Vorbis encoder only supports 2 channels.\n");
+ return -1;
+ }
+
+ create_vorbis_context(venc, avccontext);
+
+ if (avccontext->flags & CODEC_FLAG_QSCALE)
+ venc->quality = avccontext->global_quality / (float)FF_QP2LAMBDA / 10.;
+ else
+ venc->quality = 1.;
+ venc->quality *= venc->quality;
+
+ avccontext->extradata_size = put_main_header(venc, (uint8_t**)&avccontext->extradata);
+
+ avccontext->frame_size = 1 << (venc->log2_blocksize[0] - 1);
+
+ avccontext->coded_frame = avcodec_alloc_frame();
+ avccontext->coded_frame->key_frame = 1;
+
+ return 0;
+}
+
+static int vorbis_encode_frame(AVCodecContext * avccontext, unsigned char * packets, int buf_size, void *data)
+{
+ venc_context_t * venc = avccontext->priv_data;
+ signed short * audio = data;
+ int samples = data ? avccontext->frame_size : 0;
+ vorbis_mode_t * mode;
+ mapping_t * mapping;
+ PutBitContext pb;
+ int i;
+
+ if (!apply_window_and_mdct(venc, audio, samples)) return 0;
+ samples = 1 << (venc->log2_blocksize[0] - 1);
+
+ init_put_bits(&pb, packets, buf_size);
+
+ put_bits(&pb, 1, 0); // magic bit
+
+ put_bits(&pb, ilog(venc->nmodes - 1), 0); // the mode number, always 0 (0 bits when there is a single mode)
+
+ mode = &venc->modes[0];
+ mapping = &venc->mappings[mode->mapping];
+ if (mode->blockflag) {
+ put_bits(&pb, 1, 0);
+ put_bits(&pb, 1, 0);
+ }
+
+ for (i = 0; i < venc->channels; i++) {
+ floor_t * fc = &venc->floors[mapping->floor[mapping->mux[i]]];
+ uint_fast16_t posts[fc->values];
+ floor_fit(venc, fc, &venc->coeffs[i * samples], posts, samples);
+ floor_encode(venc, fc, &pb, posts, &venc->floor[i * samples], samples);
+ }
+
+ for (i = 0; i < venc->channels * samples; i++) {
+ venc->coeffs[i] /= venc->floor[i];
+ }
+
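+ // Channel coupling (square polar mapping): the angle channel is replaced
+ // by a signed difference against the magnitude channel, with the magnitude
+ // swapped in where needed so the decoder's inverse coupling reconstructs
+ // the original pair.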
+ for (i = 0; i < mapping->coupling_steps; i++) {
+ float * mag = venc->coeffs + mapping->magnitude[i] * samples;
+ float * ang = venc->coeffs + mapping->angle[i] * samples;
+ int j;
+ for (j = 0; j < samples; j++) {
+ float a = ang[j];
+ ang[j] -= mag[j];
+ if (mag[j] > 0) ang[j] = -ang[j];
+ if (ang[j] < 0) mag[j] = a;
+ }
+ }
+
+ residue_encode(venc, &venc->residues[mapping->residue[mapping->mux[0]]], &pb, venc->coeffs, samples, venc->channels);
+
+ flush_put_bits(&pb);
+ return (put_bits_count(&pb) + 7) / 8;
+}
+
+
+static int vorbis_encode_close(AVCodecContext * avccontext)
+{
+ venc_context_t * venc = avccontext->priv_data;
+ int i;
+
+ if (venc->codebooks)
+ for (i = 0; i < venc->ncodebooks; i++) {
+ av_freep(&venc->codebooks[i].lens);
+ av_freep(&venc->codebooks[i].codewords);
+ av_freep(&venc->codebooks[i].quantlist);
+ av_freep(&venc->codebooks[i].dimentions);
+ av_freep(&venc->codebooks[i].pow2);
+ }
+ av_freep(&venc->codebooks);
+
+ if (venc->floors)
+ for (i = 0; i < venc->nfloors; i++) {
+ int j;
+ if (venc->floors[i].classes)
+ for (j = 0; j < venc->floors[i].nclasses; j++)
+ av_freep(&venc->floors[i].classes[j].books);
+ av_freep(&venc->floors[i].classes);
+ av_freep(&venc->floors[i].partition_to_class);
+ av_freep(&venc->floors[i].list);
+ }
+ av_freep(&venc->floors);
+
+ if (venc->residues)
+ for (i = 0; i < venc->nresidues; i++) {
+ av_freep(&venc->residues[i].books);
+ av_freep(&venc->residues[i].maxes);
+ }
+ av_freep(&venc->residues);
+
+ if (venc->mappings)
+ for (i = 0; i < venc->nmappings; i++) {
+ av_freep(&venc->mappings[i].mux);
+ av_freep(&venc->mappings[i].floor);
+ av_freep(&venc->mappings[i].residue);
+ av_freep(&venc->mappings[i].magnitude);
+ av_freep(&venc->mappings[i].angle);
+ }
+ av_freep(&venc->mappings);
+
+ av_freep(&venc->modes);
+
+ av_freep(&venc->saved);
+ av_freep(&venc->samples);
+ av_freep(&venc->floor);
+ av_freep(&venc->coeffs);
+
+ ff_mdct_end(&venc->mdct[0]);
+ ff_mdct_end(&venc->mdct[1]);
+
+ av_freep(&avccontext->coded_frame);
+ av_freep(&avccontext->extradata);
+
+ return 0;
+}
+
+AVCodec vorbis_encoder = {
+ "vorbis",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_VORBIS,
+ sizeof(venc_context_t),
+ vorbis_encode_init,
+ vorbis_encode_frame,
+ vorbis_encode_close,
+ .capabilities= CODEC_CAP_DELAY,
+};
diff --git a/contrib/ffmpeg/libavcodec/vorbis_enc_data.h b/contrib/ffmpeg/libavcodec/vorbis_enc_data.h
new file mode 100644
index 000000000..e56dc5df5
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vorbis_enc_data.h
@@ -0,0 +1,498 @@
+/*
+ * copyright (c) 2006 Oded Shimon <ods15@ods15.dyndns.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+static const uint8_t codebook0[] = {
+ 2, 10, 8, 14, 7, 12, 11, 14, 1, 5, 3, 7, 4, 9, 7,
+ 13,
+};
+
+static const uint8_t codebook1[] = {
+ 1, 4, 2, 6, 3, 7, 5, 7,
+};
+
+static const uint8_t codebook2[] = {
+ 1, 5, 7, 21, 5, 8, 9, 21, 10, 9, 12, 20, 20, 16, 20,
+ 20, 4, 8, 9, 20, 6, 8, 9, 20, 11, 11, 13, 20, 20, 15,
+ 17, 20, 9, 11, 14, 20, 8, 10, 15, 20, 11, 13, 15, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 13, 20, 20, 20, 18, 18, 20, 20,
+ 20, 20, 20, 20, 3, 6, 8, 20, 6, 7, 9, 20, 10, 9, 12,
+ 20, 20, 20, 20, 20, 5, 7, 9, 20, 6, 6, 9, 20, 10, 9,
+ 12, 20, 20, 20, 20, 20, 8, 10, 13, 20, 8, 9, 12, 20, 11,
+ 10, 12, 20, 20, 20, 20, 20, 18, 20, 20, 20, 15, 17, 18, 20,
+ 18, 17, 18, 20, 20, 20, 20, 20, 7, 10, 12, 20, 8, 9, 11,
+ 20, 14, 13, 14, 20, 20, 20, 20, 20, 6, 9, 12, 20, 7, 8,
+ 11, 20, 12, 11, 13, 20, 20, 20, 20, 20, 9, 11, 15, 20, 8,
+ 10, 14, 20, 12, 11, 14, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 11, 16, 18,
+ 20, 15, 15, 17, 20, 20, 17, 20, 20, 20, 20, 20, 20, 9, 14,
+ 16, 20, 12, 12, 15, 20, 17, 15, 18, 20, 20, 20, 20, 20, 16,
+ 19, 18, 20, 15, 16, 20, 20, 17, 17, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 20,
+};
+
+static const uint8_t codebook3[] = {
+ 2, 3, 7, 13, 4, 4, 7, 15, 8, 6, 9, 17, 21, 16, 15,
+ 21, 2, 5, 7, 11, 5, 5, 7, 14, 9, 7, 10, 16, 17, 15,
+ 16, 21, 4, 7, 10, 17, 7, 7, 9, 15, 11, 9, 11, 16, 21,
+ 18, 15, 21, 18, 21, 21, 21, 15, 17, 17, 19, 21, 19, 18, 20,
+ 21, 21, 21, 20,
+};
+
+static const uint8_t codebook4[] = {
+ 5, 5, 5, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6,
+ 5, 6, 5, 6, 5, 6, 5, 6, 5, 7, 5, 7, 5, 7, 5,
+ 7, 5, 8, 6, 8, 6, 8, 6, 9, 6, 9, 6, 10, 6, 10,
+ 6, 11, 6, 11, 7, 11, 7, 12, 7, 12, 7, 12, 7, 12, 7,
+ 12, 7, 12, 7, 12, 7, 12, 8, 13, 8, 12, 8, 12, 8, 13,
+ 8, 13, 9, 13, 9, 13, 9, 13, 9, 12, 10, 12, 10, 13, 10,
+ 14, 11, 14, 12, 14, 13, 14, 13, 14, 14, 15, 16, 15, 15, 15,
+ 14, 15, 17, 21, 22, 22, 21, 22, 22, 22, 22, 22, 22, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21,
+};
+
+static const uint8_t codebook5[] = {
+ 2, 5, 5, 4, 5, 4, 5, 4, 5, 4, 6, 5, 6, 5, 6,
+ 5, 6, 5, 7, 5, 7, 6, 8, 6, 8, 6, 8, 6, 9, 6,
+ 9, 6,
+};
+
+static const uint8_t codebook6[] = {
+ 8, 5, 8, 4, 9, 4, 9, 4, 9, 4, 9, 4, 9, 4, 9,
+ 4, 9, 4, 9, 4, 9, 4, 8, 4, 8, 4, 9, 5, 9, 5,
+ 9, 5, 9, 5, 9, 6, 10, 6, 10, 7, 10, 8, 11, 9, 11,
+ 11, 12, 13, 12, 14, 13, 15, 13, 15, 14, 16, 14, 17, 15, 17,
+ 15, 15, 16, 16, 15, 16, 16, 16, 15, 18, 16, 15, 17, 17, 19,
+ 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19,
+ 19, 19, 19, 19, 19, 19,
+};
+
+static const uint8_t codebook7[] = {
+ 1, 5, 5, 5, 5, 5, 5, 5, 6, 5, 6, 5, 6, 5, 6,
+ 5, 6, 6, 7, 7, 7, 7, 8, 7, 8, 8, 9, 8, 10, 9,
+ 10, 9,
+};
+
+static const uint8_t codebook8[] = {
+ 4, 3, 4, 3, 4, 4, 5, 4, 5, 4, 5, 5, 6, 5, 6,
+ 5, 7, 5, 7, 6, 7, 6, 8, 7, 8, 7, 8, 7, 9, 8,
+ 9, 9, 9, 9, 10, 10, 10, 11, 9, 12, 9, 12, 9, 15, 10,
+ 14, 9, 13, 10, 13, 10, 12, 10, 12, 10, 13, 10, 12, 11, 13,
+ 11, 14, 12, 13, 13, 14, 14, 13, 14, 15, 14, 16, 13, 13, 14,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 15, 15,
+};
+
+static const uint8_t codebook9[] = {
+ 4, 5, 4, 5, 3, 5, 3, 5, 3, 5, 4, 4, 4, 4, 5,
+ 5, 5,
+};
+
+static const uint8_t codebook10[] = {
+ 3, 3, 4, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 5,
+ 7, 5, 8, 6, 8, 6, 9, 7, 10, 7, 10, 8, 10, 8, 11,
+ 9, 11,
+};
+
+static const uint8_t codebook11[] = {
+ 3, 7, 3, 8, 3, 10, 3, 8, 3, 9, 3, 8, 4, 9, 4,
+ 9, 5, 9, 6, 10, 6, 9, 7, 11, 7, 12, 9, 13, 10, 13,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12,
+};
+
+static const uint8_t codebook12[] = {
+ 4, 5, 4, 5, 4, 5, 4, 5, 3, 5, 3, 5, 3, 5, 4,
+ 5, 4,
+};
+
+static const uint8_t codebook13[] = {
+ 4, 2, 4, 2, 5, 3, 5, 4, 6, 6, 6, 7, 7, 8, 7,
+ 8, 7, 8, 7, 9, 8, 9, 8, 9, 8, 10, 8, 11, 9, 12,
+ 9, 12,
+};
+
+static const uint8_t codebook14[] = {
+ 2, 5, 2, 6, 3, 6, 4, 7, 4, 7, 5, 9, 5, 11, 6,
+ 11, 6, 11, 7, 11, 6, 11, 6, 11, 9, 11, 8, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10,
+ 10, 10, 10,
+};
+
+static const uint8_t codebook15[] = {
+ 5, 6, 11, 11, 11, 11, 10, 10, 12, 11, 5, 2, 11, 5, 6,
+ 6, 7, 9, 11, 13, 13, 10, 7, 11, 6, 7, 8, 9, 10, 12,
+ 11, 5, 11, 6, 8, 7, 9, 11, 14, 15, 11, 6, 6, 8, 4,
+ 5, 7, 8, 10, 13, 10, 5, 7, 7, 5, 5, 6, 8, 10, 11,
+ 10, 7, 7, 8, 6, 5, 5, 7, 9, 9, 11, 8, 8, 11, 8,
+ 7, 6, 6, 7, 9, 12, 11, 10, 13, 9, 9, 7, 7, 7, 9,
+ 11, 13, 12, 15, 12, 11, 9, 8, 8, 8,
+};
+
+static const uint8_t codebook16[] = {
+ 2, 4, 4, 0, 0, 0, 0, 0, 0, 5, 6, 6, 0, 0, 0,
+ 0, 0, 0, 5, 6, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 5, 7, 7, 0, 0, 0, 0, 0, 0,
+ 7, 8, 8, 0, 0, 0, 0, 0, 0, 6, 7, 8, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 7,
+ 0, 0, 0, 0, 0, 0, 6, 8, 7, 0, 0, 0, 0, 0, 0,
+ 7, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 7, 0, 0, 0,
+ 0, 0, 0, 7, 8, 8, 0, 0, 0, 0, 0, 0, 7, 8, 8,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 7, 8, 8, 0, 0, 0, 0, 0, 0, 8, 8, 9, 0, 0, 0,
+ 0, 0, 0, 8, 9, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 6, 8, 8, 0, 0, 0, 0, 0, 0,
+ 7, 9, 8, 0, 0, 0, 0, 0, 0, 8, 9, 9, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 5, 7, 7, 0, 0, 0, 0, 0, 0, 7, 8, 8,
+ 0, 0, 0, 0, 0, 0, 7, 8, 8, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 8, 8, 0, 0, 0,
+ 0, 0, 0, 8, 9, 9, 0, 0, 0, 0, 0, 0, 7, 8, 9,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 6, 8, 8, 0, 0, 0, 0, 0, 0, 8, 9, 9, 0, 0, 0,
+ 0, 0, 0, 8, 9, 8,
+};
+
+static const uint8_t codebook17[] = {
+ 2, 5, 5, 0, 0, 0, 5, 5, 0, 0, 0, 5, 5, 0, 0,
+ 0, 7, 8, 0, 0, 0, 0, 0, 0, 0, 5, 6, 6, 0, 0,
+ 0, 7, 7, 0, 0, 0, 7, 7, 0, 0, 0, 10, 10, 0, 0,
+ 0, 0, 0, 0, 0, 5, 6, 6, 0, 0, 0, 7, 7, 0, 0,
+ 0, 7, 7, 0, 0, 0, 10, 10, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 5, 7, 7, 0, 0, 0, 7, 7, 0, 0, 0, 7, 7, 0, 0,
+ 0, 9, 9, 0, 0, 0, 0, 0, 0, 0, 5, 7, 7, 0, 0,
+ 0, 7, 7, 0, 0, 0, 7, 7, 0, 0, 0, 9, 9, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 5, 7, 7, 0, 0, 0, 7, 7, 0, 0,
+ 0, 7, 7, 0, 0, 0, 9, 9, 0, 0, 0, 0, 0, 0, 0,
+ 5, 7, 7, 0, 0, 0, 7, 7, 0, 0, 0, 7, 7, 0, 0,
+ 0, 9, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 10, 10, 0, 0,
+ 0, 9, 9, 0, 0, 0, 9, 9, 0, 0, 0, 10, 10, 0, 0,
+ 0, 0, 0, 0, 0, 8, 10, 10, 0, 0, 0, 9, 9, 0, 0,
+ 0, 9, 9, 0, 0, 0, 10, 10,
+};
+
+static const uint8_t codebook18[] = {
+ 2, 4, 3, 6, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 4, 6, 6,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 4, 4, 4, 6, 6, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 6, 6, 6, 9, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 6, 7, 9, 9,
+};
+
+static const uint8_t codebook19[] = {
+ 2, 3, 3, 6, 6, 0, 0, 0, 0, 0, 4, 4, 6, 6, 0,
+ 0, 0, 0, 0, 4, 4, 6, 6, 0, 0, 0, 0, 0, 5, 5,
+ 6, 6, 0, 0, 0, 0, 0, 0, 0, 6, 6, 0, 0, 0, 0,
+ 0, 0, 0, 7, 8, 0, 0, 0, 0, 0, 0, 0, 7, 7, 0,
+ 0, 0, 0, 0, 0, 0, 9, 9,
+};
+
+static const uint8_t codebook20[] = {
+ 1, 3, 4, 6, 6, 7, 7, 9, 9, 0, 5, 5, 7, 7, 7,
+ 8, 9, 9, 0, 5, 5, 7, 7, 8, 8, 9, 9, 0, 7, 7,
+ 8, 8, 8, 8, 10, 10, 0, 0, 0, 8, 8, 8, 8, 10, 10,
+ 0, 0, 0, 9, 9, 9, 9, 10, 10, 0, 0, 0, 9, 9, 9,
+ 9, 10, 10, 0, 0, 0, 10, 10, 10, 10, 11, 11, 0, 0, 0,
+ 0, 0, 10, 10, 11, 11,
+};
+
+static const uint8_t codebook21[] = {
+ 2, 3, 3, 6, 6, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10,
+ 11, 10, 0, 5, 5, 7, 7, 8, 8, 9, 9, 9, 9, 10, 10,
+ 10, 10, 11, 11, 0, 5, 5, 7, 7, 8, 8, 9, 9, 9, 9,
+ 10, 10, 10, 10, 11, 11, 0, 6, 6, 7, 7, 8, 8, 9, 9,
+ 9, 9, 10, 10, 11, 11, 11, 11, 0, 0, 0, 7, 7, 8, 8,
+ 9, 9, 9, 9, 10, 10, 11, 11, 11, 12, 0, 0, 0, 8, 8,
+ 8, 8, 9, 9, 9, 9, 10, 10, 11, 11, 12, 12, 0, 0, 0,
+ 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 11, 11, 12, 12, 0,
+ 0, 0, 9, 9, 9, 9, 10, 10, 10, 10, 11, 10, 11, 11, 12,
+ 12, 0, 0, 0, 0, 0, 9, 9, 10, 10, 10, 10, 11, 11, 11,
+ 11, 12, 12, 0, 0, 0, 0, 0, 9, 8, 9, 9, 10, 10, 11,
+ 11, 12, 12, 12, 12, 0, 0, 0, 0, 0, 8, 8, 9, 9, 10,
+ 10, 11, 11, 12, 11, 12, 12, 0, 0, 0, 0, 0, 9, 10, 10,
+ 10, 11, 11, 11, 11, 12, 12, 13, 13, 0, 0, 0, 0, 0, 0,
+ 0, 10, 10, 10, 10, 11, 11, 12, 12, 13, 13, 0, 0, 0, 0,
+ 0, 0, 0, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 0, 0,
+ 0, 0, 0, 0, 0, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13,
+ 0, 0, 0, 0, 0, 0, 0, 11, 11, 12, 12, 12, 12, 13, 13,
+ 13, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 12, 12,
+ 13, 13, 13, 13,
+};
+
+static const uint8_t codebook22[] = {
+ 1, 4, 4, 7, 6, 6, 7, 6, 6, 4, 7, 7, 10, 9, 9,
+ 11, 9, 9, 4, 7, 7, 10, 9, 9, 11, 9, 9, 7, 10, 10,
+ 11, 11, 10, 12, 11, 11, 6, 9, 9, 11, 10, 10, 11, 10, 10,
+ 6, 9, 9, 11, 10, 10, 11, 10, 10, 7, 11, 11, 11, 11, 11,
+ 12, 11, 11, 6, 9, 9, 11, 10, 10, 11, 10, 10, 6, 9, 9,
+ 11, 10, 10, 11, 10, 10,
+};
+
+static const uint8_t codebook23[] = {
+ 2, 4, 4, 6, 6, 7, 7, 7, 7, 8, 8, 10, 5, 5, 6,
+ 6, 7, 7, 8, 8, 8, 8, 10, 5, 5, 6, 6, 7, 7, 8,
+ 8, 8, 8, 10, 6, 6, 7, 7, 8, 8, 8, 8, 8, 8, 10,
+ 10, 10, 7, 7, 8, 7, 8, 8, 8, 8, 10, 10, 10, 8, 8,
+ 8, 8, 8, 8, 8, 8, 10, 10, 10, 7, 8, 8, 8, 8, 8,
+ 8, 8, 10, 10, 10, 8, 8, 8, 8, 8, 8, 8, 8, 10, 10,
+ 10, 10, 10, 8, 8, 8, 8, 8, 8, 10, 10, 10, 10, 10, 9,
+ 9, 8, 8, 9, 8, 10, 10, 10, 10, 10, 8, 8, 8, 8, 8,
+ 8,
+};
+
+static const uint8_t codebook24[] = {
+ 1, 4, 4, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 6, 5,
+ 5, 7, 7, 8, 8, 8, 8, 9, 9, 10, 10, 7, 5, 5, 7,
+ 7, 8, 8, 8, 8, 9, 9, 11, 10, 0, 8, 8, 8, 8, 9,
+ 9, 9, 9, 10, 10, 11, 11, 0, 8, 8, 8, 8, 9, 9, 9,
+ 9, 10, 10, 11, 11, 0, 12, 12, 9, 9, 10, 10, 10, 10, 11,
+ 11, 11, 12, 0, 13, 13, 9, 9, 10, 10, 10, 10, 11, 11, 12,
+ 12, 0, 0, 0, 10, 10, 10, 10, 11, 11, 12, 12, 12, 12, 0,
+ 0, 0, 10, 10, 10, 10, 11, 11, 12, 12, 12, 12, 0, 0, 0,
+ 14, 14, 11, 11, 11, 11, 12, 12, 13, 13, 0, 0, 0, 14, 14,
+ 11, 11, 11, 11, 12, 12, 13, 13, 0, 0, 0, 0, 0, 12, 12,
+ 12, 12, 13, 13, 14, 13, 0, 0, 0, 0, 0, 13, 13, 12, 12,
+ 13, 12, 14, 13,
+};
+
+static const uint8_t codebook25[] = {
+ 2, 4, 4, 5, 5, 6, 5, 5, 5, 5, 6, 4, 5, 5, 5,
+ 6, 5, 5, 5, 5, 6, 6, 6, 5, 5,
+};
+
+static const uint8_t codebook26[] = {
+ 1, 4, 4, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 4, 9,
+ 8, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 2, 9, 7, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 11, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11,
+};
+
+static const uint8_t codebook27[] = {
+ 1, 4, 4, 6, 6, 7, 7, 8, 7, 9, 9, 10, 10, 10, 10,
+ 6, 5, 5, 7, 7, 8, 8, 10, 8, 11, 10, 12, 12, 13, 13,
+ 6, 5, 5, 7, 7, 8, 8, 10, 9, 11, 11, 12, 12, 13, 12,
+ 18, 8, 8, 8, 8, 9, 9, 10, 9, 11, 10, 12, 12, 13, 13,
+ 18, 8, 8, 8, 8, 9, 9, 10, 10, 11, 11, 13, 12, 14, 13,
+ 18, 11, 11, 9, 9, 10, 10, 11, 11, 11, 12, 13, 12, 13, 14,
+ 18, 11, 11, 9, 8, 11, 10, 11, 11, 11, 11, 12, 12, 14, 13,
+ 18, 18, 18, 10, 11, 10, 11, 12, 12, 12, 12, 13, 12, 14, 13,
+ 18, 18, 18, 10, 11, 11, 9, 12, 11, 12, 12, 12, 13, 13, 13,
+ 18, 18, 17, 14, 14, 11, 11, 12, 12, 13, 12, 14, 12, 14, 13,
+ 18, 18, 18, 14, 14, 11, 10, 12, 9, 12, 13, 13, 13, 13, 13,
+ 18, 18, 17, 16, 18, 13, 13, 12, 12, 13, 11, 14, 12, 14, 14,
+ 17, 18, 18, 17, 18, 13, 12, 13, 10, 12, 11, 14, 14, 14, 14,
+ 17, 18, 18, 18, 18, 15, 16, 12, 12, 13, 10, 14, 12, 14, 15,
+ 18, 18, 18, 16, 17, 16, 14, 12, 11, 13, 10, 13, 13, 14, 15,
+};
+
+static const uint8_t codebook28[] = {
+ 2, 5, 5, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8,
+ 8, 8, 10, 6, 6, 7, 7, 8, 7, 8, 8, 8, 8, 8, 9,
+ 9, 9, 9, 9, 10, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
+ 9, 9, 9, 9, 9, 9, 10, 7, 7, 7, 7, 8, 8, 8, 8,
+ 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 7, 7, 8, 8,
+ 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 8, 8,
+ 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10,
+ 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10,
+ 10, 10, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10,
+ 9, 10, 10, 10, 11, 11, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 11, 10, 11, 11, 11, 9, 9, 9, 9, 9, 9, 10,
+ 10, 9, 9, 10, 9, 11, 10, 11, 11, 11, 9, 9, 9, 9, 9,
+ 9, 9, 9, 10, 10, 10, 9, 11, 11, 11, 11, 11, 9, 9, 9,
+ 9, 10, 10, 9, 9, 9, 9, 10, 9, 11, 11, 11, 11, 11, 11,
+ 11, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11,
+ 11, 11, 11, 10, 9, 10, 10, 9, 10, 9, 9, 10, 9, 11, 10,
+ 10, 11, 11, 11, 11, 9, 10, 9, 9, 9, 9, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 10, 10, 10, 9, 9, 10, 9, 10, 9,
+ 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 9, 9, 9, 9,
+ 9, 10, 10, 10,
+};
+
+static const struct {
+ int dim;
+ int len;
+ int real_len;
+ const uint8_t * clens;
+ int lookup;
+ float min;
+ float delta;
+ const uint8_t * quant;
+} cvectors[] = {
+ { 2, 16, 16, codebook0, 0 },
+ { 2, 8, 8, codebook1, 0 },
+ { 2, 256, 256, codebook2, 0 },
+ { 2, 64, 64, codebook3, 0 },
+ { 2, 128, 128, codebook4, 0 },
+ { 2, 32, 32, codebook5, 0 },
+ { 2, 96, 96, codebook6, 0 },
+ { 2, 32, 32, codebook7, 0 },
+ { 2, 96, 96, codebook8, 0 },
+ { 2, 17, 17, codebook9, 0 },
+ { 2, 32, 32, codebook10, 0 },
+ { 2, 78, 78, codebook11, 0 },
+ { 2, 17, 17, codebook12, 0 },
+ { 2, 32, 32, codebook13, 0 },
+ { 2, 78, 78, codebook14, 0 },
+ { 2, 100, 100, codebook15, 0 },
+ { 8, 1641, 6561, codebook16, 1, -1.0, 1.0, (const uint8_t[]){ 1, 0, 2, } },
+ { 4, 443, 625, codebook17, 1, -2.0, 1.0, (const uint8_t[]){ 2, 1, 3, 0, 4, } },
+ { 4, 105, 625, codebook18, 1, -2.0, 1.0, (const uint8_t[]){ 2, 1, 3, 0, 4, } },
+ { 2, 68, 81, codebook19, 1, -4.0, 1.0, (const uint8_t[]){ 4, 3, 5, 2, 6, 1, 7, 0, 8, } },
+ { 2, 81, 81, codebook20, 1, -4.0, 1.0, (const uint8_t[]){ 4, 3, 5, 2, 6, 1, 7, 0, 8, } },
+ { 2, 289, 289, codebook21, 1, -8.0, 1.0, (const uint8_t[]){ 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0, 16, } },
+ { 4, 81, 81, codebook22, 1, -11.0, 11.0, (const uint8_t[]){ 1, 0, 2, } },
+ { 2, 121, 121, codebook23, 1, -5.0, 1.0, (const uint8_t[]){ 5, 4, 6, 3, 7, 2, 8, 1, 9, 0, 10, } },
+ { 2, 169, 169, codebook24, 1, -30.0, 5.0, (const uint8_t[]){ 6, 5, 7, 4, 8, 3, 9, 2, 10, 1, 11, 0, 12, } },
+ { 2, 25, 25, codebook25, 1, -2.0, 1.0, (const uint8_t[]){ 2, 1, 3, 0, 4, } },
+ { 2, 169, 169, codebook26, 1, -1530.0, 255.0, (const uint8_t[]){ 6, 5, 7, 4, 8, 3, 9, 2, 10, 1, 11, 0, 12, } },
+ { 2, 225, 225, codebook27, 1, -119.0, 17.0, (const uint8_t[]){ 7, 6, 8, 5, 9, 4, 10, 3, 11, 2, 12, 1, 13, 0, 14, } },
+ { 2, 289, 289, codebook28, 1, -8.0, 1.0, (const uint8_t[]){ 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0, 16, } },
+};
+
+static const struct {
+ int dim;
+ int subclass;
+ int masterbook;
+ const int * nbooks;
+} floor_classes[] = {
+ { 3, 0, 0, (const int[]){ 4 } },
+ { 4, 1, 0, (const int[]){ 5, 6 } },
+ { 3, 1, 1, (const int[]){ 7, 8 } },
+ { 4, 2, 2, (const int[]){ -1, 9, 10, 11 } },
+ { 3, 2, 3, (const int[]){ -1, 12, 13, 14 } },
+};
diff --git a/src/libffmpeg/libavcodec/vp3.c b/contrib/ffmpeg/libavcodec/vp3.c
index b5cfbb02c..6a398693a 100644
--- a/src/libffmpeg/libavcodec/vp3.c
+++ b/contrib/ffmpeg/libavcodec/vp3.c
@@ -1,18 +1,20 @@
/*
* Copyright (C) 2003-2004 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -315,7 +317,7 @@ typedef struct Vp3DecodeContext {
int last_coded_c_fragment;
uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
- uint8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
+ int8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
/* Huffman decode */
int hti;
@@ -607,7 +609,7 @@ static void init_dequantizer(Vp3DecodeContext *s)
{
int ac_scale_factor = s->coded_ac_scale_factor[s->quality_index];
int dc_scale_factor = s->coded_dc_scale_factor[s->quality_index];
- int i, j, plane, inter, qri, bmi, bmj, qistart;
+ int i, plane, inter, qri, bmi, bmj, qistart;
debug_vp3(" vp3: initializing dequantization tables\n");
@@ -1327,7 +1329,7 @@ static void reverse_dc_prediction(Vp3DecodeContext *s,
int x, y;
int i = first_fragment;
- short predicted_dc;
+ int predicted_dc;
/* DC values for the left, up-left, up, and up-right fragments */
int vl, vul, vu, vur;
@@ -1453,11 +1455,11 @@ static void reverse_dc_prediction(Vp3DecodeContext *s,
/* check for outranging on the [ul u l] and
* [ul u ur l] predictors */
if ((transform == 13) || (transform == 15)) {
- if (ABS(predicted_dc - vu) > 128)
+ if (FFABS(predicted_dc - vu) > 128)
predicted_dc = vu;
- else if (ABS(predicted_dc - vl) > 128)
+ else if (FFABS(predicted_dc - vl) > 128)
predicted_dc = vl;
- else if (ABS(predicted_dc - vul) > 128)
+ else if (FFABS(predicted_dc - vul) > 128)
predicted_dc = vul;
}
@@ -1525,7 +1527,7 @@ static void render_slice(Vp3DecodeContext *s, int slice)
if (!s->flipped_image) stride = -stride;
- if(ABS(stride) > 2048)
+ if(FFABS(stride) > 2048)
return; //various tables are fixed size
/* for each fragment row in the slice (both of them)... */
@@ -2015,18 +2017,14 @@ static int vp3_decode_init(AVCodecContext *avctx)
if (!s->theora_tables)
{
- for (i = 0; i < 64; i++)
+ for (i = 0; i < 64; i++) {
s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
- for (i = 0; i < 64; i++)
s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
- for (i = 0; i < 64; i++)
s->base_matrix[0][i] = vp31_intra_y_dequant[i];
- for (i = 0; i < 64; i++)
s->base_matrix[1][i] = vp31_intra_c_dequant[i];
- for (i = 0; i < 64; i++)
s->base_matrix[2][i] = vp31_inter_dequant[i];
- for (i = 0; i < 64; i++)
s->filter_limit_values[i] = vp31_filter_limit_values[i];
+ }
for(inter=0; inter<2; inter++){
for(plane=0; plane<3; plane++){
diff --git a/src/libffmpeg/libavcodec/vp3data.h b/contrib/ffmpeg/libavcodec/vp3data.h
index 51cbae8db..d69ddfa28 100644
--- a/src/libffmpeg/libavcodec/vp3data.h
+++ b/contrib/ffmpeg/libavcodec/vp3data.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (C) 2003 the ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#ifndef VP3DATA_H
#define VP3DATA_H
diff --git a/src/libffmpeg/libavcodec/vp3dsp.c b/contrib/ffmpeg/libavcodec/vp3dsp.c
index f5a1fb6ff..a48515a5e 100644
--- a/src/libffmpeg/libavcodec/vp3dsp.c
+++ b/contrib/ffmpeg/libavcodec/vp3dsp.c
@@ -1,18 +1,20 @@
/*
* Copyright (C) 2004 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -40,7 +42,7 @@
static always_inline void idct(uint8_t *dst, int stride, int16_t *input, int type)
{
int16_t *ip = input;
- uint8_t *cm = cropTbl + MAX_NEG_CROP;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
int A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;
int Ed, Gd, Add, Bdd, Fd, Hd;
diff --git a/contrib/ffmpeg/libavcodec/vp5.c b/contrib/ffmpeg/libavcodec/vp5.c
new file mode 100644
index 000000000..ac953c7aa
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vp5.c
@@ -0,0 +1,290 @@
+/**
+ * @file vp5.c
+ * VP5 compatible video decoder
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "bitstream.h"
+#include "mpegvideo.h"
+
+#include "vp56.h"
+#include "vp56data.h"
+#include "vp5data.h"
+
+
+static int vp5_parse_header(vp56_context_t *s, uint8_t *buf, int buf_size,
+ int *golden_frame)
+{
+ vp56_range_coder_t *c = &s->c;
+ int rows, cols;
+
+ vp56_init_range_decoder(&s->c, buf, buf_size);
+ s->frames[VP56_FRAME_CURRENT].key_frame = !vp56_rac_get(c);
+ vp56_rac_get(c);
+ vp56_init_dequant(s, vp56_rac_gets(c, 6));
+ if (s->frames[VP56_FRAME_CURRENT].key_frame)
+ {
+ vp56_rac_gets(c, 8);
+ if(vp56_rac_gets(c, 5) > 5)
+ return 0;
+ vp56_rac_gets(c, 2);
+ if (vp56_rac_get(c)) {
+ av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
+ return 0;
+ }
+ rows = vp56_rac_gets(c, 8); /* number of stored macroblock rows */
+ cols = vp56_rac_gets(c, 8); /* number of stored macroblock cols */
+ vp56_rac_gets(c, 8); /* number of displayed macroblock rows */
+ vp56_rac_gets(c, 8); /* number of displayed macroblock cols */
+ vp56_rac_gets(c, 2);
+ if (16*cols != s->avctx->coded_width ||
+ 16*rows != s->avctx->coded_height) {
+ avcodec_set_dimensions(s->avctx, 16*cols, 16*rows);
+ return 2;
+ }
+ }
+ return 1;
+}
+
+/* Gives a result very similar to the vp6 version, except in a few cases */
+static int vp5_adjust(int v, int t)
+{
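+ // Branchless clamp: s1 and s2 are all-ones sign masks (arithmetic right
+ // shift of a 32-bit int), used to take absolute values and to restore the
+ // original sign at the end without conditionals.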
+ int s2, s1 = v >> 31;
+ v ^= s1;
+ v -= s1;
+ v *= v < 2*t;
+ v -= t;
+ s2 = v >> 31;
+ v ^= s2;
+ v -= s2;
+ v = t - v;
+ v += s1;
+ v ^= s1;
+ return v;
+}
+
+static void vp5_parse_vector_adjustment(vp56_context_t *s, vp56_mv_t *vect)
+{
+ vp56_range_coder_t *c = &s->c;
+ int comp, di;
+
+ for (comp=0; comp<2; comp++) {
+ int delta = 0;
+ if (vp56_rac_get_prob(c, s->vector_model_dct[comp])) {
+ int sign = vp56_rac_get_prob(c, s->vector_model_sig[comp]);
+ di = vp56_rac_get_prob(c, s->vector_model_pdi[comp][0]);
+ di |= vp56_rac_get_prob(c, s->vector_model_pdi[comp][1]) << 1;
+ delta = vp56_rac_get_tree(c, vp56_pva_tree,
+ s->vector_model_pdv[comp]);
+ delta = di | (delta << 2);
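+ // (x ^ -s) + s negates x when the sign bit s is 1 and leaves it
+ // unchanged when s is 0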
+ delta = (delta ^ -sign) + sign;
+ }
+ if (!comp)
+ vect->x = delta;
+ else
+ vect->y = delta;
+ }
+}
+
+static void vp5_parse_vector_models(vp56_context_t *s)
+{
+ vp56_range_coder_t *c = &s->c;
+ int comp, node;
+
+ for (comp=0; comp<2; comp++) {
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][0]))
+ s->vector_model_dct[comp] = vp56_rac_gets_nn(c, 7);
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][1]))
+ s->vector_model_sig[comp] = vp56_rac_gets_nn(c, 7);
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][2]))
+ s->vector_model_pdi[comp][0] = vp56_rac_gets_nn(c, 7);
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][3]))
+ s->vector_model_pdi[comp][1] = vp56_rac_gets_nn(c, 7);
+ }
+
+ for (comp=0; comp<2; comp++)
+ for (node=0; node<7; node++)
+ if (vp56_rac_get_prob(c, vp5_vmc_pct[comp][4 + node]))
+ s->vector_model_pdv[comp][node] = vp56_rac_gets_nn(c, 7);
+}
+
+static void vp5_parse_coeff_models(vp56_context_t *s)
+{
+ vp56_range_coder_t *c = &s->c;
+ uint8_t def_prob[11];
+ int node, cg, ctx;
+ int ct; /* code type */
+ int pt; /* plane type (0 for Y, 1 for U or V) */
+
+ memset(def_prob, 0x80, sizeof(def_prob));
+
+ for (pt=0; pt<2; pt++)
+ for (node=0; node<11; node++)
+ if (vp56_rac_get_prob(c, vp5_dccv_pct[pt][node])) {
+ def_prob[node] = vp56_rac_gets_nn(c, 7);
+ s->coeff_model_dccv[pt][node] = def_prob[node];
+ } else if (s->frames[VP56_FRAME_CURRENT].key_frame) {
+ s->coeff_model_dccv[pt][node] = def_prob[node];
+ }
+
+ for (ct=0; ct<3; ct++)
+ for (pt=0; pt<2; pt++)
+ for (cg=0; cg<6; cg++)
+ for (node=0; node<11; node++)
+ if (vp56_rac_get_prob(c, vp5_ract_pct[ct][pt][cg][node])) {
+ def_prob[node] = vp56_rac_gets_nn(c, 7);
+ s->coeff_model_ract[pt][ct][cg][node] = def_prob[node];
+ } else if (s->frames[VP56_FRAME_CURRENT].key_frame) {
+ s->coeff_model_ract[pt][ct][cg][node] = def_prob[node];
+ }
+
+ /* coeff_model_dcct is a linear combination of coeff_model_dccv */
+ for (pt=0; pt<2; pt++)
+ for (ctx=0; ctx<36; ctx++)
+ for (node=0; node<5; node++)
+ s->coeff_model_dcct[pt][ctx][node] = clip(((s->coeff_model_dccv[pt][node] * vp5_dccv_lc[node][ctx][0] + 128) >> 8) + vp5_dccv_lc[node][ctx][1], 1, 254);
+
+ /* coeff_model_acct is a linear combination of coeff_model_ract */
+ for (ct=0; ct<3; ct++)
+ for (pt=0; pt<2; pt++)
+ for (cg=0; cg<3; cg++)
+ for (ctx=0; ctx<6; ctx++)
+ for (node=0; node<5; node++)
+ s->coeff_model_acct[pt][ct][cg][ctx][node] = clip(((s->coeff_model_ract[pt][ct][cg][node] * vp5_ract_lc[ct][cg][node][ctx][0] + 128) >> 8) + vp5_ract_lc[ct][cg][node][ctx][1], 1, 254);
+}
+
+static void vp5_parse_coeff(vp56_context_t *s)
+{
+ vp56_range_coder_t *c = &s->c;
+ uint8_t *permute = s->scantable.permutated;
+ uint8_t *model, *model2;
+ int coeff, sign, coeff_idx;
+ int b, i, cg, idx, ctx, ctx_last;
+ int pt = 0; /* plane type (0 for Y, 1 for U or V) */
+
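+ // For each of the 6 blocks, walk the 64 coefficients in scan order; at
+ // every position a small arithmetic-coded decision tree (whose model
+ // depends on the plane, the previous code type and the coefficient group)
+ // yields a coefficient value, a zero, or end-of-block.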
+ for (b=0; b<6; b++) {
+ int ct = 1; /* code type */
+
+ if (b > 3) pt = 1;
+
+ ctx = 6*s->coeff_ctx[vp56_b6to4[b]][0]
+ + s->above_blocks[s->above_block_idx[b]].not_null_dc;
+ model = s->coeff_model_dccv[pt];
+ model2 = s->coeff_model_dcct[pt][ctx];
+
+ for (coeff_idx=0; coeff_idx<64; ) {
+ if (vp56_rac_get_prob(c, model2[0])) {
+ if (vp56_rac_get_prob(c, model2[2])) {
+ if (vp56_rac_get_prob(c, model2[3])) {
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 4;
+ idx = vp56_rac_get_tree(c, vp56_pc_tree, model);
+ sign = vp56_rac_get(c);
+ coeff = vp56_coeff_bias[idx];
+ for (i=vp56_coeff_bit_length[idx]; i>=0; i--)
+ coeff += vp56_rac_get_prob(c, vp56_coeff_parse_table[idx][i]) << i;
+ } else {
+ if (vp56_rac_get_prob(c, model2[4])) {
+ coeff = 3 + vp56_rac_get_prob(c, model[5]);
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 3;
+ } else {
+ coeff = 2;
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 2;
+ }
+ sign = vp56_rac_get(c);
+ }
+ ct = 2;
+ } else {
+ ct = 1;
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 1;
+ sign = vp56_rac_get(c);
+ coeff = 1;
+ }
+ coeff = (coeff ^ -sign) + sign;
+ if (coeff_idx)
+ coeff *= s->dequant_ac;
+ s->block_coeff[b][permute[coeff_idx]] = coeff;
+ } else {
+ if (ct && !vp56_rac_get_prob(c, model2[1]))
+ break;
+ ct = 0;
+ s->coeff_ctx[vp56_b6to4[b]][coeff_idx] = 0;
+ }
+
+ cg = vp5_coeff_groups[++coeff_idx];
+ ctx = s->coeff_ctx[vp56_b6to4[b]][coeff_idx];
+ model = s->coeff_model_ract[pt][ct][cg];
+ model2 = cg > 2 ? model : s->coeff_model_acct[pt][ct][cg][ctx];
+ }
+
+ ctx_last = FFMIN(s->coeff_ctx_last[vp56_b6to4[b]], 24);
+ s->coeff_ctx_last[vp56_b6to4[b]] = coeff_idx;
+ if (coeff_idx < ctx_last)
+ for (i=coeff_idx; i<=ctx_last; i++)
+ s->coeff_ctx[vp56_b6to4[b]][i] = 5;
+ s->above_blocks[s->above_block_idx[b]].not_null_dc = s->coeff_ctx[vp56_b6to4[b]][0];
+ }
+}
+
+static void vp5_default_models_init(vp56_context_t *s)
+{
+ int i;
+
+ for (i=0; i<2; i++) {
+ s->vector_model_sig[i] = 0x80;
+ s->vector_model_dct[i] = 0x80;
+ s->vector_model_pdi[i][0] = 0x55;
+ s->vector_model_pdi[i][1] = 0x80;
+ }
+ memcpy(s->mb_types_stats, vp56_def_mb_types_stats, sizeof(s->mb_types_stats));
+ memset(s->vector_model_pdv, 0x80, sizeof(s->vector_model_pdv));
+}
+
+static int vp5_decode_init(AVCodecContext *avctx)
+{
+ vp56_context_t *s = avctx->priv_data;
+
+ vp56_init(s, avctx, 1);
+ s->vp56_coord_div = vp5_coord_div;
+ s->parse_vector_adjustment = vp5_parse_vector_adjustment;
+ s->adjust = vp5_adjust;
+ s->parse_coeff = vp5_parse_coeff;
+ s->default_models_init = vp5_default_models_init;
+ s->parse_vector_models = vp5_parse_vector_models;
+ s->parse_coeff_models = vp5_parse_coeff_models;
+ s->parse_header = vp5_parse_header;
+
+ return 0;
+}
+
+AVCodec vp5_decoder = {
+ "vp5",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VP5,
+ sizeof(vp56_context_t),
+ vp5_decode_init,
+ NULL,
+ vp56_free,
+ vp56_decode_frame,
+};
diff --git a/contrib/ffmpeg/libavcodec/vp56.c b/contrib/ffmpeg/libavcodec/vp56.c
new file mode 100644
index 000000000..eb78d02e4
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vp56.c
@@ -0,0 +1,665 @@
+/**
+ * @file vp56.c
+ * VP5 and VP6 compatible video decoder (common features)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+
+#include "vp56.h"
+#include "vp56data.h"
+
+
+void vp56_init_dequant(vp56_context_t *s, int quantizer)
+{
+ s->quantizer = quantizer;
+ s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
+ s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
+}
+
+static int vp56_get_vectors_predictors(vp56_context_t *s, int row, int col,
+ vp56_frame_t ref_frame)
+{
+ int nb_pred = 0;
+ vp56_mv_t vect[2] = {{0,0}, {0,0}};
+ int pos, offset;
+ vp56_mv_t mvp;
+
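+ // Collect up to two distinct, non-zero motion vectors from the 12
+ // candidate neighbouring macroblocks that used the same reference frame;
+ // the result is used as the context for macroblock type parsing.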
+ for (pos=0; pos<12; pos++) {
+ mvp.x = col + vp56_candidate_predictor_pos[pos][0];
+ mvp.y = row + vp56_candidate_predictor_pos[pos][1];
+ if (mvp.x < 0 || mvp.x >= s->mb_width ||
+ mvp.y < 0 || mvp.y >= s->mb_height)
+ continue;
+ offset = mvp.x + s->mb_width*mvp.y;
+
+ if (vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
+ continue;
+ if ((s->macroblocks[offset].mv.x == vect[0].x &&
+ s->macroblocks[offset].mv.y == vect[0].y) ||
+ (s->macroblocks[offset].mv.x == 0 &&
+ s->macroblocks[offset].mv.y == 0))
+ continue;
+
+ vect[nb_pred++] = s->macroblocks[offset].mv;
+ if (nb_pred > 1) {
+ nb_pred = -1;
+ break;
+ }
+ s->vector_candidate_pos = pos;
+ }
+
+ s->vector_candidate[0] = vect[0];
+ s->vector_candidate[1] = vect[1];
+
+ return nb_pred+1;
+}
+
+static void vp56_parse_mb_type_models(vp56_context_t *s)
+{
+ vp56_range_coder_t *c = &s->c;
+ int i, ctx, type;
+
+ for (ctx=0; ctx<3; ctx++) {
+ if (vp56_rac_get_prob(c, 174)) {
+ int idx = vp56_rac_gets(c, 4);
+ memcpy(s->mb_types_stats[ctx],vp56_pre_def_mb_type_stats[idx][ctx],
+ sizeof(s->mb_types_stats[ctx]));
+ }
+ if (vp56_rac_get_prob(c, 254)) {
+ for (type=0; type<10; type++) {
+ for(i=0; i<2; i++) {
+ if (vp56_rac_get_prob(c, 205)) {
+ int delta, sign = vp56_rac_get(c);
+
+ delta = vp56_rac_get_tree(c, vp56_pmbtm_tree,
+ vp56_mb_type_model_model);
+ if (!delta)
+ delta = 4 * vp56_rac_gets(c, 7);
+ s->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
+ }
+ }
+ }
+ }
+ }
+
+ /* compute MB type probability tables based on previous MB type */
+ for (ctx=0; ctx<3; ctx++) {
+ int p[10];
+
+ for (type=0; type<10; type++)
+ p[type] = 100 * s->mb_types_stats[ctx][type][1];
+
+ for (type=0; type<10; type++) {
+ int p02, p34, p0234, p17, p56, p89, p5689, p156789;
+
+ /* conservative MB type probability */
+ s->mb_type_model[ctx][type][0] = 255 - (255 * s->mb_types_stats[ctx][type][0]) / (1 + s->mb_types_stats[ctx][type][0] + s->mb_types_stats[ctx][type][1]);
+
+ p[type] = 0; /* same MB type => weight is zero */
+
+ /* binary tree parsing probabilities */
+ p02 = p[0] + p[2];
+ p34 = p[3] + p[4];
+ p0234 = p02 + p34;
+ p17 = p[1] + p[7];
+ p56 = p[5] + p[6];
+ p89 = p[8] + p[9];
+ p5689 = p56 + p89;
+ p156789 = p17 + p5689;
+
+ s->mb_type_model[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
+ s->mb_type_model[ctx][type][2] = 1 + 255 * p02 / (1+p0234);
+ s->mb_type_model[ctx][type][3] = 1 + 255 * p17 / (1+p156789);
+ s->mb_type_model[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
+ s->mb_type_model[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
+ s->mb_type_model[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
+ s->mb_type_model[ctx][type][7] = 1 + 255 * p56 / (1+p5689);
+ s->mb_type_model[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
+ s->mb_type_model[ctx][type][9] = 1 + 255 * p[8] / (1+p89);
+
+ /* restore initial value */
+ p[type] = 100 * s->mb_types_stats[ctx][type][1];
+ }
+ }
+}
+
+static vp56_mb_t vp56_parse_mb_type(vp56_context_t *s,
+ vp56_mb_t prev_type, int ctx)
+{
+ uint8_t *mb_type_model = s->mb_type_model[ctx][prev_type];
+ vp56_range_coder_t *c = &s->c;
+
+ if (vp56_rac_get_prob(c, mb_type_model[0]))
+ return prev_type;
+ else
+ return vp56_rac_get_tree(c, vp56_pmbt_tree, mb_type_model);
+}
+
+static void vp56_decode_4mv(vp56_context_t *s, int row, int col)
+{
+ vp56_mv_t mv = {0,0};
+ int type[4];
+ int b;
+
+ /* parse each block type */
+ for (b=0; b<4; b++) {
+ type[b] = vp56_rac_gets(&s->c, 2);
+ if (type[b])
+ type[b]++; /* only returns 0, 2, 3 or 4 (all INTER_PF) */
+ }
+
+ /* get vectors */
+ for (b=0; b<4; b++) {
+ switch (type[b]) {
+ case VP56_MB_INTER_NOVEC_PF:
+ s->mv[b] = (vp56_mv_t) {0,0};
+ break;
+ case VP56_MB_INTER_DELTA_PF:
+ s->parse_vector_adjustment(s, &s->mv[b]);
+ break;
+ case VP56_MB_INTER_V1_PF:
+ s->mv[b] = s->vector_candidate[0];
+ break;
+ case VP56_MB_INTER_V2_PF:
+ s->mv[b] = s->vector_candidate[1];
+ break;
+ }
+ mv.x += s->mv[b].x;
+ mv.y += s->mv[b].y;
+ }
+
+ /* this is the one selected for the whole MB for prediction */
+ s->macroblocks[row * s->mb_width + col].mv = s->mv[3];
+
+ /* chroma vectors are the average of the luma vectors */
+ if (s->avctx->codec->id == CODEC_ID_VP5) {
+ s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
+ s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
+ } else {
+ s->mv[4] = s->mv[5] = (vp56_mv_t) {mv.x/4, mv.y/4};
+ }
+}
+
+static vp56_mb_t vp56_decode_mv(vp56_context_t *s, int row, int col)
+{
+ vp56_mv_t *mv, vect = {0,0};
+ int ctx, b;
+
+ ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
+ s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
+ s->macroblocks[row * s->mb_width + col].type = s->mb_type;
+
+ switch (s->mb_type) {
+ case VP56_MB_INTER_V1_PF:
+ mv = &s->vector_candidate[0];
+ break;
+
+ case VP56_MB_INTER_V2_PF:
+ mv = &s->vector_candidate[1];
+ break;
+
+ case VP56_MB_INTER_V1_GF:
+ vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
+ mv = &s->vector_candidate[0];
+ break;
+
+ case VP56_MB_INTER_V2_GF:
+ vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
+ mv = &s->vector_candidate[1];
+ break;
+
+ case VP56_MB_INTER_DELTA_PF:
+ s->parse_vector_adjustment(s, &vect);
+ mv = &vect;
+ break;
+
+ case VP56_MB_INTER_DELTA_GF:
+ vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
+ s->parse_vector_adjustment(s, &vect);
+ mv = &vect;
+ break;
+
+ case VP56_MB_INTER_4V:
+ vp56_decode_4mv(s, row, col);
+ return s->mb_type;
+
+ default:
+ mv = &vect;
+ break;
+ }
+
+ s->macroblocks[row*s->mb_width + col].mv = *mv;
+
+ /* same vector for all blocks */
+ for (b=0; b<6; b++)
+ s->mv[b] = *mv;
+
+ return s->mb_type;
+}
+
+static void vp56_add_predictors_dc(vp56_context_t *s, vp56_frame_t ref_frame)
+{
+ int idx = s->scantable.permutated[0];
+ int i;
+
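+ // DC prediction: average the DC of the left/above neighbours that were
+ // coded against the same reference frame (VP5 may also look one block
+ // further along the above row), falling back to the last DC seen for this
+ // plane and reference frame when no neighbour qualifies.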
+ for (i=0; i<6; i++) {
+ vp56_ref_dc_t *ab = &s->above_blocks[s->above_block_idx[i]];
+ vp56_ref_dc_t *lb = &s->left_block[vp56_b6to4[i]];
+ int count = 0;
+ int dc = 0;
+
+ if (ref_frame == lb->ref_frame) {
+ dc += lb->dc_coeff;
+ count++;
+ }
+ if (ref_frame == ab->ref_frame) {
+ dc += ab->dc_coeff;
+ count++;
+ }
+ if (s->avctx->codec->id == CODEC_ID_VP5) {
+ if (count < 2 && ref_frame == ab[-1].ref_frame) {
+ dc += ab[-1].dc_coeff;
+ count++;
+ }
+ if (count < 2 && ref_frame == ab[1].ref_frame) {
+ dc += ab[1].dc_coeff;
+ count++;
+ }
+ }
+ if (count == 0)
+ dc = s->prev_dc[vp56_b6to3[i]][ref_frame];
+ else if (count == 2)
+ dc /= 2;
+
+ s->block_coeff[i][idx] += dc;
+ s->prev_dc[vp56_b6to3[i]][ref_frame] = s->block_coeff[i][idx];
+ ab->dc_coeff = s->block_coeff[i][idx];
+ ab->ref_frame = ref_frame;
+ lb->dc_coeff = s->block_coeff[i][idx];
+ lb->ref_frame = ref_frame;
+ s->block_coeff[i][idx] *= s->dequant_dc;
+ }
+}
+
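+// Deblocking: for 12 pixels along the edge, derive a gradient-based
+// correction from the two samples on either side, clamp it with the
+// codec-specific adjust() callback, and apply it with opposite signs to the
+// two pixels straddling the edge.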
+static void vp56_edge_filter(vp56_context_t *s, uint8_t *yuv,
+ int pix_inc, int line_inc, int t)
+{
+ int pix2_inc = 2 * pix_inc;
+ int i, v;
+
+ for (i=0; i<12; i++) {
+ v = (yuv[-pix2_inc] + 3*(yuv[0]-yuv[-pix_inc]) - yuv[pix_inc] + 4) >>3;
+ v = s->adjust(v, t);
+ yuv[-pix_inc] = clip_uint8(yuv[-pix_inc] + v);
+ yuv[0] = clip_uint8(yuv[0] - v);
+ yuv += line_inc;
+ }
+}
+
+static void vp56_deblock_filter(vp56_context_t *s, uint8_t *yuv,
+ int stride, int dx, int dy)
+{
+ int t = vp56_filter_threshold[s->quantizer];
+ if (dx) vp56_edge_filter(s, yuv + 10-dx , 1, stride, t);
+ if (dy) vp56_edge_filter(s, yuv + stride*(10-dy), stride, 1, t);
+}
+
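+// Motion compensation for one 8x8 block: fetch a 12x12 source area (edge
+// emulated, or copied when the source must be deblock filtered), optionally
+// deblock it, then write the 8x8 prediction, blending two source positions
+// when the motion vector has a sub-pel remainder.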
+static void vp56_mc(vp56_context_t *s, int b, uint8_t *src,
+ int stride, int x, int y)
+{
+ int plane = vp56_b6to3[b];
+ uint8_t *dst= s->frames[VP56_FRAME_CURRENT].data[plane]+s->block_offset[b];
+ uint8_t *src_block;
+ int src_offset;
+ int overlap_offset = 0;
+ int mask = s->vp56_coord_div[b] - 1;
+ int deblock_filtering = s->deblock_filtering;
+ int dx;
+ int dy;
+
+ if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
+ (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
+ && !s->frames[VP56_FRAME_CURRENT].key_frame))
+ deblock_filtering = 0;
+
+ dx = s->mv[b].x / s->vp56_coord_div[b];
+ dy = s->mv[b].y / s->vp56_coord_div[b];
+
+ if (b >= 4) {
+ x /= 2;
+ y /= 2;
+ }
+ x += dx - 2;
+ y += dy - 2;
+
+ if (x<0 || x+12>=s->plane_width[plane] ||
+ y<0 || y+12>=s->plane_height[plane]) {
+ ff_emulated_edge_mc(s->edge_emu_buffer,
+ src + s->block_offset[b] + (dy-2)*stride + (dx-2),
+ stride, 12, 12, x, y,
+ s->plane_width[plane],
+ s->plane_height[plane]);
+ src_block = s->edge_emu_buffer;
+ src_offset = 2 + 2*stride;
+ } else if (deblock_filtering) {
+ /* only need a 12x12 block, but there is no such dsp function, */
+ /* so copy a 16x12 block */
+ s->dsp.put_pixels_tab[0][0](s->edge_emu_buffer,
+ src + s->block_offset[b] + (dy-2)*stride + (dx-2),
+ stride, 12);
+ src_block = s->edge_emu_buffer;
+ src_offset = 2 + 2*stride;
+ } else {
+ src_block = src;
+ src_offset = s->block_offset[b] + dy*stride + dx;
+ }
+
+ if (deblock_filtering)
+ vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);
+
+ if (s->mv[b].x & mask)
+ overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
+ if (s->mv[b].y & mask)
+ overlap_offset += (s->mv[b].y > 0) ? stride : -stride;
+
+ if (overlap_offset) {
+ if (s->filter)
+ s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
+ stride, s->mv[b], mask, s->filter_selection, b<4);
+ else
+ s->dsp.put_no_rnd_pixels_l2[1](dst, src_block+src_offset,
+ src_block+src_offset+overlap_offset,
+ stride, 8);
+ } else {
+ s->dsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
+ }
+}
+
+static void vp56_decode_mb(vp56_context_t *s, int row, int col)
+{
+ AVFrame *frame_current, *frame_ref;
+ vp56_mb_t mb_type;
+ vp56_frame_t ref_frame;
+ int b, plan, off;
+
+ if (s->frames[VP56_FRAME_CURRENT].key_frame)
+ mb_type = VP56_MB_INTRA;
+ else
+ mb_type = vp56_decode_mv(s, row, col);
+ ref_frame = vp56_reference_frame[mb_type];
+
+ memset(s->block_coeff, 0, sizeof(s->block_coeff));
+
+ s->parse_coeff(s);
+
+ vp56_add_predictors_dc(s, ref_frame);
+
+ frame_current = &s->frames[VP56_FRAME_CURRENT];
+ frame_ref = &s->frames[ref_frame];
+
+ switch (mb_type) {
+ case VP56_MB_INTRA:
+ for (b=0; b<6; b++) {
+ plan = vp56_b6to3[b];
+ s->dsp.idct_put(frame_current->data[plan] + s->block_offset[b],
+ s->stride[plan], s->block_coeff[b]);
+ }
+ break;
+
+ case VP56_MB_INTER_NOVEC_PF:
+ case VP56_MB_INTER_NOVEC_GF:
+ for (b=0; b<6; b++) {
+ plan = vp56_b6to3[b];
+ off = s->block_offset[b];
+ s->dsp.put_pixels_tab[1][0](frame_current->data[plan] + off,
+ frame_ref->data[plan] + off,
+ s->stride[plan], 8);
+ s->dsp.idct_add(frame_current->data[plan] + off,
+ s->stride[plan], s->block_coeff[b]);
+ }
+ break;
+
+ case VP56_MB_INTER_DELTA_PF:
+ case VP56_MB_INTER_V1_PF:
+ case VP56_MB_INTER_V2_PF:
+ case VP56_MB_INTER_DELTA_GF:
+ case VP56_MB_INTER_4V:
+ case VP56_MB_INTER_V1_GF:
+ case VP56_MB_INTER_V2_GF:
+ for (b=0; b<6; b++) {
+ int x_off = b==1 || b==3 ? 8 : 0;
+ int y_off = b==2 || b==3 ? 8 : 0;
+ plan = vp56_b6to3[b];
+ vp56_mc(s, b, frame_ref->data[plan], s->stride[plan],
+ 16*col+x_off, 16*row+y_off);
+ s->dsp.idct_add(frame_current->data[plan] + s->block_offset[b],
+ s->stride[plan], s->block_coeff[b]);
+ }
+ break;
+ }
+}
+
+static int vp56_size_changed(AVCodecContext *avctx, vp56_context_t *s)
+{
+ int stride = s->frames[VP56_FRAME_CURRENT].linesize[0];
+ int i;
+
+ s->plane_width[0] = s->avctx->coded_width;
+ s->plane_width[1] = s->plane_width[2] = s->avctx->coded_width/2;
+ s->plane_height[0] = s->avctx->coded_height;
+ s->plane_height[1] = s->plane_height[2] = s->avctx->coded_height/2;
+
+ for (i=0; i<3; i++)
+ s->stride[i] = s->flip * s->frames[VP56_FRAME_CURRENT].linesize[i];
+
+ s->mb_width = (s->avctx->coded_width+15) / 16;
+ s->mb_height = (s->avctx->coded_height+15) / 16;
+
+ if (s->mb_width > 1000 || s->mb_height > 1000) {
+ av_log(avctx, AV_LOG_ERROR, "picture too big\n");
+ return -1;
+ }
+
+ s->above_blocks = av_realloc(s->above_blocks,
+ (4*s->mb_width+6) * sizeof(*s->above_blocks));
+ s->macroblocks = av_realloc(s->macroblocks,
+ s->mb_width*s->mb_height*sizeof(*s->macroblocks));
+ av_free(s->edge_emu_buffer_alloc);
+ s->edge_emu_buffer_alloc = av_malloc(16*stride);
+ s->edge_emu_buffer = s->edge_emu_buffer_alloc;
+ if (s->flip < 0)
+ s->edge_emu_buffer += 15 * stride;
+
+ return 0;
+}
+
+int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ vp56_context_t *s = avctx->priv_data;
+ AVFrame *const p = &s->frames[VP56_FRAME_CURRENT];
+ AVFrame *picture = data;
+ int mb_row, mb_col, mb_row_flip, mb_offset = 0;
+ int block, y, uv, stride_y, stride_uv;
+ int golden_frame = 0;
+ int res;
+
+ res = s->parse_header(s, buf, buf_size, &golden_frame);
+ if (!res)
+ return -1;
+
+ p->reference = 1;
+ if (avctx->get_buffer(avctx, p) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+ return -1;
+ }
+
+ if (res == 2)
+ if (vp56_size_changed(avctx, s)) {
+ avctx->release_buffer(avctx, p);
+ return -1;
+ }
+
+ if (p->key_frame) {
+ p->pict_type = FF_I_TYPE;
+ s->default_models_init(s);
+ for (block=0; block<s->mb_height*s->mb_width; block++)
+ s->macroblocks[block].type = VP56_MB_INTRA;
+ } else {
+ p->pict_type = FF_P_TYPE;
+ vp56_parse_mb_type_models(s);
+ s->parse_vector_models(s);
+ s->mb_type = VP56_MB_INTER_NOVEC_PF;
+ }
+
+ s->parse_coeff_models(s);
+
+ memset(s->prev_dc, 0, sizeof(s->prev_dc));
+ s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
+ s->prev_dc[2][VP56_FRAME_CURRENT] = 128;
+
+ for (block=0; block < 4*s->mb_width+6; block++) {
+ s->above_blocks[block].ref_frame = -1;
+ s->above_blocks[block].dc_coeff = 0;
+ s->above_blocks[block].not_null_dc = 0;
+ }
+ s->above_blocks[2*s->mb_width + 2].ref_frame = 0;
+ s->above_blocks[3*s->mb_width + 4].ref_frame = 0;
+
+ stride_y = p->linesize[0];
+ stride_uv = p->linesize[1];
+
+ if (s->flip < 0)
+ mb_offset = 7;
+
+ /* main macroblock loop */
+ for (mb_row=0; mb_row<s->mb_height; mb_row++) {
+ if (s->flip < 0)
+ mb_row_flip = s->mb_height - mb_row - 1;
+ else
+ mb_row_flip = mb_row;
+
+ for (block=0; block<4; block++) {
+ s->left_block[block].ref_frame = -1;
+ s->left_block[block].dc_coeff = 0;
+ s->left_block[block].not_null_dc = 0;
+ memset(s->coeff_ctx[block], 0, 64*sizeof(s->coeff_ctx[block][0]));
+ }
+ memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));
+
+ s->above_block_idx[0] = 1;
+ s->above_block_idx[1] = 2;
+ s->above_block_idx[2] = 1;
+ s->above_block_idx[3] = 2;
+ s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
+ s->above_block_idx[5] = 3*s->mb_width + 4 + 1;
+
+ s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
+ s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
+ s->block_offset[1] = s->block_offset[0] + 8;
+ s->block_offset[3] = s->block_offset[2] + 8;
+ s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
+ s->block_offset[5] = s->block_offset[4];
+
+ for (mb_col=0; mb_col<s->mb_width; mb_col++) {
+ vp56_decode_mb(s, mb_row, mb_col);
+
+ for (y=0; y<4; y++) {
+ s->above_block_idx[y] += 2;
+ s->block_offset[y] += 16;
+ }
+
+ for (uv=4; uv<6; uv++) {
+ s->above_block_idx[uv] += 1;
+ s->block_offset[uv] += 8;
+ }
+ }
+ }
+
+ if (s->frames[VP56_FRAME_PREVIOUS].data[0]
+ && (s->frames[VP56_FRAME_PREVIOUS].data[0]
+ != s->frames[VP56_FRAME_GOLDEN].data[0])) {
+ avctx->release_buffer(avctx, &s->frames[VP56_FRAME_PREVIOUS]);
+ }
+ if (p->key_frame || golden_frame) {
+ if (s->frames[VP56_FRAME_GOLDEN].data[0])
+ avctx->release_buffer(avctx, &s->frames[VP56_FRAME_GOLDEN]);
+ s->frames[VP56_FRAME_GOLDEN] = *p;
+ }
+ s->frames[VP56_FRAME_PREVIOUS] = *p;
+
+ *picture = *p;
+ *data_size = sizeof(AVPicture);
+
+ return buf_size;
+}
+
+void vp56_init(vp56_context_t *s, AVCodecContext *avctx, int flip)
+{
+ int i;
+
+ s->avctx = avctx;
+ avctx->pix_fmt = PIX_FMT_YUV420P;
+
+ if (s->avctx->idct_algo == FF_IDCT_AUTO)
+ s->avctx->idct_algo = FF_IDCT_VP3;
+ dsputil_init(&s->dsp, s->avctx);
+ ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);
+
+ avcodec_set_dimensions(s->avctx, 0, 0);
+
+ for (i=0; i<3; i++)
+ s->frames[i].data[0] = NULL;
+ s->edge_emu_buffer_alloc = NULL;
+
+ s->above_blocks = NULL;
+ s->macroblocks = NULL;
+ s->quantizer = -1;
+ s->deblock_filtering = 1;
+
+ s->filter = NULL;
+
+ if (flip) {
+ s->flip = -1;
+ s->frbi = 2;
+ s->srbi = 0;
+ } else {
+ s->flip = 1;
+ s->frbi = 0;
+ s->srbi = 2;
+ }
+}
+
+int vp56_free(AVCodecContext *avctx)
+{
+ vp56_context_t *s = avctx->priv_data;
+
+ av_free(s->above_blocks);
+ av_free(s->macroblocks);
+ av_free(s->edge_emu_buffer_alloc);
+ if (s->frames[VP56_FRAME_GOLDEN].data[0]
+ && (s->frames[VP56_FRAME_PREVIOUS].data[0]
+ != s->frames[VP56_FRAME_GOLDEN].data[0]))
+ avctx->release_buffer(avctx, &s->frames[VP56_FRAME_GOLDEN]);
+ if (s->frames[VP56_FRAME_PREVIOUS].data[0])
+ avctx->release_buffer(avctx, &s->frames[VP56_FRAME_PREVIOUS]);
+ return 0;
+}
diff --git a/contrib/ffmpeg/libavcodec/vp56.h b/contrib/ffmpeg/libavcodec/vp56.h
new file mode 100644
index 000000000..d6808b1e5
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vp56.h
@@ -0,0 +1,248 @@
+/**
+ * @file vp56.h
+ * VP5 and VP6 compatible video decoder (common features)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef VP56_H
+#define VP56_H
+
+#include "vp56data.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+
+
+typedef struct vp56_context vp56_context_t;
+typedef struct vp56_mv vp56_mv_t;
+
+typedef void (*vp56_parse_vector_adjustment_t)(vp56_context_t *s,
+ vp56_mv_t *vect);
+typedef int (*vp56_adjust_t)(int v, int t);
+typedef void (*vp56_filter_t)(vp56_context_t *s, uint8_t *dst, uint8_t *src,
+ int offset1, int offset2, int stride,
+ vp56_mv_t mv, int mask, int select, int luma);
+typedef void (*vp56_parse_coeff_t)(vp56_context_t *s);
+typedef void (*vp56_default_models_init_t)(vp56_context_t *s);
+typedef void (*vp56_parse_vector_models_t)(vp56_context_t *s);
+typedef void (*vp56_parse_coeff_models_t)(vp56_context_t *s);
+typedef int (*vp56_parse_header_t)(vp56_context_t *s, uint8_t *buf,
+ int buf_size, int *golden_frame);
+
+typedef struct {
+ int high;
+ int bits;
+ const uint8_t *buffer;
+ unsigned long code_word;
+} vp56_range_coder_t;
+
+typedef struct {
+ uint8_t not_null_dc;
+ vp56_frame_t ref_frame;
+ DCTELEM dc_coeff;
+} vp56_ref_dc_t;
+
+struct vp56_mv {
+ int x;
+ int y;
+};
+
+typedef struct {
+ uint8_t type;
+ vp56_mv_t mv;
+} vp56_macroblock_t;
+
+struct vp56_context {
+ AVCodecContext *avctx;
+ DSPContext dsp;
+ ScanTable scantable;
+ AVFrame frames[3];
+ uint8_t *edge_emu_buffer_alloc;
+ uint8_t *edge_emu_buffer;
+ vp56_range_coder_t c;
+
+ /* frame info */
+ int plane_width[3];
+ int plane_height[3];
+ int mb_width; /* number of macroblocks per row */
+ int mb_height; /* number of macroblock rows */
+ int block_offset[6];
+
+ int quantizer;
+ uint16_t dequant_dc;
+ uint16_t dequant_ac;
+
+ /* DC predictors management */
+ vp56_ref_dc_t *above_blocks;
+ vp56_ref_dc_t left_block[4];
+ int above_block_idx[6];
+ DCTELEM prev_dc[3][3]; /* [plan][ref_frame] */
+
+ /* blocks / macroblock */
+ vp56_mb_t mb_type;
+ vp56_macroblock_t *macroblocks;
+ DECLARE_ALIGNED_16(DCTELEM, block_coeff[6][64]);
+ uint8_t coeff_reorder[64]; /* used in vp6 only */
+ uint8_t coeff_index_to_pos[64]; /* used in vp6 only */
+
+ /* motion vectors */
+ vp56_mv_t mv[6]; /* vectors for each block in MB */
+ vp56_mv_t vector_candidate[2];
+ int vector_candidate_pos;
+
+ /* filtering hints */
+ int deblock_filtering;
+ int filter_selection;
+ int filter_mode;
+ int max_vector_length;
+ int sample_variance_threshold;
+
+ /* AC models */
+ uint8_t vector_model_sig[2]; /* delta sign */
+ uint8_t vector_model_dct[2]; /* delta coding types */
+ uint8_t vector_model_pdi[2][2]; /* predefined delta init */
+ uint8_t vector_model_pdv[2][7]; /* predefined delta values */
+ uint8_t vector_model_fdv[2][8]; /* 8 bit delta value definition */
+ uint8_t mb_type_model[3][10][10]; /* model for decoding MB type */
+ uint8_t coeff_model_dccv[2][11]; /* DC coeff value */
+ uint8_t coeff_model_ract[2][3][6][11]; /* Run/AC coding type and AC coeff value */
+ uint8_t coeff_model_acct[2][3][3][6][5];/* vp5 only AC coding type for coding group < 3 */
+ uint8_t coeff_model_dcct[2][36][5]; /* DC coeff coding type */
+ uint8_t coeff_model_runv[2][14]; /* run value (vp6 only) */
+ uint8_t mb_types_stats[3][10][2]; /* contextual, next MB type stats */
+ uint8_t coeff_ctx[4][64]; /* used in vp5 only */
+ uint8_t coeff_ctx_last[4]; /* used in vp5 only */
+
+ /* upside-down flipping hints */
+ int flip; /* are we flipping? */
+ int frbi; /* first row block index in MB */
+ int srbi; /* second row block index in MB */
+ int stride[3]; /* stride for each plan */
+
+ const uint8_t *vp56_coord_div;
+ vp56_parse_vector_adjustment_t parse_vector_adjustment;
+ vp56_adjust_t adjust;
+ vp56_filter_t filter;
+ vp56_parse_coeff_t parse_coeff;
+ vp56_default_models_init_t default_models_init;
+ vp56_parse_vector_models_t parse_vector_models;
+ vp56_parse_coeff_models_t parse_coeff_models;
+ vp56_parse_header_t parse_header;
+};
+
+
+void vp56_init(vp56_context_t *s, AVCodecContext *avctx, int flip);
+int vp56_free(AVCodecContext *avctx);
+void vp56_init_dequant(vp56_context_t *s, int quantizer);
+int vp56_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
+ uint8_t *buf, int buf_size);
+
+
+/**
+ * vp56 specific range coder implementation
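+ *
+ * The decoder state is an interval size ('high', kept >= 128 by
+ * renormalization) and a sliding window of the coded bitstream
+ * ('code_word'), refilled one byte at a time ('bits' counts the bits
+ * left before the next refill). Probabilities are expressed out of 256.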
+ */
+
+static inline void vp56_init_range_decoder(vp56_range_coder_t *c,
+ const uint8_t *buf, int buf_size)
+{
+ c->high = 255;
+ c->bits = 8;
+ c->buffer = buf;
+ c->code_word = *c->buffer++ << 8;
+ c->code_word |= *c->buffer++;
+}
+
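+/* decode one bit; 'prob' is the probability, out of 256, that the bit is 0 */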
+static inline int vp56_rac_get_prob(vp56_range_coder_t *c, uint8_t prob)
+{
+ unsigned int low = 1 + (((c->high - 1) * prob) / 256);
+ unsigned int low_shift = low << 8;
+ int bit = c->code_word >= low_shift;
+
+ if (bit) {
+ c->high -= low;
+ c->code_word -= low_shift;
+ } else {
+ c->high = low;
+ }
+
+ /* normalize */
+ while (c->high < 128) {
+ c->high <<= 1;
+ c->code_word <<= 1;
+ if (--c->bits == 0) {
+ c->bits = 8;
+ c->code_word |= *c->buffer++;
+ }
+ }
+ return bit;
+}
+
+static inline int vp56_rac_get(vp56_range_coder_t *c)
+{
+ /* equiprobable */
+ int low = (c->high + 1) >> 1;
+ unsigned int low_shift = low << 8;
+ int bit = c->code_word >= low_shift;
+ if (bit) {
+ c->high = (c->high - low) << 1;
+ c->code_word -= low_shift;
+ } else {
+ c->high = low << 1;
+ }
+
+ /* normalize */
+ c->code_word <<= 1;
+ if (--c->bits == 0) {
+ c->bits = 8;
+ c->code_word |= *c->buffer++;
+ }
+ return bit;
+}
+
+static inline int vp56_rac_gets(vp56_range_coder_t *c, int bits)
+{
+ int value = 0;
+
+ while (bits--) {
+ value = (value << 1) | vp56_rac_get(c);
+ }
+
+ return value;
+}
+
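+/* read a 7 bit value, doubled, with 0 mapped to 1 ('nn' = non null); */
+/* the 'bits' argument is ignored */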
+static inline int vp56_rac_gets_nn(vp56_range_coder_t *c, int bits)
+{
+ int v = vp56_rac_gets(c, 7) << 1;
+ return v + !v;
+}
+
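+/* walk a vp56_tree_t: a positive 'val' is the jump to take when the coded */
+/* bit is 1 (otherwise the next entry), a leaf stores the negated result */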
+static inline int vp56_rac_get_tree(vp56_range_coder_t *c,
+ const vp56_tree_t *tree,
+ const uint8_t *probs)
+{
+ while (tree->val > 0) {
+ if (vp56_rac_get_prob(c, probs[tree->prob_idx]))
+ tree += tree->val;
+ else
+ tree++;
+ }
+ return -tree->val;
+}
+
+#endif /* VP56_H */
diff --git a/contrib/ffmpeg/libavcodec/vp56data.c b/contrib/ffmpeg/libavcodec/vp56data.c
new file mode 100644
index 000000000..e75c6d1ce
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vp56data.c
@@ -0,0 +1,66 @@
+/**
+ * @file vp56data.c
+ * VP5 and VP6 compatible video decoder (common data)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vp56data.h"
+
+const uint8_t vp56_b6to3[] = { 0, 0, 0, 0, 1, 2 };
+const uint8_t vp56_b6to4[] = { 0, 0, 1, 1, 2, 3 };
+
+const uint8_t vp56_coeff_parse_table[6][11] = {
+ { 159, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 145, 165, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 140, 148, 173, 0, 0, 0, 0, 0, 0, 0, 0 },
+ { 135, 140, 155, 176, 0, 0, 0, 0, 0, 0, 0 },
+ { 130, 134, 141, 157, 180, 0, 0, 0, 0, 0, 0 },
+ { 129, 130, 133, 140, 153, 177, 196, 230, 243, 254, 254 },
+};
+
+const uint8_t vp56_def_mb_types_stats[3][10][2] = {
+ { { 69, 42 }, { 1, 2 }, { 1, 7 }, { 44, 42 }, { 6, 22 },
+ { 1, 3 }, { 0, 2 }, { 1, 5 }, { 0, 1 }, { 0, 0 }, },
+ { { 229, 8 }, { 1, 1 }, { 0, 8 }, { 0, 0 }, { 0, 0 },
+ { 1, 2 }, { 0, 1 }, { 0, 0 }, { 1, 1 }, { 0, 0 }, },
+ { { 122, 35 }, { 1, 1 }, { 1, 6 }, { 46, 34 }, { 0, 0 },
+ { 1, 2 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, },
+};
+
+const vp56_tree_t vp56_pva_tree[] = {
+ { 8, 0},
+ { 4, 1},
+ { 2, 2}, {-0}, {-1},
+ { 2, 3}, {-2}, {-3},
+ { 4, 4},
+ { 2, 5}, {-4}, {-5},
+ { 2, 6}, {-6}, {-7},
+};
+
+const vp56_tree_t vp56_pc_tree[] = {
+ { 4, 6},
+ { 2, 7}, {-0}, {-1},
+ { 4, 8},
+ { 2, 9}, {-2}, {-3},
+ { 2,10}, {-4}, {-5},
+};
+
+const uint8_t vp56_coeff_bias[] = { 5, 7, 11, 19, 35, 67 };
+const uint8_t vp56_coeff_bit_length[] = { 0, 1, 2, 3, 4, 10 };
diff --git a/contrib/ffmpeg/libavcodec/vp56data.h b/contrib/ffmpeg/libavcodec/vp56data.h
new file mode 100644
index 000000000..dbf92dd68
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vp56data.h
@@ -0,0 +1,248 @@
+/**
+ * @file vp56data.h
+ * VP5 and VP6 compatible video decoder (common data)
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef VP56DATA_H
+#define VP56DATA_H
+
+#include "common.h"
+
+typedef enum {
+ VP56_FRAME_CURRENT = 0,
+ VP56_FRAME_PREVIOUS = 1,
+ VP56_FRAME_GOLDEN = 2,
+} vp56_frame_t;
+
+typedef enum {
+ VP56_MB_INTER_NOVEC_PF = 0, /**< Inter MB, no vector, from previous frame */
+ VP56_MB_INTRA = 1, /**< Intra MB */
+ VP56_MB_INTER_DELTA_PF = 2, /**< Inter MB, above/left vector + delta, from previous frame */
+ VP56_MB_INTER_V1_PF = 3, /**< Inter MB, first vector, from previous frame */
+ VP56_MB_INTER_V2_PF = 4, /**< Inter MB, second vector, from previous frame */
+ VP56_MB_INTER_NOVEC_GF = 5, /**< Inter MB, no vector, from golden frame */
+ VP56_MB_INTER_DELTA_GF = 6, /**< Inter MB, above/left vector + delta, from golden frame */
+ VP56_MB_INTER_4V = 7, /**< Inter MB, 4 vectors, from previous frame */
+ VP56_MB_INTER_V1_GF = 8, /**< Inter MB, first vector, from golden frame */
+ VP56_MB_INTER_V2_GF = 9, /**< Inter MB, second vector, from golden frame */
+} vp56_mb_t;
+
+typedef struct {
+ int8_t val;
+ int8_t prob_idx;
+} vp56_tree_t;
+
+extern const uint8_t vp56_b6to3[];
+extern const uint8_t vp56_b6to4[];
+extern const uint8_t vp56_coeff_parse_table[6][11];
+extern const uint8_t vp56_def_mb_types_stats[3][10][2];
+extern const vp56_tree_t vp56_pva_tree[];
+extern const vp56_tree_t vp56_pc_tree[];
+extern const uint8_t vp56_coeff_bias[];
+extern const uint8_t vp56_coeff_bit_length[];
+
+static const vp56_frame_t vp56_reference_frame[] = {
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_NOVEC_PF */
+ VP56_FRAME_CURRENT, /* VP56_MB_INTRA */
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_DELTA_PF */
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_V1_PF */
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_V2_PF */
+ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_NOVEC_GF */
+ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_DELTA_GF */
+ VP56_FRAME_PREVIOUS, /* VP56_MB_INTER_4V */
+ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_V1_GF */
+ VP56_FRAME_GOLDEN, /* VP56_MB_INTER_V2_GF */
+};
+
+static const uint8_t vp56_ac_dequant[64] = {
+ 94, 92, 90, 88, 86, 82, 78, 74,
+ 70, 66, 62, 58, 54, 53, 52, 51,
+ 50, 49, 48, 47, 46, 45, 44, 43,
+ 42, 40, 39, 37, 36, 35, 34, 33,
+ 32, 31, 30, 29, 28, 27, 26, 25,
+ 24, 23, 22, 21, 20, 19, 18, 17,
+ 16, 15, 14, 13, 12, 11, 10, 9,
+ 8, 7, 6, 5, 4, 3, 2, 1,
+};
+
+static const uint8_t vp56_dc_dequant[64] = {
+ 47, 47, 47, 47, 45, 43, 43, 43,
+ 43, 43, 42, 41, 41, 40, 40, 40,
+ 40, 35, 35, 35, 35, 33, 33, 33,
+ 33, 32, 32, 32, 27, 27, 26, 26,
+ 25, 25, 24, 24, 23, 23, 19, 19,
+ 19, 19, 18, 18, 17, 16, 16, 16,
+ 16, 16, 15, 11, 11, 11, 10, 10,
+ 9, 8, 7, 5, 3, 3, 2, 2,
+};
+
+static const uint8_t vp56_pre_def_mb_type_stats[16][3][10][2] = {
+ { { { 9, 15 }, { 32, 25 }, { 7, 19 }, { 9, 21 }, { 1, 12 },
+ { 14, 12 }, { 3, 18 }, { 14, 23 }, { 3, 10 }, { 0, 4 }, },
+ { { 41, 22 }, { 1, 0 }, { 1, 31 }, { 0, 0 }, { 0, 0 },
+ { 0, 1 }, { 1, 7 }, { 0, 1 }, { 98, 25 }, { 4, 10 }, },
+ { { 2, 3 }, { 2, 3 }, { 0, 2 }, { 0, 2 }, { 0, 0 },
+ { 11, 4 }, { 1, 4 }, { 0, 2 }, { 3, 2 }, { 0, 4 }, }, },
+ { { { 48, 39 }, { 1, 2 }, { 11, 27 }, { 29, 44 }, { 7, 27 },
+ { 1, 4 }, { 0, 3 }, { 1, 6 }, { 1, 2 }, { 0, 0 }, },
+ { { 123, 37 }, { 6, 4 }, { 1, 27 }, { 0, 0 }, { 0, 0 },
+ { 5, 8 }, { 1, 7 }, { 0, 1 }, { 12, 10 }, { 0, 2 }, },
+ { { 49, 46 }, { 3, 4 }, { 7, 31 }, { 42, 41 }, { 0, 0 },
+ { 2, 6 }, { 1, 7 }, { 1, 4 }, { 2, 4 }, { 0, 1 }, }, },
+ { { { 21, 32 }, { 1, 2 }, { 4, 10 }, { 32, 43 }, { 6, 23 },
+ { 2, 3 }, { 1, 19 }, { 1, 6 }, { 12, 21 }, { 0, 7 }, },
+ { { 26, 14 }, { 14, 12 }, { 0, 24 }, { 0, 0 }, { 0, 0 },
+ { 55, 17 }, { 1, 9 }, { 0, 36 }, { 5, 7 }, { 1, 3 }, },
+ { { 26, 25 }, { 1, 1 }, { 2, 10 }, { 67, 39 }, { 0, 0 },
+ { 1, 1 }, { 0, 14 }, { 0, 2 }, { 31, 26 }, { 1, 6 }, }, },
+ { { { 69, 83 }, { 0, 0 }, { 0, 2 }, { 10, 29 }, { 3, 12 },
+ { 0, 1 }, { 0, 3 }, { 0, 3 }, { 2, 2 }, { 0, 0 }, },
+ { { 209, 5 }, { 0, 0 }, { 0, 27 }, { 0, 0 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, },
+ { { 103, 46 }, { 1, 2 }, { 2, 10 }, { 33, 42 }, { 0, 0 },
+ { 1, 4 }, { 0, 3 }, { 0, 1 }, { 1, 3 }, { 0, 0 }, }, },
+ { { { 11, 20 }, { 1, 4 }, { 18, 36 }, { 43, 48 }, { 13, 35 },
+ { 0, 2 }, { 0, 5 }, { 3, 12 }, { 1, 2 }, { 0, 0 }, },
+ { { 2, 5 }, { 4, 5 }, { 0, 121 }, { 0, 0 }, { 0, 0 },
+ { 0, 3 }, { 2, 4 }, { 1, 4 }, { 2, 2 }, { 0, 1 }, },
+ { { 14, 31 }, { 9, 13 }, { 14, 54 }, { 22, 29 }, { 0, 0 },
+ { 2, 6 }, { 4, 18 }, { 6, 13 }, { 1, 5 }, { 0, 1 }, }, },
+ { { { 70, 44 }, { 0, 1 }, { 2, 10 }, { 37, 46 }, { 8, 26 },
+ { 0, 2 }, { 0, 2 }, { 0, 2 }, { 0, 1 }, { 0, 0 }, },
+ { { 175, 5 }, { 0, 1 }, { 0, 48 }, { 0, 0 }, { 0, 0 },
+ { 0, 2 }, { 0, 1 }, { 0, 2 }, { 0, 1 }, { 0, 0 }, },
+ { { 85, 39 }, { 0, 0 }, { 1, 9 }, { 69, 40 }, { 0, 0 },
+ { 0, 1 }, { 0, 3 }, { 0, 1 }, { 2, 3 }, { 0, 0 }, }, },
+ { { { 8, 15 }, { 0, 1 }, { 8, 21 }, { 74, 53 }, { 22, 42 },
+ { 0, 1 }, { 0, 2 }, { 0, 3 }, { 1, 2 }, { 0, 0 }, },
+ { { 83, 5 }, { 2, 3 }, { 0, 102 }, { 0, 0 }, { 0, 0 },
+ { 1, 3 }, { 0, 2 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, },
+ { { 31, 28 }, { 0, 0 }, { 3, 14 }, { 130, 34 }, { 0, 0 },
+ { 0, 1 }, { 0, 3 }, { 0, 1 }, { 3, 3 }, { 0, 1 }, }, },
+ { { { 141, 42 }, { 0, 0 }, { 1, 4 }, { 11, 24 }, { 1, 11 },
+ { 0, 1 }, { 0, 1 }, { 0, 2 }, { 0, 0 }, { 0, 0 }, },
+ { { 233, 6 }, { 0, 0 }, { 0, 8 }, { 0, 0 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 1 }, { 0, 0 }, },
+ { { 171, 25 }, { 0, 0 }, { 1, 5 }, { 25, 21 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, }, },
+ { { { 8, 19 }, { 4, 10 }, { 24, 45 }, { 21, 37 }, { 9, 29 },
+ { 0, 3 }, { 1, 7 }, { 11, 25 }, { 0, 2 }, { 0, 1 }, },
+ { { 34, 16 }, { 112, 21 }, { 1, 28 }, { 0, 0 }, { 0, 0 },
+ { 6, 8 }, { 1, 7 }, { 0, 3 }, { 2, 5 }, { 0, 2 }, },
+ { { 17, 21 }, { 68, 29 }, { 6, 15 }, { 13, 22 }, { 0, 0 },
+ { 6, 12 }, { 3, 14 }, { 4, 10 }, { 1, 7 }, { 0, 3 }, }, },
+ { { { 46, 42 }, { 0, 1 }, { 2, 10 }, { 54, 51 }, { 10, 30 },
+ { 0, 2 }, { 0, 2 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, },
+ { { 159, 35 }, { 2, 2 }, { 0, 25 }, { 0, 0 }, { 0, 0 },
+ { 3, 6 }, { 0, 5 }, { 0, 1 }, { 4, 4 }, { 0, 1 }, },
+ { { 51, 39 }, { 0, 1 }, { 2, 12 }, { 91, 44 }, { 0, 0 },
+ { 0, 2 }, { 0, 3 }, { 0, 1 }, { 2, 3 }, { 0, 1 }, }, },
+ { { { 28, 32 }, { 0, 0 }, { 3, 10 }, { 75, 51 }, { 14, 33 },
+ { 0, 1 }, { 0, 2 }, { 0, 1 }, { 1, 2 }, { 0, 0 }, },
+ { { 75, 39 }, { 5, 7 }, { 2, 48 }, { 0, 0 }, { 0, 0 },
+ { 3, 11 }, { 2, 16 }, { 1, 4 }, { 7, 10 }, { 0, 2 }, },
+ { { 81, 25 }, { 0, 0 }, { 2, 9 }, { 106, 26 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, }, },
+ { { { 100, 46 }, { 0, 1 }, { 3, 9 }, { 21, 37 }, { 5, 20 },
+ { 0, 1 }, { 0, 2 }, { 1, 2 }, { 0, 1 }, { 0, 0 }, },
+ { { 212, 21 }, { 0, 1 }, { 0, 9 }, { 0, 0 }, { 0, 0 },
+ { 1, 2 }, { 0, 2 }, { 0, 0 }, { 2, 2 }, { 0, 0 }, },
+ { { 140, 37 }, { 0, 1 }, { 1, 8 }, { 24, 33 }, { 0, 0 },
+ { 1, 2 }, { 0, 2 }, { 0, 1 }, { 1, 2 }, { 0, 0 }, }, },
+ { { { 27, 29 }, { 0, 1 }, { 9, 25 }, { 53, 51 }, { 12, 34 },
+ { 0, 1 }, { 0, 3 }, { 1, 5 }, { 0, 2 }, { 0, 0 }, },
+ { { 4, 2 }, { 0, 0 }, { 0, 172 }, { 0, 0 }, { 0, 0 },
+ { 0, 1 }, { 0, 2 }, { 0, 0 }, { 2, 0 }, { 0, 0 }, },
+ { { 14, 23 }, { 1, 3 }, { 11, 53 }, { 90, 31 }, { 0, 0 },
+ { 0, 3 }, { 1, 5 }, { 2, 6 }, { 1, 2 }, { 0, 0 }, }, },
+ { { { 80, 38 }, { 0, 0 }, { 1, 4 }, { 69, 33 }, { 5, 16 },
+ { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 1 }, { 0, 0 }, },
+ { { 187, 22 }, { 1, 1 }, { 0, 17 }, { 0, 0 }, { 0, 0 },
+ { 3, 6 }, { 0, 4 }, { 0, 1 }, { 4, 4 }, { 0, 1 }, },
+ { { 123, 29 }, { 0, 0 }, { 1, 7 }, { 57, 30 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, }, },
+ { { { 16, 20 }, { 0, 0 }, { 2, 8 }, { 104, 49 }, { 15, 33 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, },
+ { { 133, 6 }, { 1, 2 }, { 1, 70 }, { 0, 0 }, { 0, 0 },
+ { 0, 2 }, { 0, 4 }, { 0, 3 }, { 1, 1 }, { 0, 0 }, },
+ { { 13, 14 }, { 0, 0 }, { 4, 20 }, { 175, 20 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 1, 1 }, { 0, 0 }, }, },
+ { { { 194, 16 }, { 0, 0 }, { 1, 1 }, { 1, 9 }, { 1, 3 },
+ { 0, 0 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, },
+ { { 251, 1 }, { 0, 0 }, { 0, 2 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, },
+ { { 202, 23 }, { 0, 0 }, { 1, 3 }, { 2, 9 }, { 0, 0 },
+ { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 0 }, { 0, 0 }, }, },
+};
+
+static const uint8_t vp56_filter_threshold[] = {
+ 14, 14, 13, 13, 12, 12, 10, 10,
+ 10, 10, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 7, 7, 7, 7,
+ 7, 7, 6, 6, 6, 6, 6, 6,
+ 5, 5, 5, 5, 4, 4, 4, 4,
+ 4, 4, 4, 3, 3, 3, 3, 2,
+};
+
+static const uint8_t vp56_mb_type_model_model[] = {
+ 171, 83, 199, 140, 125, 104,
+};
+
+static const vp56_tree_t vp56_pmbtm_tree[] = {
+ { 4, 0},
+ { 2, 1}, {-8}, {-4},
+ { 8, 2},
+ { 6, 3},
+ { 4, 4},
+ { 2, 5}, {-24}, {-20}, {-16}, {-12}, {-0},
+};
+
+static const vp56_tree_t vp56_pmbt_tree[] = {
+ { 8, 1},
+ { 4, 2},
+ { 2, 4}, {-VP56_MB_INTER_NOVEC_PF}, {-VP56_MB_INTER_DELTA_PF},
+ { 2, 5}, {-VP56_MB_INTER_V1_PF}, {-VP56_MB_INTER_V2_PF},
+ { 4, 3},
+ { 2, 6}, {-VP56_MB_INTRA}, {-VP56_MB_INTER_4V},
+ { 4, 7},
+ { 2, 8}, {-VP56_MB_INTER_NOVEC_GF}, {-VP56_MB_INTER_DELTA_GF},
+ { 2, 9}, {-VP56_MB_INTER_V1_GF}, {-VP56_MB_INTER_V2_GF},
+};
+
+/* relative pos of surrounding blocks, from closest to farthest */
+static const int8_t vp56_candidate_predictor_pos[12][2] = {
+ { 0, -1 },
+ { -1, 0 },
+ { -1, -1 },
+ { 1, -1 },
+ { 0, -2 },
+ { -2, 0 },
+ { -2, -1 },
+ { -1, -2 },
+ { 1, -2 },
+ { 2, -1 },
+ { -2, -2 },
+ { 2, -2 },
+};
+
+#endif /* VP56DATA_H */
diff --git a/contrib/ffmpeg/libavcodec/vp5data.h b/contrib/ffmpeg/libavcodec/vp5data.h
new file mode 100644
index 000000000..effc17c2c
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vp5data.h
@@ -0,0 +1,173 @@
+/**
+ * @file vp5data.h
+ * VP5 compatible video decoder
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef VP5DATA_H
+#define VP5DATA_H
+
+static const uint8_t vp5_coeff_groups[] = {
+ -1, 0, 1, 1, 2, 1, 1, 2,
+ 2, 1, 1, 2, 2, 2, 1, 2,
+ 2, 2, 2, 2, 1, 1, 2, 2,
+ 3, 3, 4, 3, 4, 4, 4, 3,
+ 3, 3, 3, 3, 4, 3, 3, 3,
+ 4, 4, 4, 4, 4, 3, 3, 4,
+ 4, 4, 3, 4, 4, 4, 4, 4,
+ 4, 4, 5, 5, 5, 5, 5, 5,
+};
+
+static const uint8_t vp5_vmc_pct[2][11] = {
+ { 243, 220, 251, 253, 237, 232, 241, 245, 247, 251, 253 },
+ { 235, 211, 246, 249, 234, 231, 248, 249, 252, 252, 254 },
+};
+
+static const uint8_t vp5_dccv_pct[2][11] = {
+ { 146, 197, 181, 207, 232, 243, 238, 251, 244, 250, 249 },
+ { 179, 219, 214, 240, 250, 254, 244, 254, 254, 254, 254 },
+};
+
+static const uint8_t vp5_ract_pct[3][2][6][11] = {
+ { { { 227, 246, 230, 247, 244, 254, 254, 254, 254, 254, 254 },
+ { 202, 254, 209, 231, 231, 249, 249, 253, 254, 254, 254 },
+ { 206, 254, 225, 242, 241, 251, 253, 254, 254, 254, 254 },
+ { 235, 254, 241, 253, 252, 254, 254, 254, 254, 254, 254 },
+ { 234, 254, 248, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } },
+ { { 240, 254, 248, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 238, 254, 240, 253, 254, 254, 254, 254, 254, 254, 254 },
+ { 244, 254, 251, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } } },
+ { { { 206, 203, 227, 239, 247, 254, 253, 254, 254, 254, 254 },
+ { 207, 199, 220, 236, 243, 252, 252, 254, 254, 254, 254 },
+ { 212, 219, 230, 243, 244, 253, 252, 254, 254, 254, 254 },
+ { 236, 237, 247, 252, 253, 254, 254, 254, 254, 254, 254 },
+ { 240, 240, 248, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } },
+ { { 230, 233, 249, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 238, 238, 250, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 248, 251, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } } },
+ { { { 225, 239, 227, 231, 244, 253, 243, 254, 254, 253, 254 },
+ { 232, 234, 224, 228, 242, 249, 242, 252, 251, 251, 254 },
+ { 235, 249, 238, 240, 251, 254, 249, 254, 253, 253, 254 },
+ { 249, 253, 251, 250, 254, 254, 254, 254, 254, 254, 254 },
+ { 251, 250, 249, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } },
+ { { 243, 244, 250, 250, 254, 254, 254, 254, 254, 254, 254 },
+ { 249, 248, 250, 253, 254, 254, 254, 254, 254, 254, 254 },
+ { 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 },
+ { 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254 } } },
+};
+
+static const int16_t vp5_dccv_lc[5][36][2] = {
+ { {154, 61}, {141, 54}, { 90, 45}, { 54, 34}, { 54, 13}, {128, 109},
+ {136, 54}, {148, 45}, { 92, 41}, { 54, 33}, { 51, 15}, { 87, 113},
+ { 87, 44}, { 97, 40}, { 67, 36}, { 46, 29}, { 41, 15}, { 64, 80},
+ { 59, 33}, { 61, 31}, { 51, 28}, { 44, 22}, { 33, 12}, { 49, 63},
+ { 69, 12}, { 59, 16}, { 46, 14}, { 31, 13}, { 26, 6}, { 92, 26},
+ {128, 108}, { 77, 119}, { 54, 84}, { 26, 71}, { 87, 19}, { 95, 155} },
+ { {154, 4}, {182, 0}, {159, -8}, {128, -5}, {143, -5}, {187, 55},
+ {182, 0}, {228, -3}, {187, -7}, {174, -9}, {189, -11}, {169, 79},
+ {161, -9}, {192, -8}, {187, -9}, {169, -10}, {136, -9}, {184, 40},
+ {164, -11}, {179, -10}, {174, -10}, {161, -10}, {115, -7}, {197, 20},
+ {195, -11}, {195, -11}, {146, -10}, {110, -6}, { 95, -4}, {195, 39},
+ {182, 55}, {172, 77}, {177, 37}, {169, 29}, {172, 52}, { 92, 162} },
+ { {174, 80}, {164, 80}, { 95, 80}, { 46, 66}, { 56, 24}, { 36, 193},
+ {164, 80}, {166, 77}, {105, 76}, { 49, 68}, { 46, 31}, { 49, 186},
+ { 97, 78}, {110, 74}, { 72, 72}, { 44, 60}, { 33, 30}, { 69, 131},
+ { 61, 61}, { 69, 63}, { 51, 57}, { 31, 48}, { 26, 27}, { 64, 89},
+ { 67, 23}, { 51, 32}, { 36, 33}, { 26, 28}, { 20, 12}, { 44, 68},
+ { 26, 197}, { 41, 189}, { 61, 129}, { 28, 103}, { 49, 52}, {-12, 245} },
+ { {102, 141}, { 79, 166}, { 72, 162}, { 97, 125}, {179, 4}, {307, 0},
+ { 72, 168}, { 69, 175}, { 84, 160}, {105, 127}, {148, 34}, {310, 0},
+ { 84, 151}, { 82, 161}, { 87, 153}, { 87, 135}, {115, 51}, {317, 0},
+ { 97, 125}, {102, 131}, {105, 125}, { 87, 122}, { 84, 64}, { 54, 184},
+ {166, 18}, {146, 43}, {125, 51}, { 90, 64}, { 95, 7}, { 38, 154},
+ {294, 0}, { 13, 225}, { 10, 225}, { 67, 168}, { 0, 167}, {161, 94} },
+ { {172, 76}, {172, 75}, {136, 80}, { 64, 98}, { 74, 67}, {315, 0},
+ {169, 76}, {207, 56}, {164, 66}, { 97, 80}, { 67, 72}, {328, 0},
+ {136, 80}, {187, 53}, {154, 62}, { 72, 85}, { -2, 105}, {305, 0},
+ { 74, 91}, {128, 64}, {113, 64}, { 61, 77}, { 41, 75}, {259, 0},
+ { 46, 84}, { 51, 81}, { 28, 89}, { 31, 78}, { 23, 77}, {202, 0},
+ {323, 0}, {323, 0}, {300, 0}, {236, 0}, {195, 0}, {328, 0} },
+};
+
+static const int16_t vp5_ract_lc[3][3][5][6][2] = {
+ { { { {276, 0}, {238, 0}, {195, 0}, {156, 0}, {113, 0}, {274, 0} },
+ { { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1} },
+ { {192, 59}, {182, 50}, {141, 48}, {110, 40}, { 92, 19}, {125,128} },
+ { {169, 87}, {169, 83}, {184, 62}, {220, 16}, {184, 0}, {264, 0} },
+ { {212, 40}, {212, 36}, {169, 49}, {174, 27}, { 8,120}, {182, 71} } },
+ { { {259, 10}, {197, 19}, {143, 22}, {123, 16}, {110, 8}, {133, 88} },
+ { { 0, 1}, {256, 0}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1} },
+ { {207, 46}, {187, 50}, { 97, 83}, { 23,100}, { 41, 56}, { 56,188} },
+ { {166, 90}, {146,108}, {161, 88}, {136, 95}, {174, 0}, {266, 0} },
+ { {264, 7}, {243, 18}, {184, 43}, {-14,154}, { 20,112}, { 20,199} } },
+ { { {230, 26}, {197, 22}, {159, 20}, {146, 12}, {136, 4}, { 54,162} },
+ { { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1}, { 0, 1} },
+ { {192, 59}, {156, 72}, { 84,101}, { 49,101}, { 79, 47}, { 79,167} },
+ { {138,115}, {136,116}, {166, 80}, {238, 0}, {195, 0}, {261, 0} },
+ { {225, 33}, {205, 42}, {159, 61}, { 79, 96}, { 92, 66}, { 28,195} } },
+ }, {
+ { { {200, 37}, {197, 18}, {159, 13}, {143, 7}, {102, 5}, {123,126} },
+ { {197, 3}, {220, -9}, {210,-12}, {187, -6}, {151, -2}, {174, 80} },
+ { {200, 53}, {187, 47}, {159, 40}, {118, 38}, {100, 18}, {141,111} },
+ { {179, 78}, {166, 86}, {197, 50}, {207, 27}, {187, 0}, {115,139} },
+ { {218, 34}, {220, 29}, {174, 46}, {128, 61}, { 54, 89}, {187, 65} } },
+ { { {238, 14}, {197, 18}, {125, 26}, { 90, 25}, { 82, 13}, {161, 86} },
+ { {189, 1}, {205, -2}, {156, -4}, {143, -4}, {146, -4}, {172, 72} },
+ { {230, 31}, {192, 45}, {102, 76}, { 38, 85}, { 56, 41}, { 64,173} },
+ { {166, 91}, {141,111}, {128,116}, {118,109}, {177, 0}, { 23,222} },
+ { {253, 14}, {236, 21}, {174, 49}, { 33,118}, { 44, 93}, { 23,187} } },
+ { { {218, 28}, {179, 28}, {118, 35}, { 95, 30}, { 72, 24}, {128,108} },
+ { {187, 1}, {174, -1}, {125, -1}, {110, -1}, {108, -1}, {202, 52} },
+ { {197, 53}, {146, 75}, { 46,118}, { 33,103}, { 64, 50}, {118,126} },
+ { {138,114}, {128,122}, {161, 86}, {243, -6}, {195, 0}, { 38,210} },
+ { {215, 39}, {179, 58}, { 97,101}, { 95, 85}, { 87, 70}, { 69,152} } },
+ }, {
+ { { {236, 24}, {205, 18}, {172, 12}, {154, 6}, {125, 1}, {169, 75} },
+ { {187, 4}, {230, -2}, {228, -4}, {236, -4}, {241, -2}, {192, 66} },
+ { {200, 46}, {187, 42}, {159, 34}, {136, 25}, {105, 10}, {179, 62} },
+ { {207, 55}, {192, 63}, {192, 54}, {195, 36}, {177, 1}, {143, 98} },
+ { {225, 27}, {207, 34}, {200, 30}, {131, 57}, { 97, 60}, {197, 45} } },
+ { { {271, 8}, {218, 13}, {133, 19}, { 90, 19}, { 72, 7}, {182, 51} },
+ { {179, 1}, {225, -1}, {154, -2}, {110, -1}, { 92, 0}, {195, 41} },
+ { {241, 26}, {189, 40}, { 82, 64}, { 33, 60}, { 67, 17}, {120, 94} },
+ { {192, 68}, {151, 94}, {146, 90}, {143, 72}, {161, 0}, {113,128} },
+ { {256, 12}, {218, 29}, {166, 48}, { 44, 99}, { 31, 87}, {148, 78} } },
+ { { {238, 20}, {184, 22}, {113, 27}, { 90, 22}, { 74, 9}, {192, 37} },
+ { {184, 0}, {215, -1}, {141, -1}, { 97, 0}, { 49, 0}, {264, 13} },
+ { {182, 51}, {138, 61}, { 95, 63}, { 54, 59}, { 64, 25}, {200, 45} },
+ { {179, 75}, {156, 87}, {174, 65}, {177, 44}, {174, 0}, {164, 85} },
+ { {195, 45}, {148, 65}, {105, 79}, { 95, 72}, { 87, 60}, {169, 63} } },
+ }
+};
+
+static const uint8_t vp5_coord_div[] = { 2, 2, 2, 2, 4, 4 };
+
+#endif /* VP5DATA_H */
diff --git a/contrib/ffmpeg/libavcodec/vp6.c b/contrib/ffmpeg/libavcodec/vp6.c
new file mode 100644
index 000000000..b7ff004cc
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vp6.c
@@ -0,0 +1,522 @@
+/**
+ * @file vp6.c
+ * VP6 compatible video decoder
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
+ * The VP6F decoder accepts an optional 1-byte extradata. It is composed of:
+ * - upper 4 bits: difference between encoded width and visible width
+ * - lower 4 bits: difference between encoded height and visible height
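+ *   (e.g. an extradata byte of 0x21 means the visible frame is 2 pixels
+ *   narrower and 1 pixel shorter than the coded frame)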
+ */
+
+#include <stdlib.h>
+
+#include "avcodec.h"
+#include "dsputil.h"
+#include "bitstream.h"
+#include "mpegvideo.h"
+
+#include "vp56.h"
+#include "vp56data.h"
+#include "vp6data.h"
+
+
+static int vp6_parse_header(vp56_context_t *s, uint8_t *buf, int buf_size,
+ int *golden_frame)
+{
+ vp56_range_coder_t *c = &s->c;
+ int parse_filter_info;
+ int rows, cols;
+ int res = 1;
+
+ if (buf[0] & 1)
+ return 0;
+
+ s->frames[VP56_FRAME_CURRENT].key_frame = !(buf[0] & 0x80);
+ vp56_init_dequant(s, (buf[0] >> 1) & 0x3F);
+
+ if (s->frames[VP56_FRAME_CURRENT].key_frame) {
+ if ((buf[1] & 0xFE) != 0x46) /* would be 0x36 for VP61 */
+ return 0;
+ if (buf[1] & 1) {
+ av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n");
+ return 0;
+ }
+
+ rows = buf[2]; /* number of stored macroblock rows */
+ cols = buf[3]; /* number of stored macroblock cols */
+ /* buf[4] is number of displayed macroblock rows */
+ /* buf[5] is number of displayed macroblock cols */
+
+ if (16*cols != s->avctx->coded_width ||
+ 16*rows != s->avctx->coded_height) {
+ avcodec_set_dimensions(s->avctx, 16*cols, 16*rows);
+ if (s->avctx->extradata_size == 1) {
+ s->avctx->width -= s->avctx->extradata[0] >> 4;
+ s->avctx->height -= s->avctx->extradata[0] & 0x0F;
+ }
+ res = 2;
+ }
+
+ vp56_init_range_decoder(c, buf+6, buf_size-6);
+ vp56_rac_gets(c, 2);
+
+ parse_filter_info = 1;
+ } else {
+ vp56_init_range_decoder(c, buf+1, buf_size-1);
+
+ *golden_frame = vp56_rac_get(c);
+ s->deblock_filtering = vp56_rac_get(c);
+ if (s->deblock_filtering)
+ vp56_rac_get(c);
+ parse_filter_info = vp56_rac_get(c);
+ }
+
+ if (parse_filter_info) {
+ if (vp56_rac_get(c)) {
+ s->filter_mode = 2;
+ s->sample_variance_threshold = vp56_rac_gets(c, 5);
+ s->max_vector_length = 2 << vp56_rac_gets(c, 3);
+ } else if (vp56_rac_get(c)) {
+ s->filter_mode = 1;
+ } else {
+ s->filter_mode = 0;
+ }
+ s->filter_selection = vp56_rac_gets(c, 4);
+ }
+
+ vp56_rac_get(c);
+ return res;
+}
+
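+/* rebuild coeff_index_to_pos: DCT position 0 first, then the remaining */
+/* positions sorted by their coeff_reorder[] value (raster order on ties) */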
+static void vp6_coeff_order_table_init(vp56_context_t *s)
+{
+ int i, pos, idx = 1;
+
+ s->coeff_index_to_pos[0] = 0;
+ for (i=0; i<16; i++)
+ for (pos=1; pos<64; pos++)
+ if (s->coeff_reorder[pos] == i)
+ s->coeff_index_to_pos[idx++] = pos;
+}
+
+static void vp6_default_models_init(vp56_context_t *s)
+{
+ s->vector_model_dct[0] = 0xA2;
+ s->vector_model_dct[1] = 0xA4;
+ s->vector_model_sig[0] = 0x80;
+ s->vector_model_sig[1] = 0x80;
+
+ memcpy(s->mb_types_stats, vp56_def_mb_types_stats, sizeof(s->mb_types_stats));
+ memcpy(s->vector_model_fdv, vp6_def_fdv_vector_model, sizeof(s->vector_model_fdv));
+ memcpy(s->vector_model_pdv, vp6_def_pdv_vector_model, sizeof(s->vector_model_pdv));
+ memcpy(s->coeff_model_runv, vp6_def_runv_coeff_model, sizeof(s->coeff_model_runv));
+ memcpy(s->coeff_reorder, vp6_def_coeff_reorder, sizeof(s->coeff_reorder));
+
+ vp6_coeff_order_table_init(s);
+}
+
+static void vp6_parse_vector_models(vp56_context_t *s)
+{
+ vp56_range_coder_t *c = &s->c;
+ int comp, node;
+
+ for (comp=0; comp<2; comp++) {
+ if (vp56_rac_get_prob(c, vp6_sig_dct_pct[comp][0]))
+ s->vector_model_dct[comp] = vp56_rac_gets_nn(c, 7);
+ if (vp56_rac_get_prob(c, vp6_sig_dct_pct[comp][1]))
+ s->vector_model_sig[comp] = vp56_rac_gets_nn(c, 7);
+ }
+
+ for (comp=0; comp<2; comp++)
+ for (node=0; node<7; node++)
+ if (vp56_rac_get_prob(c, vp6_pdv_pct[comp][node]))
+ s->vector_model_pdv[comp][node] = vp56_rac_gets_nn(c, 7);
+
+ for (comp=0; comp<2; comp++)
+ for (node=0; node<8; node++)
+ if (vp56_rac_get_prob(c, vp6_fdv_pct[comp][node]))
+ s->vector_model_fdv[comp][node] = vp56_rac_gets_nn(c, 7);
+}
+
+static void vp6_parse_coeff_models(vp56_context_t *s)
+{
+ vp56_range_coder_t *c = &s->c;
+ int def_prob[11];
+ int node, cg, ctx, pos;
+ int ct; /* code type */
+ int pt; /* plane type (0 for Y, 1 for U or V) */
+
+ memset(def_prob, 0x80, sizeof(def_prob));
+
+ for (pt=0; pt<2; pt++)
+ for (node=0; node<11; node++)
+ if (vp56_rac_get_prob(c, vp6_dccv_pct[pt][node])) {
+ def_prob[node] = vp56_rac_gets_nn(c, 7);
+ s->coeff_model_dccv[pt][node] = def_prob[node];
+ } else if (s->frames[VP56_FRAME_CURRENT].key_frame) {
+ s->coeff_model_dccv[pt][node] = def_prob[node];
+ }
+
+ if (vp56_rac_get(c)) {
+ for (pos=1; pos<64; pos++)
+ if (vp56_rac_get_prob(c, vp6_coeff_reorder_pct[pos]))
+ s->coeff_reorder[pos] = vp56_rac_gets(c, 4);
+ vp6_coeff_order_table_init(s);
+ }
+
+ for (cg=0; cg<2; cg++)
+ for (node=0; node<14; node++)
+ if (vp56_rac_get_prob(c, vp6_runv_pct[cg][node]))
+ s->coeff_model_runv[cg][node] = vp56_rac_gets_nn(c, 7);
+
+ for (ct=0; ct<3; ct++)
+ for (pt=0; pt<2; pt++)
+ for (cg=0; cg<6; cg++)
+ for (node=0; node<11; node++)
+ if (vp56_rac_get_prob(c, vp6_ract_pct[ct][pt][cg][node])) {
+ def_prob[node] = vp56_rac_gets_nn(c, 7);
+ s->coeff_model_ract[pt][ct][cg][node] = def_prob[node];
+ } else if (s->frames[VP56_FRAME_CURRENT].key_frame) {
+ s->coeff_model_ract[pt][ct][cg][node] = def_prob[node];
+ }
+
+ /* coeff_model_dcct is a linear combination of coeff_model_dccv */
+ for (pt=0; pt<2; pt++)
+ for (ctx=0; ctx<3; ctx++)
+ for (node=0; node<5; node++)
+ s->coeff_model_dcct[pt][ctx][node] = clip(((s->coeff_model_dccv[pt][node] * vp6_dccv_lc[ctx][node][0] + 128) >> 8) + vp6_dccv_lc[ctx][node][1], 1, 255);
+}
+
+static void vp6_parse_vector_adjustment(vp56_context_t *s, vp56_mv_t *vect)
+{
+ vp56_range_coder_t *c = &s->c;
+ int comp;
+
+ *vect = (vp56_mv_t) {0,0};
+ if (s->vector_candidate_pos < 2)
+ *vect = s->vector_candidate[0];
+
+ for (comp=0; comp<2; comp++) {
+ int i, delta = 0;
+
+ if (vp56_rac_get_prob(c, s->vector_model_dct[comp])) {
+ static const uint8_t prob_order[] = {0, 1, 2, 7, 6, 5, 4};
+ for (i=0; i<sizeof(prob_order); i++) {
+ int j = prob_order[i];
+ delta |= vp56_rac_get_prob(c, s->vector_model_fdv[comp][j])<<j;
+ }
+ if (delta & 0xF0)
+ delta |= vp56_rac_get_prob(c, s->vector_model_fdv[comp][3])<<3;
+ else
+ delta |= 8;
+ } else {
+ delta = vp56_rac_get_tree(c, vp56_pva_tree,
+ s->vector_model_pdv[comp]);
+ }
+
+ if (delta && vp56_rac_get_prob(c, s->vector_model_sig[comp]))
+ delta = -delta;
+
+ if (!comp)
+ vect->x += delta;
+ else
+ vect->y += delta;
+ }
+}
+
+static void vp6_parse_coeff(vp56_context_t *s)
+{
+ vp56_range_coder_t *c = &s->c;
+ uint8_t *permute = s->scantable.permutated;
+ uint8_t *model, *model2, *model3;
+ int coeff, sign, coeff_idx;
+ int b, i, cg, idx, ctx;
+ int pt = 0; /* plane type (0 for Y, 1 for U or V) */
+
+ for (b=0; b<6; b++) {
+ int ct = 1; /* code type */
+ int run = 1;
+
+ if (b > 3) pt = 1;
+
+ ctx = s->left_block[vp56_b6to4[b]].not_null_dc
+ + s->above_blocks[s->above_block_idx[b]].not_null_dc;
+ model = s->coeff_model_dccv[pt];
+ model2 = s->coeff_model_dcct[pt][ctx];
+
+ for (coeff_idx=0; coeff_idx<64; ) {
+ if ((coeff_idx>1 && ct==0) || vp56_rac_get_prob(c, model2[0])) {
+ /* parse a coeff */
+ if (coeff_idx == 0) {
+ s->left_block[vp56_b6to4[b]].not_null_dc = 1;
+ s->above_blocks[s->above_block_idx[b]].not_null_dc = 1;
+ }
+
+ if (vp56_rac_get_prob(c, model2[2])) {
+ if (vp56_rac_get_prob(c, model2[3])) {
+ idx = vp56_rac_get_tree(c, vp56_pc_tree, model);
+ coeff = vp56_coeff_bias[idx];
+ for (i=vp56_coeff_bit_length[idx]; i>=0; i--)
+ coeff += vp56_rac_get_prob(c, vp56_coeff_parse_table[idx][i]) << i;
+ } else {
+ if (vp56_rac_get_prob(c, model2[4]))
+ coeff = 3 + vp56_rac_get_prob(c, model[5]);
+ else
+ coeff = 2;
+ }
+ ct = 2;
+ } else {
+ ct = 1;
+ coeff = 1;
+ }
+ sign = vp56_rac_get(c);
+ coeff = (coeff ^ -sign) + sign;
+ if (coeff_idx)
+ coeff *= s->dequant_ac;
+ idx = s->coeff_index_to_pos[coeff_idx];
+ s->block_coeff[b][permute[idx]] = coeff;
+ run = 1;
+ } else {
+ /* parse a run */
+ ct = 0;
+ if (coeff_idx == 0) {
+ s->left_block[vp56_b6to4[b]].not_null_dc = 0;
+ s->above_blocks[s->above_block_idx[b]].not_null_dc = 0;
+ } else {
+ if (!vp56_rac_get_prob(c, model2[1]))
+ break;
+
+ model3 = s->coeff_model_runv[coeff_idx >= 6];
+ run = vp56_rac_get_tree(c, vp6_pcr_tree, model3);
+ if (!run)
+ for (run=9, i=0; i<6; i++)
+ run += vp56_rac_get_prob(c, model3[i+8]) << i;
+ }
+ }
+
+ cg = vp6_coeff_groups[coeff_idx+=run];
+ model = model2 = s->coeff_model_ract[pt][ct][cg];
+ }
+ }
+}
+
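+/* when t < |v| < 2*t, reflect |v| to 2*t - |v| (keeping the sign of v); */
+/* all other values are returned unchanged */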
+static int vp6_adjust(int v, int t)
+{
+ int V = v, s = v >> 31;
+ V ^= s;
+ V -= s;
+ if (V-t-1 >= (unsigned)(t-1))
+ return v;
+ V = 2*t - V;
+ V += s;
+ V ^= s;
+ return V;
+}
+
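+/* variance of the 16 pixels sampled every other row and column of the 8x8 block */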
+static int vp6_block_variance(uint8_t *src, int stride)
+{
+ int sum = 0, square_sum = 0;
+ int y, x;
+
+ for (y=0; y<8; y+=2) {
+ for (x=0; x<8; x+=2) {
+ sum += src[x];
+ square_sum += src[x]*src[x];
+ }
+ src += 2*stride;
+ }
+ return (16*square_sum - sum*sum) / (16*16);
+}
+
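+/* 2-tap bilinear interpolation between src and src+delta, */
+/* weighted (8-weight)/8 and weight/8 */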
+static void vp6_filter_hv2(vp56_context_t *s, uint8_t *dst, uint8_t *src,
+ int stride, int delta, int16_t weight)
+{
+ s->dsp.put_pixels_tab[1][0](dst, src, stride, 8);
+ s->dsp.biweight_h264_pixels_tab[3](dst, src+delta, stride, 2,
+ 8-weight, weight, 0);
+}
+
+static void vp6_filter_hv4(uint8_t *dst, uint8_t *src, int stride,
+ int delta, const int16_t *weights)
+{
+ int x, y;
+
+ for (y=0; y<8; y++) {
+ for (x=0; x<8; x++) {
+ dst[x] = clip_uint8(( src[x-delta ] * weights[0]
+ + src[x ] * weights[1]
+ + src[x+delta ] * weights[2]
+ + src[x+2*delta] * weights[3] + 64) >> 7);
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+static void vp6_filter_diag2(vp56_context_t *s, uint8_t *dst, uint8_t *src,
+ int stride, int h_weight, int v_weight)
+{
+ uint8_t *tmp = s->edge_emu_buffer+16;
+ int x, xmax;
+
+ s->dsp.put_pixels_tab[1][0](tmp, src, stride, 8);
+ s->dsp.biweight_h264_pixels_tab[3](tmp, src+1, stride, 2,
+ 8-h_weight, h_weight, 0);
+ /* we need an 8x9 block to do the vertical filter, so compute one more line */
+ for (x=8*stride, xmax=x+8; x<xmax; x++)
+ tmp[x] = (src[x]*(8-h_weight) + src[x+1]*h_weight + 4) >> 3;
+
+ s->dsp.put_pixels_tab[1][0](dst, tmp, stride, 8);
+ s->dsp.biweight_h264_pixels_tab[3](dst, tmp+stride, stride, 2,
+ 8-v_weight, v_weight, 0);
+}
+
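+/* separable 4-tap filtering: horizontal pass into an 8x11 temporary buffer */
+/* (1 row above and 2 rows below the block), then vertical pass into dst */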
+static void vp6_filter_diag4(uint8_t *dst, uint8_t *src, int stride,
+ const int16_t *h_weights,const int16_t *v_weights)
+{
+ int x, y;
+ int tmp[8*11];
+ int *t = tmp;
+
+ src -= stride;
+
+ for (y=0; y<11; y++) {
+ for (x=0; x<8; x++) {
+ t[x] = clip_uint8(( src[x-1] * h_weights[0]
+ + src[x ] * h_weights[1]
+ + src[x+1] * h_weights[2]
+ + src[x+2] * h_weights[3] + 64) >> 7);
+ }
+ src += stride;
+ t += 8;
+ }
+
+ t = tmp + 8;
+ for (y=0; y<8; y++) {
+ for (x=0; x<8; x++) {
+ dst[x] = clip_uint8(( t[x-8 ] * v_weights[0]
+ + t[x ] * v_weights[1]
+ + t[x+8 ] * v_weights[2]
+ + t[x+16] * v_weights[3] + 64) >> 7);
+ }
+ dst += stride;
+ t += 8;
+ }
+}
+
+static void vp6_filter(vp56_context_t *s, uint8_t *dst, uint8_t *src,
+ int offset1, int offset2, int stride,
+ vp56_mv_t mv, int mask, int select, int luma)
+{
+ int filter4 = 0;
+ int x8 = mv.x & mask;
+ int y8 = mv.y & mask;
+
+ if (luma) {
+ x8 *= 2;
+ y8 *= 2;
+ filter4 = s->filter_mode;
+ if (filter4 == 2) {
+ if (s->max_vector_length &&
+ (FFABS(mv.x) > s->max_vector_length ||
+ FFABS(mv.y) > s->max_vector_length)) {
+ filter4 = 0;
+ } else if (!s->sample_variance_threshold
+ || (vp6_block_variance(src+offset1, stride)
+ < s->sample_variance_threshold)) {
+ filter4 = 0;
+ }
+ }
+ }
+
+ if ((y8 && (offset2-offset1)*s->flip<0) || (!y8 && offset1 > offset2)) {
+ offset1 = offset2;
+ }
+
+ if (filter4) {
+ if (!y8) { /* left or right combine */
+ vp6_filter_hv4(dst, src+offset1, stride, 1,
+ vp6_block_copy_filter[select][x8]);
+ } else if (!x8) { /* above or below combine */
+ vp6_filter_hv4(dst, src+offset1, stride, stride,
+ vp6_block_copy_filter[select][y8]);
+ } else if ((mv.x^mv.y) >> 31) { /* lower-left or upper-right combine */
+ vp6_filter_diag4(dst, src+offset1-1, stride,
+ vp6_block_copy_filter[select][x8],
+ vp6_block_copy_filter[select][y8]);
+ } else { /* lower-right or upper-left combine */
+ vp6_filter_diag4(dst, src+offset1, stride,
+ vp6_block_copy_filter[select][x8],
+ vp6_block_copy_filter[select][y8]);
+ }
+ } else {
+ if (!y8) { /* left or right combine */
+ vp6_filter_hv2(s, dst, src+offset1, stride, 1, x8);
+ } else if (!x8) { /* above or below combine */
+ vp6_filter_hv2(s, dst, src+offset1, stride, stride, y8);
+ } else if ((mv.x^mv.y) >> 31) { /* lower-left or upper-right combine */
+ vp6_filter_diag2(s, dst, src+offset1-1, stride, x8, y8);
+ } else { /* lower-right or upper-left combine */
+ vp6_filter_diag2(s, dst, src+offset1, stride, x8, y8);
+ }
+ }
+}
+
+static int vp6_decode_init(AVCodecContext *avctx)
+{
+ vp56_context_t *s = avctx->priv_data;
+
+ vp56_init(s, avctx, avctx->codec->id == CODEC_ID_VP6);
+ s->vp56_coord_div = vp6_coord_div;
+ s->parse_vector_adjustment = vp6_parse_vector_adjustment;
+ s->adjust = vp6_adjust;
+ s->filter = vp6_filter;
+ s->parse_coeff = vp6_parse_coeff;
+ s->default_models_init = vp6_default_models_init;
+ s->parse_vector_models = vp6_parse_vector_models;
+ s->parse_coeff_models = vp6_parse_coeff_models;
+ s->parse_header = vp6_parse_header;
+
+ return 0;
+}
+
+AVCodec vp6_decoder = {
+ "vp6",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VP6,
+ sizeof(vp56_context_t),
+ vp6_decode_init,
+ NULL,
+ vp56_free,
+ vp56_decode_frame,
+};
+
+/* flash version, not flipped upside-down */
+AVCodec vp6f_decoder = {
+ "vp6f",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_VP6F,
+ sizeof(vp56_context_t),
+ vp6_decode_init,
+ NULL,
+ vp56_free,
+ vp56_decode_frame,
+};
diff --git a/contrib/ffmpeg/libavcodec/vp6data.h b/contrib/ffmpeg/libavcodec/vp6data.h
new file mode 100644
index 000000000..ba4c7abf7
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/vp6data.h
@@ -0,0 +1,292 @@
+/**
+ * @file vp6data.h
+ * VP6 compatible video decoder
+ *
+ * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef VP6DATA_H
+#define VP6DATA_H
+
+#include "vp56data.h"
+
+static const uint8_t vp6_def_fdv_vector_model[2][8] = {
+ { 247, 210, 135, 68, 138, 220, 239, 246 },
+ { 244, 184, 201, 44, 173, 221, 239, 253 },
+};
+
+static const uint8_t vp6_def_pdv_vector_model[2][7] = {
+ { 225, 146, 172, 147, 214, 39, 156 },
+ { 204, 170, 119, 235, 140, 230, 228 },
+};
+
+static const uint8_t vp6_def_coeff_reorder[] = {
+ 0, 0, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 3, 3, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 7, 7,
+ 7, 7, 7, 8, 8, 9, 9, 9,
+ 9, 9, 9, 10, 10, 11, 11, 11,
+ 11, 11, 11, 12, 12, 12, 12, 12,
+ 12, 13, 13, 13, 13, 13, 14, 14,
+ 14, 14, 15, 15, 15, 15, 15, 15,
+};
+
+static const uint8_t vp6_def_runv_coeff_model[2][14] = {
+ { 198, 197, 196, 146, 198, 204, 169, 142, 130, 136, 149, 149, 191, 249 },
+ { 135, 201, 181, 154, 98, 117, 132, 126, 146, 169, 184, 240, 246, 254 },
+};
+
+static const uint8_t vp6_sig_dct_pct[2][2] = {
+ { 237, 246 },
+ { 231, 243 },
+};
+
+static const uint8_t vp6_pdv_pct[2][7] = {
+ { 253, 253, 254, 254, 254, 254, 254 },
+ { 245, 253, 254, 254, 254, 254, 254 },
+};
+
+static const uint8_t vp6_fdv_pct[2][8] = {
+ { 254, 254, 254, 254, 254, 250, 250, 252 },
+ { 254, 254, 254, 254, 254, 251, 251, 254 },
+};
+
+static const uint8_t vp6_dccv_pct[2][11] = {
+ { 146, 255, 181, 207, 232, 243, 238, 251, 244, 250, 249 },
+ { 179, 255, 214, 240, 250, 255, 244, 255, 255, 255, 255 },
+};
+
+static const uint8_t vp6_coeff_reorder_pct[] = {
+ 255, 132, 132, 159, 153, 151, 161, 170,
+ 164, 162, 136, 110, 103, 114, 129, 118,
+ 124, 125, 132, 136, 114, 110, 142, 135,
+ 134, 123, 143, 126, 153, 183, 166, 161,
+ 171, 180, 179, 164, 203, 218, 225, 217,
+ 215, 206, 203, 217, 229, 241, 248, 243,
+ 253, 255, 253, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255,
+};
+
+static const uint8_t vp6_runv_pct[2][14] = {
+ { 219, 246, 238, 249, 232, 239, 249, 255, 248, 253, 239, 244, 241, 248 },
+ { 198, 232, 251, 253, 219, 241, 253, 255, 248, 249, 244, 238, 251, 255 },
+};
+
+static const uint8_t vp6_ract_pct[3][2][6][11] = {
+ { { { 227, 246, 230, 247, 244, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 209, 231, 231, 249, 249, 253, 255, 255, 255 },
+ { 255, 255, 225, 242, 241, 251, 253, 255, 255, 255, 255 },
+ { 255, 255, 241, 253, 252, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 248, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } },
+ { { 240, 255, 248, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 240, 253, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } } },
+ { { { 206, 203, 227, 239, 247, 255, 253, 255, 255, 255, 255 },
+ { 207, 199, 220, 236, 243, 252, 252, 255, 255, 255, 255 },
+ { 212, 219, 230, 243, 244, 253, 252, 255, 255, 255, 255 },
+ { 236, 237, 247, 252, 253, 255, 255, 255, 255, 255, 255 },
+ { 240, 240, 248, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } },
+ { { 230, 233, 249, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 238, 238, 250, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 248, 251, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } } },
+ { { { 225, 239, 227, 231, 244, 253, 243, 255, 255, 253, 255 },
+ { 232, 234, 224, 228, 242, 249, 242, 252, 251, 251, 255 },
+ { 235, 249, 238, 240, 251, 255, 249, 255, 253, 253, 255 },
+ { 249, 253, 251, 250, 255, 255, 255, 255, 255, 255, 255 },
+ { 251, 250, 249, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } },
+ { { 243, 244, 250, 250, 255, 255, 255, 255, 255, 255, 255 },
+ { 249, 248, 250, 253, 255, 255, 255, 255, 255, 255, 255 },
+ { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 },
+ { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } } }
+};
+
+static const int vp6_dccv_lc[3][5][2] = {
+ { { 122, 133 }, { 0, 1 }, { 78, 171 }, { 139, 117 }, { 168, 79 } },
+ { { 133, 51 }, { 0, 1 }, { 169, 71 }, { 214, 44 }, { 210, 38 } },
+ { { 142, -16 }, { 0, 1 }, { 221, -30 }, { 246, -3 }, { 203, 17 } },
+};
+
+static const uint8_t vp6_coeff_groups[] = {
+ 0, 0, 1, 1, 1, 2, 2, 2,
+ 2, 2, 2, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+};
+
+static const int16_t vp6_block_copy_filter[16][8][4] = {
+ { { 0, 128, 0, 0 }, /* 0 */
+ { -3, 122, 9, 0 },
+ { -4, 109, 24, -1 },
+ { -5, 91, 45, -3 },
+ { -4, 68, 68, -4 },
+ { -3, 45, 91, -5 },
+ { -1, 24, 109, -4 },
+ { 0, 9, 122, -3 } },
+ { { 0, 128, 0, 0 }, /* 1 */
+ { -4, 124, 9, -1 },
+ { -5, 110, 25, -2 },
+ { -6, 91, 46, -3 },
+ { -5, 69, 69, -5 },
+ { -3, 46, 91, -6 },
+ { -2, 25, 110, -5 },
+ { -1, 9, 124, -4 } },
+ { { 0, 128, 0, 0 }, /* 2 */
+ { -4, 123, 10, -1 },
+ { -6, 110, 26, -2 },
+ { -7, 92, 47, -4 },
+ { -6, 70, 70, -6 },
+ { -4, 47, 92, -7 },
+ { -2, 26, 110, -6 },
+ { -1, 10, 123, -4 } },
+ { { 0, 128, 0, 0 }, /* 3 */
+ { -5, 124, 10, -1 },
+ { -7, 110, 27, -2 },
+ { -7, 91, 48, -4 },
+ { -6, 70, 70, -6 },
+ { -4, 48, 92, -8 },
+ { -2, 27, 110, -7 },
+ { -1, 10, 124, -5 } },
+ { { 0, 128, 0, 0 }, /* 4 */
+ { -6, 124, 11, -1 },
+ { -8, 111, 28, -3 },
+ { -8, 92, 49, -5 },
+ { -7, 71, 71, -7 },
+ { -5, 49, 92, -8 },
+ { -3, 28, 111, -8 },
+ { -1, 11, 124, -6 } },
+ { { 0, 128, 0, 0 }, /* 5 */
+ { -6, 123, 12, -1 },
+ { -9, 111, 29, -3 },
+ { -9, 93, 50, -6 },
+ { -8, 72, 72, -8 },
+ { -6, 50, 93, -9 },
+ { -3, 29, 111, -9 },
+ { -1, 12, 123, -6 } },
+ { { 0, 128, 0, 0 }, /* 6 */
+ { -7, 124, 12, -1 },
+ { -10, 111, 30, -3 },
+ { -10, 93, 51, -6 },
+ { -9, 73, 73, -9 },
+ { -6, 51, 93, -10 },
+ { -3, 30, 111, -10 },
+ { -1, 12, 124, -7 } },
+ { { 0, 128, 0, 0 }, /* 7 */
+ { -7, 123, 13, -1 },
+ { -11, 112, 31, -4 },
+ { -11, 94, 52, -7 },
+ { -10, 74, 74, -10 },
+ { -7, 52, 94, -11 },
+ { -4, 31, 112, -11 },
+ { -1, 13, 123, -7 } },
+ { { 0, 128, 0, 0 }, /* 8 */
+ { -8, 124, 13, -1 },
+ { -12, 112, 32, -4 },
+ { -12, 94, 53, -7 },
+ { -10, 74, 74, -10 },
+ { -7, 53, 94, -12 },
+ { -4, 32, 112, -12 },
+ { -1, 13, 124, -8 } },
+ { { 0, 128, 0, 0 }, /* 9 */
+ { -9, 124, 14, -1 },
+ { -13, 112, 33, -4 },
+ { -13, 95, 54, -8 },
+ { -11, 75, 75, -11 },
+ { -8, 54, 95, -13 },
+ { -4, 33, 112, -13 },
+ { -1, 14, 124, -9 } },
+ { { 0, 128, 0, 0 }, /* 10 */
+ { -9, 123, 15, -1 },
+ { -14, 113, 34, -5 },
+ { -14, 95, 55, -8 },
+ { -12, 76, 76, -12 },
+ { -8, 55, 95, -14 },
+ { -5, 34, 112, -13 },
+ { -1, 15, 123, -9 } },
+ { { 0, 128, 0, 0 }, /* 11 */
+ { -10, 124, 15, -1 },
+ { -14, 113, 34, -5 },
+ { -15, 96, 56, -9 },
+ { -13, 77, 77, -13 },
+ { -9, 56, 96, -15 },
+ { -5, 34, 113, -14 },
+ { -1, 15, 124, -10 } },
+ { { 0, 128, 0, 0 }, /* 12 */
+ { -10, 123, 16, -1 },
+ { -15, 113, 35, -5 },
+ { -16, 98, 56, -10 },
+ { -14, 78, 78, -14 },
+ { -10, 56, 98, -16 },
+ { -5, 35, 113, -15 },
+ { -1, 16, 123, -10 } },
+ { { 0, 128, 0, 0 }, /* 13 */
+ { -11, 124, 17, -2 },
+ { -16, 113, 36, -5 },
+ { -17, 98, 57, -10 },
+ { -14, 78, 78, -14 },
+ { -10, 57, 98, -17 },
+ { -5, 36, 113, -16 },
+ { -2, 17, 124, -11 } },
+ { { 0, 128, 0, 0 }, /* 14 */
+ { -12, 125, 17, -2 },
+ { -17, 114, 37, -6 },
+ { -18, 99, 58, -11 },
+ { -15, 79, 79, -15 },
+ { -11, 58, 99, -18 },
+ { -6, 37, 114, -17 },
+ { -2, 17, 125, -12 } },
+ { { 0, 128, 0, 0 }, /* 15 */
+ { -12, 124, 18, -2 },
+ { -18, 114, 38, -6 },
+ { -19, 99, 59, -11 },
+ { -16, 80, 80, -16 },
+ { -11, 59, 99, -19 },
+ { -6, 38, 114, -18 },
+ { -2, 18, 124, -12 } },
+};
+
+static const vp56_tree_t vp6_pcr_tree[] = {
+ { 8, 0},
+ { 4, 1},
+ { 2, 2}, {-1}, {-2},
+ { 2, 3}, {-3}, {-4},
+ { 8, 4},
+ { 4, 5},
+ { 2, 6}, {-5}, {-6},
+ { 2, 7}, {-7}, {-8},
+ {-0},
+};
+
+static const uint8_t vp6_coord_div[] = { 4, 4, 4, 4, 8, 8 };
+
+#endif /* VP6DATA_H */
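
The vp6_block_copy_filter table above holds 16 banks of 4-tap sub-pixel interpolation filters, one row per eighth-pel phase; every row sums to 128, so results are normalized with a 7-bit shift. A minimal sketch of how one such 4-tap row could be applied horizontally (illustrative only: the helper names and the +64 rounding choice are assumptions, not the decoder's actual entry points):

#include <stdint.h>

static uint8_t clip_uint8(int v)            /* clamp to the 0..255 pixel range */
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Apply one 4-tap filter row (coefficients summing to 128) horizontally.
 * The caller must provide one pixel of margin on the left and two on the right. */
static void filter_row_4tap(uint8_t *dst, const uint8_t *src, int width,
                            const int16_t coeff[4])
{
    for (int x = 0; x < width; x++) {
        int sum = coeff[0] * src[x - 1] + coeff[1] * src[x]
                + coeff[2] * src[x + 1] + coeff[3] * src[x + 2];
        dst[x] = clip_uint8((sum + 64) >> 7);   /* +64 rounds, >>7 divides by 128 */
    }
}
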
diff --git a/src/libffmpeg/libavcodec/vqavideo.c b/contrib/ffmpeg/libavcodec/vqavideo.c
index 7f0c95206..912ced0df 100644
--- a/src/libffmpeg/libavcodec/vqavideo.c
+++ b/contrib/ffmpeg/libavcodec/vqavideo.c
@@ -2,18 +2,20 @@
* Westwood Studios VQA Video Decoder
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -21,8 +23,8 @@
/**
* @file vqavideo.c
* VQA Video Decoder by Mike Melanson (melanson@pcisys.net)
- * For more information about the RPZA format, visit:
- * http://www.pcisys.net/~melanson/codecs/
+ * For more information about the VQA format, visit:
+ * http://wiki.multimedia.cx/index.php?title=VQA
*
* The VQA video decoder outputs PAL8 or RGB555 colorspace data, depending
* on the type of data in the file.
@@ -107,7 +109,7 @@ typedef struct VqaContext {
unsigned char *buf;
int size;
- unsigned int palette[PALETTE_COUNT];
+ uint32_t palette[PALETTE_COUNT];
int width; /* width of a frame */
int height; /* height of a frame */
@@ -469,7 +471,22 @@ static void vqa_decode_chunk(VqaContext *s)
case 1:
/* still need sample media for this case (only one game, "Legend of
* Kyrandia III : Malcolm's Revenge", is known to use this version) */
- lines = 0;
+ lobyte = s->decode_buffer[lobytes * 2];
+ hibyte = s->decode_buffer[(lobytes * 2) + 1];
+ vector_index = ((hibyte << 8) | lobyte) >> 3;
+ vector_index <<= index_shift;
+ lines = s->vector_height;
+ /* uniform color fill - a quick hack */
+ if (hibyte == 0xFF) {
+ while (lines--) {
+ s->frame.data[0][pixel_ptr + 0] = 255 - lobyte;
+ s->frame.data[0][pixel_ptr + 1] = 255 - lobyte;
+ s->frame.data[0][pixel_ptr + 2] = 255 - lobyte;
+ s->frame.data[0][pixel_ptr + 3] = 255 - lobyte;
+ pixel_ptr += s->frame.linesize[0];
+ }
+ lines=0;
+ }
break;
case 2:
diff --git a/contrib/ffmpeg/libavcodec/w32thread.c b/contrib/ffmpeg/libavcodec/w32thread.c
new file mode 100644
index 000000000..e749a64af
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/w32thread.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+//#define DEBUG
+
+#include "avcodec.h"
+#include "common.h"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <process.h>
+
+typedef struct ThreadContext{
+ AVCodecContext *avctx;
+ HANDLE thread;
+ HANDLE work_sem;
+ HANDLE done_sem;
+ int (*func)(AVCodecContext *c, void *arg);
+ void *arg;
+ int ret;
+}ThreadContext;
+
+
+static unsigned __stdcall thread_func(void *v){
+ ThreadContext *c= v;
+
+ for(;;){
+//printf("thread_func %X enter wait\n", (int)v); fflush(stdout);
+ WaitForSingleObject(c->work_sem, INFINITE);
+//printf("thread_func %X after wait (func=%X)\n", (int)v, (int)c->func); fflush(stdout);
+ if(c->func)
+ c->ret= c->func(c->avctx, c->arg);
+ else
+ return 0;
+//printf("thread_func %X signal complete\n", (int)v); fflush(stdout);
+ ReleaseSemaphore(c->done_sem, 1, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * Free what has been allocated by avcodec_thread_init().
+ * Must be called after decoding has finished; in particular, do not call it while avcodec_thread_execute() is running.
+ */
+void avcodec_thread_free(AVCodecContext *s){
+ ThreadContext *c= s->thread_opaque;
+ int i;
+
+ for(i=0; i<s->thread_count; i++){
+
+ c[i].func= NULL;
+ ReleaseSemaphore(c[i].work_sem, 1, 0);
+ WaitForSingleObject(c[i].thread, INFINITE);
+ if(c[i].work_sem) CloseHandle(c[i].work_sem);
+ if(c[i].done_sem) CloseHandle(c[i].done_sem);
+ }
+
+ av_freep(&s->thread_opaque);
+}
+
+int avcodec_thread_execute(AVCodecContext *s, int (*func)(AVCodecContext *c2, void *arg2),void **arg, int *ret, int count){
+ ThreadContext *c= s->thread_opaque;
+ int i;
+
+ assert(s == c->avctx);
+ assert(count <= s->thread_count);
+
+ /* note, we can be certain that this is not called with the same AVCodecContext by different threads at the same time */
+
+ for(i=0; i<count; i++){
+ c[i].arg= arg[i];
+ c[i].func= func;
+ c[i].ret= 12345;
+
+ ReleaseSemaphore(c[i].work_sem, 1, 0);
+ }
+ for(i=0; i<count; i++){
+ WaitForSingleObject(c[i].done_sem, INFINITE);
+
+ c[i].func= NULL;
+ if(ret) ret[i]= c[i].ret;
+ }
+ return 0;
+}
+
+int avcodec_thread_init(AVCodecContext *s, int thread_count){
+ int i;
+ ThreadContext *c;
+ uint32_t threadid;
+
+ s->thread_count= thread_count;
+
+ assert(!s->thread_opaque);
+ c= av_mallocz(sizeof(ThreadContext)*thread_count);
+ s->thread_opaque= c;
+
+ for(i=0; i<thread_count; i++){
+//printf("init semaphors %d\n", i); fflush(stdout);
+ c[i].avctx= s;
+
+ if(!(c[i].work_sem = CreateSemaphore(NULL, 0, s->thread_count, NULL)))
+ goto fail;
+ if(!(c[i].done_sem = CreateSemaphore(NULL, 0, s->thread_count, NULL)))
+ goto fail;
+
+//printf("create thread %d\n", i); fflush(stdout);
+ c[i].thread = (HANDLE)_beginthreadex(NULL, 0, thread_func, &c[i], 0, &threadid );
+ if( !c[i].thread ) goto fail;
+ }
+//printf("init done\n"); fflush(stdout);
+
+ s->execute= avcodec_thread_execute;
+
+ return 0;
+fail:
+ avcodec_thread_free(s);
+ return -1;
+}
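
The Win32 wrapper above implements a simple fork/join pool: avcodec_thread_execute() posts work_sem once per job and then blocks on done_sem until every worker has reported back, and avcodec_thread_init() installs it as avctx->execute. A hedged sketch of how a codec might drive it through that callback (the slice worker and its argument struct are hypothetical):

#include "avcodec.h"

typedef struct SliceJob { int first_row, last_row; } SliceJob;

/* Hypothetical per-slice worker, run on one of the pool threads. */
static int decode_slice(AVCodecContext *avctx, void *arg)
{
    SliceJob *job = arg;
    /* ... decode rows job->first_row .. job->last_row ... */
    return 0;
}

static int decode_all_slices(AVCodecContext *avctx, int height)
{
    SliceJob jobs[2] = { { 0, height / 2 }, { height / 2, height } };
    void    *args[2] = { &jobs[0], &jobs[1] };
    int      rets[2];

    /* Dispatch both jobs and wait for completion; rets[] gets each worker's return value. */
    return avctx->execute(avctx, decode_slice, args, rets, 2);
}
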
diff --git a/contrib/ffmpeg/libavcodec/wavpack.c b/contrib/ffmpeg/libavcodec/wavpack.c
new file mode 100644
index 000000000..5a54f7d0e
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/wavpack.c
@@ -0,0 +1,556 @@
+/*
+ * WavPack lossless audio decoder
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#define ALT_BITSTREAM_READER_LE
+#include "avcodec.h"
+#include "bitstream.h"
+
+/**
+ * @file wavpack.c
+ * WavPack lossless audio decoder
+ */
+
+#define WV_JOINT 0x0010
+
+enum WP_ID_Flags{
+ WP_IDF_MASK = 0x1F,
+ WP_IDF_IGNORE = 0x20,
+ WP_IDF_ODD = 0x40,
+ WP_IDF_LONG = 0x80
+};
+
+enum WP_ID{
+ WP_ID_DUMMY = 0,
+ WP_ID_ENCINFO,
+ WP_ID_DECTERMS,
+ WP_ID_DECWEIGHTS,
+ WP_ID_DECSAMPLES,
+ WP_ID_ENTROPY,
+ WP_ID_HYBRID,
+ WP_ID_SHAPING,
+ WP_ID_FLOATINFO,
+ WP_ID_INT32INFO,
+ WP_ID_DATA,
+ WP_ID_CORR,
+ WP_ID_FLT,
+ WP_ID_CHANINFO
+};
+
+#define MAX_TERMS 16
+
+typedef struct Decorr {
+ int delta;
+ int value;
+ int weightA;
+ int weightB;
+ int samplesA[8];
+ int samplesB[8];
+} Decorr;
+
+typedef struct WavpackContext {
+ AVCodecContext *avctx;
+ int stereo;
+ int joint;
+ uint32_t CRC;
+ GetBitContext gb;
+ int data_size; // in bits
+ int samples;
+ int median[6];
+ int terms;
+ Decorr decorr[MAX_TERMS];
+ int zero, one, zeroes;
+} WavpackContext;
+
+// exponent table copied from WavPack source
+static const uint8_t wp_exp2_table [256] = {
+ 0x00, 0x01, 0x01, 0x02, 0x03, 0x03, 0x04, 0x05, 0x06, 0x06, 0x07, 0x08, 0x08, 0x09, 0x0a, 0x0b,
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0e, 0x0f, 0x10, 0x10, 0x11, 0x12, 0x13, 0x13, 0x14, 0x15, 0x16, 0x16,
+ 0x17, 0x18, 0x19, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1d, 0x1e, 0x1f, 0x20, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x24, 0x25, 0x26, 0x27, 0x28, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3a, 0x3b, 0x3c, 0x3d,
+ 0x3e, 0x3f, 0x40, 0x41, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x48, 0x49, 0x4a, 0x4b,
+ 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a,
+ 0x5b, 0x5c, 0x5d, 0x5e, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x87, 0x88, 0x89, 0x8a,
+ 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
+ 0x9c, 0x9d, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad,
+ 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0,
+ 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc8, 0xc9, 0xca, 0xcb, 0xcd, 0xce, 0xcf, 0xd0, 0xd2, 0xd3, 0xd4,
+ 0xd6, 0xd7, 0xd8, 0xd9, 0xdb, 0xdc, 0xdd, 0xde, 0xe0, 0xe1, 0xe2, 0xe4, 0xe5, 0xe6, 0xe8, 0xe9,
+ 0xea, 0xec, 0xed, 0xee, 0xf0, 0xf1, 0xf2, 0xf4, 0xf5, 0xf6, 0xf8, 0xf9, 0xfa, 0xfc, 0xfd, 0xff
+};
+
+static always_inline int wp_exp2(int16_t val)
+{
+ int res, neg = 0;
+
+ if(val < 0){
+ val = -val;
+ neg = 1;
+ }
+
+ res = wp_exp2_table[val & 0xFF] | 0x100;
+ val >>= 8;
+ res = (val > 9) ? (res << (val - 9)) : (res >> (9 - val));
+ return neg ? -res : res;
+}
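/* wp_exp2() above undoes WavPack's logarithmic storage: the low 8 bits index the
 * mantissa table (ORed with 0x100 for the implied leading bit) and the upper bits
 * are a binary exponent applied around a 9-bit fixed point, so the result is
 * roughly 2^(val/256 - 1), truncated. A small illustrative reference, not part of
 * the decoder: */

#include <math.h>
#include <stdio.h>

static double wp_exp2_ref(int val)
{
    int neg = val < 0;
    if (neg)
        val = -val;
    double linear = pow(2.0, val / 256.0 - 1.0);   /* 8.8 fixed-point log2, offset by 1 */
    return neg ? -linear : linear;
}

int main(void)
{
    /* val = 0x400: mantissa bits are 0 (256 after the implied bit), exponent 4,
     * so the integer routine computes 256 >> (9 - 4) = 8, matching 2^(4 - 1). */
    printf("%f\n", wp_exp2_ref(0x400));   /* prints 8.000000 */
    return 0;
}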
+
+static inline int get_unary(GetBitContext *gb){
+ int r=0;
+ while(get_bits1(gb) && r<33)r++;
+ return r;
+}
+
+// macros for manipulating median values
+#define GET_MED(n) ((median[n] >> 4) + 1)
+#define DEC_MED(n) median[n] -= ((median[n] + (128>>n) - 2) / (128>>n)) * 2
+#define INC_MED(n) median[n] += ((median[n] + (128>>n)) / (128>>n)) * 5
+
+// macros for applying weight
+#define UPDATE_WEIGHT_CLIP(weight, delta, samples, in) \
+ if(samples && in){ \
+ if((samples ^ in) < 0){ \
+ weight -= delta; \
+ if(weight < -1024) weight = -1024; \
+ }else{ \
+ weight += delta; \
+ if(weight > 1024) weight = 1024; \
+ } \
+ }
+
+
+static always_inline int get_tail(GetBitContext *gb, int k)
+{
+ int p, e, res;
+
+ if(k<1 || k>65535)return 0;
+ p = av_log2_16bit(k);
+ e = (1 << (p + 1)) - k - 1;
+ res = p ? get_bits(gb, p) : 0;
+ if(res >= e){
+ res = (res<<1) - e + get_bits1(gb);
+ }
+ return res;
+}
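/* get_tail() above is an adjusted-binary (economy) code for a value known to lie
 * in 0..k: with p = floor(log2(k)) and e = 2^(p+1) - k - 1, the first e values
 * take p bits and the remaining ones take p + 1 bits. A small sketch that
 * reproduces the per-value bit cost (purely illustrative, with an assumed ilog2
 * helper in place of av_log2_16bit): */

#include <stdio.h>

static int ilog2(int v) { int n = 0; while (v > 1) { v >>= 1; n++; } return n; }

int main(void)
{
    int k = 5;                          /* value is known to be in 0..k          */
    int p = ilog2(k);                   /* p = 2                                 */
    int e = (1 << (p + 1)) - k - 1;     /* e = 2: that many short codes exist    */

    for (int v = 0; v <= k; v++)        /* values 0,1 -> 2 bits; 2..5 -> 3 bits  */
        printf("value %d -> %d bits\n", v, v < e ? p : p + 1);
    return 0;
}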
+
+static int wv_get_value(WavpackContext *ctx, GetBitContext *gb, int *median, int *last)
+{
+ int t, t2;
+ int sign, base, add, ret;
+
+ *last = 0;
+
+ if((ctx->median[0] < 2U) && (ctx->median[3] < 2U) && !ctx->zero && !ctx->one){
+ if(ctx->zeroes){
+ ctx->zeroes--;
+ if(ctx->zeroes)
+ return 0;
+ }else{
+ t = get_unary(gb);
+ if(t >= 2) t = get_bits(gb, t - 1) | (1 << (t-1));
+ ctx->zeroes = t;
+ if(ctx->zeroes){
+ memset(ctx->median, 0, sizeof(ctx->median));
+ return 0;
+ }
+ }
+ }
+
+ if(get_bits_count(gb) >= ctx->data_size){
+ *last = 1;
+ return 0;
+ }
+
+ if(ctx->zero){
+ t = 0;
+ ctx->zero = 0;
+ }else{
+ t = get_unary(gb);
+ if(get_bits_count(gb) >= ctx->data_size){
+ *last = 1;
+ return 0;
+ }
+ if(t == 16) {
+ t2 = get_unary(gb);
+ if(t2 < 2) t += t2;
+ else t += get_bits(gb, t2 - 1) | (1 << (t2 - 1));
+ }
+
+ if(ctx->one){
+ ctx->one = t&1;
+ t = (t>>1) + 1;
+ }else{
+ ctx->one = t&1;
+ t >>= 1;
+ }
+ ctx->zero = !ctx->one;
+ }
+
+ if(!t){
+ base = 0;
+ add = GET_MED(0) - 1;
+ DEC_MED(0);
+ }else if(t == 1){
+ base = GET_MED(0);
+ add = GET_MED(1) - 1;
+ INC_MED(0);
+ DEC_MED(1);
+ }else if(t == 2){
+ base = GET_MED(0) + GET_MED(1);
+ add = GET_MED(2) - 1;
+ INC_MED(0);
+ INC_MED(1);
+ DEC_MED(2);
+ }else{
+ base = GET_MED(0) + GET_MED(1) + GET_MED(2) * (t - 2);
+ add = GET_MED(2) - 1;
+ INC_MED(0);
+ INC_MED(1);
+ INC_MED(2);
+ }
+ ret = base + get_tail(gb, add);
+ sign = get_bits1(gb);
+ return sign ? ~ret : ret;
+}
+
+static int wv_unpack_stereo(WavpackContext *s, GetBitContext *gb, int16_t *dst)
+{
+ int i, j, count = 0;
+ int last, t;
+ int A, B, L, L2, R, R2;
+ int pos = 0;
+ uint32_t crc = 0xFFFFFFFF;
+
+ s->one = s->zero = s->zeroes = 0;
+ do{
+ L = wv_get_value(s, gb, s->median, &last);
+ if(last) break;
+ R = wv_get_value(s, gb, s->median + 3, &last);
+ if(last) break;
+ for(i = 0; i < s->terms; i++){
+ t = s->decorr[i].value;
+ j = 0;
+ if(t > 0){
+ if(t > 8){
+ if(t & 1){
+ A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
+ B = 2 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1];
+ }else{
+ A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
+ B = (3 * s->decorr[i].samplesB[0] - s->decorr[i].samplesB[1]) >> 1;
+ }
+ s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
+ s->decorr[i].samplesB[1] = s->decorr[i].samplesB[0];
+ j = 0;
+ }else{
+ A = s->decorr[i].samplesA[pos];
+ B = s->decorr[i].samplesB[pos];
+ j = (pos + t) & 7;
+ }
+ L2 = L + ((s->decorr[i].weightA * A + 512) >> 10);
+ R2 = R + ((s->decorr[i].weightB * B + 512) >> 10);
+ if(A && L) s->decorr[i].weightA -= ((((L ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
+ if(B && R) s->decorr[i].weightB -= ((((R ^ B) >> 30) & 2) - 1) * s->decorr[i].delta;
+ s->decorr[i].samplesA[j] = L = L2;
+ s->decorr[i].samplesB[j] = R = R2;
+ }else if(t == -1){
+ L2 = L + ((s->decorr[i].weightA * s->decorr[i].samplesA[0] + 512) >> 10);
+ UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, s->decorr[i].samplesA[0], L);
+ L = L2;
+ R2 = R + ((s->decorr[i].weightB * L2 + 512) >> 10);
+ UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, L2, R);
+ R = R2;
+ s->decorr[i].samplesA[0] = R;
+ }else{
+ R2 = R + ((s->decorr[i].weightB * s->decorr[i].samplesB[0] + 512) >> 10);
+ UPDATE_WEIGHT_CLIP(s->decorr[i].weightB, s->decorr[i].delta, s->decorr[i].samplesB[0], R);
+ R = R2;
+
+ if(t == -3){
+ R2 = s->decorr[i].samplesA[0];
+ s->decorr[i].samplesA[0] = R;
+ }
+
+ L2 = L + ((s->decorr[i].weightA * R2 + 512) >> 10);
+ UPDATE_WEIGHT_CLIP(s->decorr[i].weightA, s->decorr[i].delta, R2, L);
+ L = L2;
+ s->decorr[i].samplesB[0] = L;
+ }
+ }
+ pos = (pos + 1) & 7;
+ if(s->joint)
+ L += (R -= (L >> 1));
+ crc = (crc * 3 + L) * 3 + R;
+ *dst++ = L;
+ *dst++ = R;
+
+ count++;
+ }while(!last && count < s->samples);
+
+ if(crc != s->CRC){
+ av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
+ return -1;
+ }
+ return count * 2;
+}
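/* The branchless update  weightA -= ((((L ^ A) >> 30) & 2) - 1) * delta  used in
 * both unpack loops is a sign-sign LMS step: when the prediction input and the
 * decoded sample agree in sign the weight grows by delta, otherwise it shrinks.
 * An equivalent, more readable formulation (sketch; 32-bit int with arithmetic
 * right shift assumed, as in the code above): */

#include <assert.h>

static int update_weight_ref(int weight, int delta, int sample, int residual)
{
    if (sample && residual)
        weight += ((sample ^ residual) < 0) ? -delta : delta;
    return weight;
}

static int update_weight_branchless(int weight, int delta, int sample, int residual)
{
    if (sample && residual)
        weight -= ((((residual ^ sample) >> 30) & 2) - 1) * delta;
    return weight;
}

int main(void)
{
    assert(update_weight_ref(100, 2, 5, -7) == update_weight_branchless(100, 2, 5, -7));
    assert(update_weight_ref(100, 2, 5,  7) == update_weight_branchless(100, 2, 5,  7));
    return 0;
}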
+
+static int wv_unpack_mono(WavpackContext *s, GetBitContext *gb, int16_t *dst)
+{
+ int i, j, count = 0;
+ int last, t;
+ int A, S, T;
+ int pos = 0;
+ uint32_t crc = 0xFFFFFFFF;
+
+ s->one = s->zero = s->zeroes = 0;
+ do{
+ T = wv_get_value(s, gb, s->median, &last);
+ S = 0;
+ if(last) break;
+ for(i = 0; i < s->terms; i++){
+ t = s->decorr[i].value;
+ if(t > 8){
+ if(t & 1)
+ A = 2 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1];
+ else
+ A = (3 * s->decorr[i].samplesA[0] - s->decorr[i].samplesA[1]) >> 1;
+ s->decorr[i].samplesA[1] = s->decorr[i].samplesA[0];
+ j = 0;
+ }else{
+ A = s->decorr[i].samplesA[pos];
+ j = (pos + t) & 7;
+ }
+ S = T + ((s->decorr[i].weightA * A + 512) >> 10);
+ if(A && T) s->decorr[i].weightA -= ((((T ^ A) >> 30) & 2) - 1) * s->decorr[i].delta;
+ s->decorr[i].samplesA[j] = T = S;
+ }
+ pos = (pos + 1) & 7;
+ crc = crc * 3 + S;
+ *dst++ = S;
+ count++;
+ }while(!last && count < s->samples);
+
+ if(crc != s->CRC){
+ av_log(s->avctx, AV_LOG_ERROR, "CRC error\n");
+ return -1;
+ }
+ return count;
+}
+
+static int wavpack_decode_init(AVCodecContext *avctx)
+{
+ WavpackContext *s = avctx->priv_data;
+
+ s->avctx = avctx;
+ s->stereo = (avctx->channels == 2);
+
+ return 0;
+}
+
+static int wavpack_decode_close(AVCodecContext *avctx)
+{
+// WavpackContext *s = avctx->priv_data;
+
+ return 0;
+}
+
+static int wavpack_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ uint8_t *buf, int buf_size)
+{
+ WavpackContext *s = avctx->priv_data;
+ int16_t *samples = data;
+ int samplecount;
+ int got_terms = 0, got_weights = 0, got_samples = 0, got_entropy = 0, got_bs = 0;
+ uint8_t* buf_end = buf + buf_size;
+ int i, j, id, size, ssize, weights, t;
+
+ if (buf_size == 0) return 0;
+
+ memset(s->decorr, 0, MAX_TERMS * sizeof(Decorr));
+
+ s->samples = LE_32(buf); buf += 4;
+ if(!s->samples) return buf_size;
+ /* should not happen but who knows */
+ if(s->samples * 2 * avctx->channels > AVCODEC_MAX_AUDIO_FRAME_SIZE){
+ av_log(avctx, AV_LOG_ERROR, "Packet size is too big to be handled in lavc!\n");
+ return -1;
+ }
+ s->joint = LE_32(buf) & WV_JOINT; buf += 4;
+ s->CRC = LE_32(buf); buf += 4;
+ // parse metadata blocks
+ while(buf < buf_end){
+ id = *buf++;
+ size = *buf++;
+ if(id & WP_IDF_LONG) {
+ size |= (*buf++) << 8;
+ size |= (*buf++) << 16;
+ }
+ size <<= 1; // size is specified in words
+ ssize = size;
+ if(id & WP_IDF_ODD) size--;
+ if(size < 0){
+ av_log(avctx, AV_LOG_ERROR, "Got incorrect block %02X with size %i\n", id, size);
+ break;
+ }
+ if(buf + ssize > buf_end){
+ av_log(avctx, AV_LOG_ERROR, "Block size %i is out of bounds\n", size);
+ break;
+ }
+ if(id & WP_IDF_IGNORE){
+ buf += ssize;
+ continue;
+ }
+ switch(id & WP_IDF_MASK){
+ case WP_ID_DECTERMS:
+ s->terms = size;
+ if(s->terms > MAX_TERMS){
+ av_log(avctx, AV_LOG_ERROR, "Too many decorrelation terms\n");
+ buf += ssize;
+ continue;
+ }
+ for(i = 0; i < s->terms; i++) {
+ s->decorr[s->terms - i - 1].value = (*buf & 0x1F) - 5;
+ s->decorr[s->terms - i - 1].delta = *buf >> 5;
+ buf++;
+ }
+ got_terms = 1;
+ break;
+ case WP_ID_DECWEIGHTS:
+ if(!got_terms){
+ av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n");
+ continue;
+ }
+ weights = size >> s->stereo;
+ if(weights > MAX_TERMS || weights > s->terms){
+ av_log(avctx, AV_LOG_ERROR, "Too many decorrelation weights\n");
+ buf += ssize;
+ continue;
+ }
+ for(i = 0; i < weights; i++) {
+ t = (int8_t)(*buf++);
+ s->decorr[s->terms - i - 1].weightA = t << 3;
+ if(s->decorr[s->terms - i - 1].weightA > 0)
+ s->decorr[s->terms - i - 1].weightA += (s->decorr[s->terms - i - 1].weightA + 64) >> 7;
+ if(s->stereo){
+ t = (int8_t)(*buf++);
+ s->decorr[s->terms - i - 1].weightB = t << 3;
+ if(s->decorr[s->terms - i - 1].weightB > 0)
+ s->decorr[s->terms - i - 1].weightB += (s->decorr[s->terms - i - 1].weightB + 64) >> 7;
+ }
+ }
+ got_weights = 1;
+ break;
+ case WP_ID_DECSAMPLES:
+ if(!got_terms){
+ av_log(avctx, AV_LOG_ERROR, "No decorrelation terms met\n");
+ continue;
+ }
+ t = 0;
+ for(i = s->terms - 1; (i >= 0) && (t < size); i--) {
+ if(s->decorr[i].value > 8){
+ s->decorr[i].samplesA[0] = wp_exp2(LE_16(buf)); buf += 2;
+ s->decorr[i].samplesA[1] = wp_exp2(LE_16(buf)); buf += 2;
+ if(s->stereo){
+ s->decorr[i].samplesB[0] = wp_exp2(LE_16(buf)); buf += 2;
+ s->decorr[i].samplesB[1] = wp_exp2(LE_16(buf)); buf += 2;
+ t += 4;
+ }
+ t += 4;
+ }else if(s->decorr[i].value < 0){
+ s->decorr[i].samplesA[0] = wp_exp2(LE_16(buf)); buf += 2;
+ s->decorr[i].samplesB[0] = wp_exp2(LE_16(buf)); buf += 2;
+ t += 4;
+ }else{
+ for(j = 0; j < s->decorr[i].value; j++){
+ s->decorr[i].samplesA[j] = wp_exp2(LE_16(buf)); buf += 2;
+ if(s->stereo){
+ s->decorr[i].samplesB[j] = wp_exp2(LE_16(buf)); buf += 2;
+ }
+ }
+ t += s->decorr[i].value * 2 * avctx->channels;
+ }
+ }
+ got_samples = 1;
+ break;
+ case WP_ID_ENTROPY:
+ if(size != 6 * avctx->channels){
+ av_log(avctx, AV_LOG_ERROR, "Entropy vars size should be %i, got %i", 6 * avctx->channels, size);
+ buf += ssize;
+ continue;
+ }
+ for(i = 0; i < 3 * avctx->channels; i++){
+ s->median[i] = wp_exp2(LE_16(buf));
+ buf += 2;
+ }
+ got_entropy = 1;
+ break;
+ case WP_ID_DATA:
+ init_get_bits(&s->gb, buf, size * 8);
+ s->data_size = size * 8;
+ buf += size;
+ got_bs = 1;
+ break;
+ default:
+ buf += size;
+ }
+ if(id & WP_IDF_ODD) buf++;
+ }
+ if(!got_terms){
+ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation terms\n");
+ return -1;
+ }
+ if(!got_weights){
+ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation weights\n");
+ return -1;
+ }
+ if(!got_samples){
+ av_log(avctx, AV_LOG_ERROR, "No block with decorrelation samples\n");
+ return -1;
+ }
+ if(!got_entropy){
+ av_log(avctx, AV_LOG_ERROR, "No block with entropy info\n");
+ return -1;
+ }
+ if(!got_bs){
+ av_log(avctx, AV_LOG_ERROR, "Packed samples not found\n");
+ return -1;
+ }
+
+ if(s->stereo)
+ samplecount = wv_unpack_stereo(s, &s->gb, samples);
+ else
+ samplecount = wv_unpack_mono(s, &s->gb, samples);
+ *data_size = samplecount * 2;
+
+ return buf_size;
+}
+
+AVCodec wavpack_decoder = {
+ "wavpack",
+ CODEC_TYPE_AUDIO,
+ CODEC_ID_WAVPACK,
+ sizeof(WavpackContext),
+ wavpack_decode_init,
+ NULL,
+ wavpack_decode_close,
+ wavpack_decode_frame,
+};
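
wavpack_decode_frame() above walks a sequence of metadata sub-blocks: one id byte whose low 5 bits select the block type and whose high bits carry the IGNORE/ODD/LONG flags, then a size counted in 16-bit words, 8-bit normally or 24-bit when WP_IDF_LONG is set; WP_IDF_ODD means the last byte of the doubled size is padding, not payload. A standalone sketch of just that header parse (struct and field names are local to the sketch):

#include <stdint.h>
#include <stddef.h>

typedef struct WvBlockHeader {
    int id;             /* block type, low 5 bits of the id byte          */
    int payload_size;   /* bytes of real payload                          */
    int padded_size;    /* bytes actually occupied in the stream (even)   */
    int ignore;         /* WP_IDF_IGNORE: block should be skipped         */
} WvBlockHeader;

/* Returns the number of header bytes consumed, or -1 on a truncated buffer. */
static int parse_wv_block_header(const uint8_t *buf, size_t len, WvBlockHeader *h)
{
    size_t pos = 0;
    if (len < 2)
        return -1;
    int id_byte = buf[pos++];
    int size    = buf[pos++];
    if (id_byte & 0x80) {                       /* WP_IDF_LONG: 24-bit size     */
        if (len < 4)
            return -1;
        size |= buf[pos++] << 8;
        size |= buf[pos++] << 16;
    }
    h->id           = id_byte & 0x1F;           /* WP_IDF_MASK                  */
    h->ignore       = id_byte & 0x20;           /* WP_IDF_IGNORE                */
    h->padded_size  = size * 2;                 /* size is given in 16-bit words */
    h->payload_size = h->padded_size - ((id_byte & 0x40) ? 1 : 0); /* WP_IDF_ODD */
    return (int)pos;
}
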
diff --git a/src/libffmpeg/libavcodec/wmadata.h b/contrib/ffmpeg/libavcodec/wmadata.h
index e12c4792e..35e545ce6 100644
--- a/src/libffmpeg/libavcodec/wmadata.h
+++ b/contrib/ffmpeg/libavcodec/wmadata.h
@@ -1,3 +1,24 @@
+/*
+ * WMA compatible decoder
+ * copyright (c) 2002 The FFmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file wmadata.h
* Various WMA tables.
diff --git a/src/libffmpeg/libavcodec/wmadec.c b/contrib/ffmpeg/libavcodec/wmadec.c
index 227c9695b..684aea2c8 100644
--- a/src/libffmpeg/libavcodec/wmadec.c
+++ b/contrib/ffmpeg/libavcodec/wmadec.c
@@ -2,18 +2,20 @@
* WMA compatible decoder
* Copyright (c) 2002 The FFmpeg Project.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -130,6 +132,7 @@ typedef struct WMADecodeContext {
float lsp_pow_e_table[256];
float lsp_pow_m_table1[(1 << LSP_POW_BITS)];
float lsp_pow_m_table2[(1 << LSP_POW_BITS)];
+ DSPContext dsp;
#ifdef TRACE
int frame_count;
@@ -228,6 +231,8 @@ static int wma_decode_init(AVCodecContext * avctx)
s->bit_rate = avctx->bit_rate;
s->block_align = avctx->block_align;
+ dsputil_init(&s->dsp, avctx);
+
if (avctx->codec->id == CODEC_ID_WMAV1) {
s->version = 1;
} else {
@@ -712,13 +717,8 @@ static int wma_decode_block(WMADecodeContext *s)
{
int n, v, a, ch, code, bsize;
int coef_nb_bits, total_gain, parse_exponents;
- float window[BLOCK_MAX_SIZE * 2];
-// XXX: FIXME!! there's a bug somewhere which makes this mandatory under altivec
-#ifdef HAVE_ALTIVEC
- volatile int nb_coefs[MAX_CHANNELS] __attribute__((aligned(16)));
-#else
+ DECLARE_ALIGNED_16(float, window[BLOCK_MAX_SIZE * 2]);
int nb_coefs[MAX_CHANNELS];
-#endif
float mdct_norm;
#ifdef TRACE
@@ -873,7 +873,7 @@ static int wma_decode_block(WMADecodeContext *s)
VLC *coef_vlc;
int level, run, sign, tindex;
int16_t *ptr, *eptr;
- const int16_t *level_table, *run_table;
+ const uint16_t *level_table, *run_table;
/* special VLC tables are used for ms stereo because
there is potentially less energy there */
@@ -1109,36 +1109,26 @@ static int wma_decode_block(WMADecodeContext *s)
if (s->channel_coded[ch]) {
DECLARE_ALIGNED_16(FFTSample, output[BLOCK_MAX_SIZE * 2]);
float *ptr;
- int i, n4, index, n;
+ int n4, index, n;
n = s->block_len;
n4 = s->block_len / 2;
- ff_imdct_calc(&s->mdct_ctx[bsize],
+ s->mdct_ctx[bsize].fft.imdct_calc(&s->mdct_ctx[bsize],
output, s->coefs[ch], s->mdct_tmp);
 /* XXX: optimize all that by building the window and
 multiplying/adding at the same time */
- /* multiply by the window */
- for(i=0;i<n * 2;i++) {
- output[i] *= window[i];
- }
- /* add in the frame */
+ /* multiply by the window and add in the frame */
index = (s->frame_len / 2) + s->block_pos - n4;
ptr = &s->frame_out[ch][index];
- for(i=0;i<n * 2;i++) {
- *ptr += output[i];
- ptr++;
- }
+ s->dsp.vector_fmul_add_add(ptr,window,output,ptr,0,2*n,1);
/* specific fast case for ms-stereo : add to second
channel if it is not coded */
if (s->ms_stereo && !s->channel_coded[1]) {
ptr = &s->frame_out[1][index];
- for(i=0;i<n * 2;i++) {
- *ptr += output[i];
- ptr++;
- }
+ s->dsp.vector_fmul_add_add(ptr,window,output,ptr,0,2*n,1);
}
}
}
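
The wmadec.c hunk above folds the explicit window-multiply and overlap-add loops into a single dsputil call. For the arguments used here, the call behaves like the scalar reference below (a sketch of the generic C fallback's behaviour, not the optimized SIMD routines):

/* Reference behaviour of
 *   dsp.vector_fmul_add_add(ptr, window, output, ptr, 0, 2*n, 1);
 * i.e. window each IMDCT output sample and overlap-add it into the frame. */
static void vector_fmul_add_add_ref(float *dst, const float *win,
                                    const float *src, const float *acc, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = win[i] * src[i] + acc[i];
}
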
diff --git a/src/libffmpeg/libavcodec/wmv2.c b/contrib/ffmpeg/libavcodec/wmv2.c
index 3f405af4f..5abc51775 100644
--- a/src/libffmpeg/libavcodec/wmv2.c
+++ b/contrib/ffmpeg/libavcodec/wmv2.c
@@ -1,18 +1,20 @@
/*
* Copyright (c) 2002 The FFmpeg Project.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -511,7 +513,7 @@ static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){
C = s->current_picture.motion_val[0][xy + 2 - wrap];
if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag)
- diff= FFMAX(ABS(A[0] - B[0]), ABS(A[1] - B[1]));
+ diff= FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1]));
else
diff=0;
@@ -848,5 +850,6 @@ AVCodec wmv2_encoder = {
wmv2_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif
diff --git a/src/libffmpeg/libavcodec/wnv1.c b/contrib/ffmpeg/libavcodec/wnv1.c
index 335a04f35..46b31a5c5 100644
--- a/src/libffmpeg/libavcodec/wnv1.c
+++ b/contrib/ffmpeg/libavcodec/wnv1.c
@@ -2,18 +2,20 @@
* Winnov WNV1 codec
* Copyright (c) 2005 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/ws-snd1.c b/contrib/ffmpeg/libavcodec/ws-snd1.c
index aa85b4526..eb4fe81d3 100644
--- a/src/libffmpeg/libavcodec/ws-snd1.c
+++ b/contrib/ffmpeg/libavcodec/ws-snd1.c
@@ -2,18 +2,20 @@
* Westwood SNDx codecs
* Copyright (c) 2005 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avcodec.h"
@@ -27,9 +29,6 @@
* http://www.multimedia.cx
*/
-typedef struct {
-} WSSNDContext;
-
static const char ws_adpcm_2bit[] = { -2, -1, 0, 1};
static const char ws_adpcm_4bit[] = {
-9, -8, -6, -5, -4, -3, -2, -1,
@@ -137,7 +136,7 @@ AVCodec ws_snd1_decoder = {
"ws_snd1",
CODEC_TYPE_AUDIO,
CODEC_ID_WESTWOOD_SND1,
- sizeof(WSSNDContext),
+ 0,
ws_snd_decode_init,
NULL,
NULL,
diff --git a/contrib/ffmpeg/libavcodec/x264.c b/contrib/ffmpeg/libavcodec/x264.c
new file mode 100644
index 000000000..b08678779
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/x264.c
@@ -0,0 +1,299 @@
+/*
+ * H.264 encoding using the x264 library
+ * Copyright (C) 2005 Mans Rullgard <mru@inprovide.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include <x264.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+typedef struct X264Context {
+ x264_param_t params;
+ x264_t *enc;
+ x264_picture_t pic;
+ AVFrame out_pic;
+} X264Context;
+
+static void
+X264_log(void *p, int level, const char *fmt, va_list args)
+{
+ static const int level_map[] = {
+ [X264_LOG_ERROR] = AV_LOG_ERROR,
+ [X264_LOG_WARNING] = AV_LOG_ERROR,
+ [X264_LOG_INFO] = AV_LOG_INFO,
+ [X264_LOG_DEBUG] = AV_LOG_DEBUG
+ };
+
+ if(level < 0 || level > X264_LOG_DEBUG)
+ return;
+
+ av_vlog(p, level_map[level], fmt, args);
+}
+
+
+static int
+encode_nals(uint8_t *buf, int size, x264_nal_t *nals, int nnal)
+{
+ uint8_t *p = buf;
+ int i;
+
+ for(i = 0; i < nnal; i++){
+ int s = x264_nal_encode(p, &size, 1, nals + i);
+ if(s < 0)
+ return -1;
+ p += s;
+ }
+
+ return p - buf;
+}
+
+static int
+X264_frame(AVCodecContext *ctx, uint8_t *buf, int bufsize, void *data)
+{
+ X264Context *x4 = ctx->priv_data;
+ AVFrame *frame = data;
+ x264_nal_t *nal;
+ int nnal, i;
+ x264_picture_t pic_out;
+
+ x4->pic.img.i_csp = X264_CSP_I420;
+ x4->pic.img.i_plane = 3;
+
+ if (frame) {
+ for(i = 0; i < 3; i++){
+ x4->pic.img.plane[i] = frame->data[i];
+ x4->pic.img.i_stride[i] = frame->linesize[i];
+ }
+
+ x4->pic.i_pts = frame->pts;
+ x4->pic.i_type = X264_TYPE_AUTO;
+ }
+
+ if(x264_encoder_encode(x4->enc, &nal, &nnal, frame? &x4->pic: NULL,
+ &pic_out))
+ return -1;
+
+ bufsize = encode_nals(buf, bufsize, nal, nnal);
+ if(bufsize < 0)
+ return -1;
+
+ /* FIXME: dts */
+ x4->out_pic.pts = pic_out.i_pts;
+
+ switch(pic_out.i_type){
+ case X264_TYPE_IDR:
+ case X264_TYPE_I:
+ x4->out_pic.pict_type = FF_I_TYPE;
+ break;
+ case X264_TYPE_P:
+ x4->out_pic.pict_type = FF_P_TYPE;
+ break;
+ case X264_TYPE_B:
+ case X264_TYPE_BREF:
+ x4->out_pic.pict_type = FF_B_TYPE;
+ break;
+ }
+
+ x4->out_pic.key_frame = pic_out.i_type == X264_TYPE_IDR;
+ x4->out_pic.quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
+
+ return bufsize;
+}
+
+static int
+X264_close(AVCodecContext *avctx)
+{
+ X264Context *x4 = avctx->priv_data;
+
+ if(x4->enc)
+ x264_encoder_close(x4->enc);
+
+ return 0;
+}
+
+static int
+X264_init(AVCodecContext *avctx)
+{
+ X264Context *x4 = avctx->priv_data;
+
+ x264_param_default(&x4->params);
+
+ x4->params.pf_log = X264_log;
+ x4->params.p_log_private = avctx;
+
+ x4->params.i_keyint_max = avctx->gop_size;
+ x4->params.rc.i_bitrate = avctx->bit_rate / 1000;
+ x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000;
+ x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000;
+ x4->params.rc.b_stat_write = (avctx->flags & CODEC_FLAG_PASS1);
+ if(avctx->flags & CODEC_FLAG_PASS2) x4->params.rc.b_stat_read = 1;
+ else{
+ if(avctx->crf){
+ x4->params.rc.i_rc_method = X264_RC_CRF;
+ x4->params.rc.f_rf_constant = avctx->crf;
+ }else if(avctx->cqp > -1){
+ x4->params.rc.i_rc_method = X264_RC_CQP;
+ x4->params.rc.i_qp_constant = avctx->cqp;
+ }
+ }
+
+ // if neither crf nor cqp modes are selected we have to enable the RC
+ // we do it this way because we cannot check if the bitrate has been set
+ if(!(avctx->crf || (avctx->cqp > -1))) x4->params.rc.i_rc_method = X264_RC_ABR;
+
+ x4->params.i_bframe = avctx->max_b_frames;
+ x4->params.b_cabac = avctx->coder_type == FF_CODER_TYPE_AC;
+ x4->params.b_bframe_adaptive = avctx->b_frame_strategy;
+ x4->params.i_bframe_bias = avctx->bframebias;
+ x4->params.b_bframe_pyramid = (avctx->flags2 & CODEC_FLAG2_BPYRAMID);
+ avctx->has_b_frames= (avctx->flags2 & CODEC_FLAG2_BPYRAMID) ? 2 : !!avctx->max_b_frames;
+
+ x4->params.i_keyint_min = avctx->keyint_min;
+ if(x4->params.i_keyint_min > x4->params.i_keyint_max)
+ x4->params.i_keyint_min = x4->params.i_keyint_max;
+
+ x4->params.i_scenecut_threshold = avctx->scenechange_threshold;
+
+ x4->params.b_deblocking_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER);
+ x4->params.i_deblocking_filter_alphac0 = avctx->deblockalpha;
+ x4->params.i_deblocking_filter_beta = avctx->deblockbeta;
+
+ x4->params.rc.i_qp_min = avctx->qmin;
+ x4->params.rc.i_qp_max = avctx->qmax;
+ x4->params.rc.i_qp_step = avctx->max_qdiff;
+
+ x4->params.rc.f_qcompress = avctx->qcompress; /* 0.0 => cbr, 1.0 => constant qp */
+ x4->params.rc.f_qblur = avctx->qblur; /* temporally blur quants */
+ x4->params.rc.f_complexity_blur = avctx->complexityblur;
+
+ x4->params.i_frame_reference = avctx->refs;
+
+ x4->params.i_width = avctx->width;
+ x4->params.i_height = avctx->height;
+ x4->params.vui.i_sar_width = avctx->sample_aspect_ratio.num;
+ x4->params.vui.i_sar_height = avctx->sample_aspect_ratio.den;
+ x4->params.i_fps_num = avctx->time_base.den;
+ x4->params.i_fps_den = avctx->time_base.num;
+
+ x4->params.analyse.inter = 0;
+ if(avctx->partitions){
+ if(avctx->partitions & X264_PART_I4X4)
+ x4->params.analyse.inter |= X264_ANALYSE_I4x4;
+ if(avctx->partitions & X264_PART_I8X8)
+ x4->params.analyse.inter |= X264_ANALYSE_I8x8;
+ if(avctx->partitions & X264_PART_P8X8)
+ x4->params.analyse.inter |= X264_ANALYSE_PSUB16x16;
+ if(avctx->partitions & X264_PART_P4X4)
+ x4->params.analyse.inter |= X264_ANALYSE_PSUB8x8;
+ if(avctx->partitions & X264_PART_B8X8)
+ x4->params.analyse.inter |= X264_ANALYSE_BSUB16x16;
+ }
+
+ x4->params.analyse.i_direct_mv_pred = avctx->directpred;
+
+ x4->params.analyse.b_weighted_bipred = (avctx->flags2 & CODEC_FLAG2_WPRED);
+
+ if(avctx->me_method == ME_EPZS)
+ x4->params.analyse.i_me_method = X264_ME_DIA;
+ else if(avctx->me_method == ME_HEX)
+ x4->params.analyse.i_me_method = X264_ME_HEX;
+ else if(avctx->me_method == ME_UMH)
+ x4->params.analyse.i_me_method = X264_ME_UMH;
+ else if(avctx->me_method == ME_FULL)
+ x4->params.analyse.i_me_method = X264_ME_ESA;
+ else x4->params.analyse.i_me_method = X264_ME_HEX;
+
+ x4->params.analyse.i_me_range = avctx->me_range;
+ x4->params.analyse.i_subpel_refine = avctx->me_subpel_quality;
+
+ x4->params.analyse.b_bframe_rdo = (avctx->flags2 & CODEC_FLAG2_BRDO);
+ x4->params.analyse.b_mixed_references =
+ (avctx->flags2 & CODEC_FLAG2_MIXED_REFS);
+ x4->params.analyse.b_chroma_me = (avctx->me_cmp & FF_CMP_CHROMA);
+ x4->params.analyse.b_transform_8x8 = (avctx->flags2 & CODEC_FLAG2_8X8DCT);
+ x4->params.analyse.b_fast_pskip = (avctx->flags2 & CODEC_FLAG2_FASTPSKIP);
+
+ x4->params.analyse.i_trellis = avctx->trellis;
+ x4->params.analyse.i_noise_reduction = avctx->noise_reduction;
+
+ if(avctx->level > 0) x4->params.i_level_idc = avctx->level;
+
+ x4->params.rc.f_rate_tolerance =
+ (float)avctx->bit_rate_tolerance/avctx->bit_rate;
+
+ if((avctx->rc_buffer_size != 0) &&
+ (avctx->rc_initial_buffer_occupancy <= avctx->rc_buffer_size)){
+ x4->params.rc.f_vbv_buffer_init =
+ (float)avctx->rc_initial_buffer_occupancy/avctx->rc_buffer_size;
+ }
+ else x4->params.rc.f_vbv_buffer_init = 0.9;
+
+ x4->params.rc.f_ip_factor = 1/fabs(avctx->i_quant_factor);
+ x4->params.rc.f_pb_factor = avctx->b_quant_factor;
+ x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset;
+ x4->params.rc.psz_rc_eq = avctx->rc_eq;
+
+ x4->params.analyse.b_psnr = (avctx->flags & CODEC_FLAG_PSNR);
+ x4->params.i_log_level = X264_LOG_DEBUG;
+
+ x4->params.b_aud = (avctx->flags2 & CODEC_FLAG2_AUD);
+
+ x4->params.i_threads = avctx->thread_count;
+
+ if(avctx->flags & CODEC_FLAG_GLOBAL_HEADER){
+ x4->params.b_repeat_headers = 0;
+ }
+
+ x4->enc = x264_encoder_open(&x4->params);
+ if(!x4->enc)
+ return -1;
+
+ avctx->coded_frame = &x4->out_pic;
+
+ if(avctx->flags & CODEC_FLAG_GLOBAL_HEADER){
+ x264_nal_t *nal;
+ int nnal, i, s = 0;
+
+ x264_encoder_headers(x4->enc, &nal, &nnal);
+
+ /* 5 bytes NAL header + worst case escaping */
+ for(i = 0; i < nnal; i++)
+ s += 5 + nal[i].i_payload * 4 / 3;
+
+ avctx->extradata = av_malloc(s);
+ avctx->extradata_size = encode_nals(avctx->extradata, s, nal, nnal);
+ }
+
+ return 0;
+}
+
+AVCodec x264_encoder = {
+ .name = "h264",
+ .type = CODEC_TYPE_VIDEO,
+ .id = CODEC_ID_H264,
+ .priv_data_size = sizeof(X264Context),
+ .init = X264_init,
+ .encode = X264_frame,
+ .close = X264_close,
+ .capabilities = CODEC_CAP_DELAY,
+ .pix_fmts = (enum PixelFormat[]) { PIX_FMT_YUV420P, -1 }
+};
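
When CODEC_FLAG_GLOBAL_HEADER is set, X264_init() above sizes the extradata buffer as 5 bytes of start code and NAL header plus i_payload * 4 / 3 per NAL: start-code emulation prevention can insert at most one 0x03 byte for every three payload bytes, so the escaped payload never grows beyond 4/3 of its original size. A sketch of that bound (helper name is illustrative):

/* Worst-case size of one escaped NAL in the extradata buffer: start code plus
 * NAL header (5 bytes), plus the payload expanded by at most 4/3 through
 * 0x03 emulation-prevention bytes. */
static int nal_worst_case_size(int payload_bytes)
{
    return 5 + payload_bytes * 4 / 3;
}
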
diff --git a/src/libffmpeg/libavcodec/xan.c b/contrib/ffmpeg/libavcodec/xan.c
index 7ccc65c00..56ce87a95 100644
--- a/src/libffmpeg/libavcodec/xan.c
+++ b/contrib/ffmpeg/libavcodec/xan.c
@@ -2,18 +2,20 @@
* Wing Commander/Xan Video Decoder
* Copyright (C) 2003 the ffmpeg project
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavcodec/xl.c b/contrib/ffmpeg/libavcodec/xl.c
index d626ff12a..67ad237e1 100644
--- a/src/libffmpeg/libavcodec/xl.c
+++ b/contrib/ffmpeg/libavcodec/xl.c
@@ -2,18 +2,20 @@
* Miro VideoXL codec
* Copyright (c) 2004 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -31,7 +33,7 @@ typedef struct VideoXLContext{
AVFrame pic;
} VideoXLContext;
-const int xl_table[32] = {
+static const int xl_table[32] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 12, 15, 20, 25, 34, 46,
64, 82, 94, 103, 108, 113, 116, 119,
diff --git a/contrib/ffmpeg/libavcodec/xvid_internal.h b/contrib/ffmpeg/libavcodec/xvid_internal.h
new file mode 100644
index 000000000..49c59c205
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/xvid_internal.h
@@ -0,0 +1,32 @@
+/*
+ * copyright (C) 2006 Corey Hickey
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef XVID_INTERNAL_H
+#define XVID_INTERNAL_H
+
+/**
+ * @file xvid_internal.h
+ * common functions for use with the XviD wrappers
+ */
+
+
+int av_tempfile(char *prefix, char **filename);
+
+#endif /* XVID_INTERNAL_H */
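
xvid_internal.h only declares av_tempfile(); the implementation lives elsewhere in the tree and is not shown in this patch. One plausible shape for such a helper, assuming POSIX mkstemp() semantics and shown here purely as a sketch (the real function may allocate and fall back differently):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Create a unique temporary file whose name starts with 'prefix'; return an
 * open fd and place the malloc'ed name in *filename. Sketch only. */
int av_tempfile_sketch(char *prefix, char **filename)
{
    char *name = malloc(strlen(prefix) + 12);   /* "/tmp/" + prefix + "XXXXXX" + NUL */
    int fd;
    if (!name)
        return -1;
    sprintf(name, "/tmp/%sXXXXXX", prefix);
    fd = mkstemp(name);
    if (fd < 0) {
        free(name);
        return -1;
    }
    *filename = name;
    return fd;
}
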
diff --git a/contrib/ffmpeg/libavcodec/xvid_rc.c b/contrib/ffmpeg/libavcodec/xvid_rc.c
new file mode 100644
index 000000000..6a0029e6d
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/xvid_rc.c
@@ -0,0 +1,148 @@
+/*
+ * xvid Rate control wrapper for lavc video encoders
+ *
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <xvid.h>
+#include <unistd.h>
+#include "avcodec.h"
+#include "xvid_internal.h"
+//#include "dsputil.h"
+#include "mpegvideo.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+extern unsigned int xvid_debug;
+
+int ff_xvid_rate_control_init(MpegEncContext *s){
+ char *tmp_name;
+ int fd, i;
+ xvid_plg_create_t xvid_plg_create;
+ xvid_plugin_2pass2_t xvid_2pass2;
+
+//xvid_debug=-1;
+
+ fd=av_tempfile("xvidrc.", &tmp_name);
+ if (fd == -1) {
+ av_log(NULL, AV_LOG_ERROR, "Can't create temporary pass2 file.\n");
+ return -1;
+ }
+
+ for(i=0; i<s->rc_context.num_entries; i++){
+ static const char *frame_types = " ipbs";
+ char tmp[256];
+ RateControlEntry *rce;
+
+ rce= &s->rc_context.entry[i];
+
+ snprintf(tmp, sizeof(tmp), "%c %d %d %d %d %d %d\n",
+ frame_types[rce->pict_type], (int)lrintf(rce->qscale / FF_QP2LAMBDA), rce->i_count, s->mb_num - rce->i_count - rce->skip_count,
+ rce->skip_count, (rce->i_tex_bits + rce->p_tex_bits + rce->misc_bits+7)/8, (rce->header_bits+rce->mv_bits+7)/8);
+
+//av_log(NULL, AV_LOG_ERROR, "%s\n", tmp);
+ write(fd, tmp, strlen(tmp));
+ }
+
+ close(fd);
+
+ memset(&xvid_2pass2, 0, sizeof(xvid_2pass2));
+ xvid_2pass2.version= XVID_MAKE_VERSION(1,1,0);
+ xvid_2pass2.filename= tmp_name;
+ xvid_2pass2.bitrate= s->avctx->bit_rate;
+ xvid_2pass2.vbv_size= s->avctx->rc_buffer_size;
+ xvid_2pass2.vbv_maxrate= s->avctx->rc_max_rate;
+ xvid_2pass2.vbv_initial= s->avctx->rc_initial_buffer_occupancy;
+
+ memset(&xvid_plg_create, 0, sizeof(xvid_plg_create));
+ xvid_plg_create.version= XVID_MAKE_VERSION(1,1,0);
+ xvid_plg_create.fbase= s->avctx->time_base.den;
+ xvid_plg_create.fincr= s->avctx->time_base.num;
+ xvid_plg_create.param= &xvid_2pass2;
+
+ if(xvid_plugin_2pass2(NULL, XVID_PLG_CREATE, &xvid_plg_create, &s->rc_context.non_lavc_opaque)<0){
+ av_log(NULL, AV_LOG_ERROR, "xvid_plugin_2pass2 failed\n");
+ return -1;
+ }
+ return 0;
+}
+
+float ff_xvid_rate_estimate_qscale(MpegEncContext *s, int dry_run){
+ xvid_plg_data_t xvid_plg_data;
+
+ memset(&xvid_plg_data, 0, sizeof(xvid_plg_data));
+ xvid_plg_data.version= XVID_MAKE_VERSION(1,1,0);
+ xvid_plg_data.width = s->width;
+ xvid_plg_data.height= s->height;
+ xvid_plg_data.mb_width = s->mb_width;
+ xvid_plg_data.mb_height= s->mb_height;
+ xvid_plg_data.fbase= s->avctx->time_base.den;
+ xvid_plg_data.fincr= s->avctx->time_base.num;
+ xvid_plg_data.min_quant[0]= s->avctx->qmin;
+ xvid_plg_data.min_quant[1]= s->avctx->qmin;
+ xvid_plg_data.min_quant[2]= s->avctx->qmin; //FIXME i/b factor & offset
+ xvid_plg_data.max_quant[0]= s->avctx->qmax;
+ xvid_plg_data.max_quant[1]= s->avctx->qmax;
+ xvid_plg_data.max_quant[2]= s->avctx->qmax; //FIXME i/b factor & offset
+ xvid_plg_data.bquant_offset = 0; // 100 * s->avctx->b_quant_offset;
+ xvid_plg_data.bquant_ratio = 100; // * s->avctx->b_quant_factor;
+
+#if 0
+ xvid_plg_data.stats.hlength= X
+#endif
+
+ if(!s->rc_context.dry_run_qscale){
+ if(s->picture_number){
+ xvid_plg_data.length=
+ xvid_plg_data.stats.length= (s->frame_bits + 7)/8;
+ xvid_plg_data.frame_num= s->rc_context.last_picture_number;
+ xvid_plg_data.quant= s->qscale;
+
+ xvid_plg_data.type= s->last_pict_type;
+ if(xvid_plugin_2pass2(s->rc_context.non_lavc_opaque, XVID_PLG_AFTER, &xvid_plg_data, NULL)){
+ av_log(s->avctx, AV_LOG_ERROR, "xvid_plugin_2pass2(handle, XVID_PLG_AFTER, ...) FAILED\n");
+ return -1;
+ }
+ }
+ s->rc_context.last_picture_number=
+ xvid_plg_data.frame_num= s->picture_number;
+ xvid_plg_data.quant= 0;
+ if(xvid_plugin_2pass2(s->rc_context.non_lavc_opaque, XVID_PLG_BEFORE, &xvid_plg_data, NULL)){
+ av_log(s->avctx, AV_LOG_ERROR, "xvid_plugin_2pass2(handle, XVID_PLG_BEFORE, ...) FAILED\n");
+ return -1;
+ }
+ s->rc_context.dry_run_qscale= xvid_plg_data.quant;
+ }
+ xvid_plg_data.quant= s->rc_context.dry_run_qscale;
+ if(!dry_run)
+ s->rc_context.dry_run_qscale= 0;
+
+ if(s->pict_type == B_TYPE) //FIXME this is not exactly identical to xvid
+ return xvid_plg_data.quant * FF_QP2LAMBDA * s->avctx->b_quant_factor + s->avctx->b_quant_offset;
+ else
+ return xvid_plg_data.quant * FF_QP2LAMBDA;
+}
+
+void ff_xvid_rate_control_uninit(MpegEncContext *s){
+ xvid_plg_destroy_t xvid_plg_destroy;
+
+ xvid_plugin_2pass2(s->rc_context.non_lavc_opaque, XVID_PLG_DESTROY, &xvid_plg_destroy, NULL);
+}
+
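ff_xvid_rate_control_init() above converts libavcodec's first-pass statistics into the plain-text format XviD's 2pass2 plugin reads: one line per frame containing the frame type letter, quantizer, intra/inter/skipped macroblock counts, texture bytes, and header-plus-motion bytes. A tiny sketch printing one such line for a hypothetical I-frame (all values made up for illustration):

#include <stdio.h>

int main(void)
{
    /* Hypothetical I-frame: qscale 4, 396 intra MBs, no inter or skipped MBs,
     * 1200 texture bytes, 40 header+motion bytes. */
    printf("%c %d %d %d %d %d %d\n", 'i', 4, 396, 0, 0, 1200, 40);
    return 0;
}
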
diff --git a/contrib/ffmpeg/libavcodec/xvidff.c b/contrib/ffmpeg/libavcodec/xvidff.c
new file mode 100644
index 000000000..590fe4b30
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/xvidff.c
@@ -0,0 +1,768 @@
+/*
+ * Interface to xvidcore for mpeg4 encoding
+ * Copyright (c) 2004 Adam Thayer <krevnik@comcast.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file xvidmpeg4.c
+ * Interface to xvidcore for MPEG-4 compliant encoding.
+ * @author Adam Thayer (krevnik@comcast.net)
+ */
+
+#include <xvid.h>
+#include <unistd.h>
+#include "common.h"
+#include "avcodec.h"
+#include "xvid_internal.h"
+
+/**
+ * Buffer management macros.
+ */
+#define BUFFER_SIZE 1024
+#define BUFFER_REMAINING(x) (BUFFER_SIZE - strlen(x))
+#define BUFFER_CAT(x) (&((x)[strlen(x)]))
+
+/* For PPC Use */
+#if HAVE_ALTIVEC==1
+extern int has_altivec(void);
+#endif
+
+/**
+ * Structure for the private XviD context.
+ * This stores all the private context for the codec.
+ */
+typedef struct xvid_context {
+ void *encoder_handle; /** Handle for XviD Encoder */
+ int xsize, ysize; /** Frame size */
+ int vop_flags; /** VOP flags for XviD Encoder */
+ int vol_flags; /** VOL flags for XviD Encoder */
+ int me_flags; /** Motion Estimation flags */
+ int qscale; /** Do we use constant scale? */
+ int quicktime_format; /** Are we in a QT-based format? */
+ AVFrame encoded_picture; /** Encoded frame information */
+ char *twopassbuffer; /** Character buffer for two-pass */
+ char *old_twopassbuffer; /** Old character buffer (two-pass) */
+ char *twopassfile; /** second pass temp file name */
+    unsigned char *intra_matrix;   /** I-Frame Quant Matrix */
+    unsigned char *inter_matrix;   /** P-Frame Quant Matrix */
+} xvid_context_t;
+
+/**
+ * Structure for the private first-pass plugin.
+ */
+typedef struct xvid_ff_pass1 {
+ int version; /** XviD version */
+ xvid_context_t *context; /** Pointer to private context */
+} xvid_ff_pass1_t;
+
+/* Prototypes - See function implementation for details */
+int xvid_strip_vol_header(AVCodecContext *avctx, unsigned char *frame, unsigned int header_len, unsigned int frame_len);
+int xvid_ff_2pass(void *ref, int opt, void *p1, void *p2);
+void xvid_correct_framerate(AVCodecContext *avctx);
+
+/**
+ * Creates the private context for the encoder.
+ * All buffers are allocated, settings are loaded from the user,
+ * and the encoder context created.
+ *
+ * @param avctx AVCodecContext pointer to context
+ * @return Returns 0 on success, -1 on failure
+ */
+int ff_xvid_encode_init(AVCodecContext *avctx) {
+ int xerr, i;
+ int xvid_flags = avctx->flags;
+ xvid_context_t *x = avctx->priv_data;
+ uint16_t *intra, *inter;
+ int fd;
+
+ xvid_plugin_single_t single;
+ xvid_ff_pass1_t rc2pass1;
+ xvid_plugin_2pass2_t rc2pass2;
+ xvid_gbl_init_t xvid_gbl_init;
+ xvid_enc_create_t xvid_enc_create;
+ xvid_enc_plugin_t plugins[7];
+
+ /* Bring in VOP flags from ffmpeg command-line */
+ x->vop_flags = XVID_VOP_HALFPEL; /* Bare minimum quality */
+ if( xvid_flags & CODEC_FLAG_4MV )
+ x->vop_flags |= XVID_VOP_INTER4V; /* Level 3 */
+ if( xvid_flags & CODEC_FLAG_TRELLIS_QUANT)
+ x->vop_flags |= XVID_VOP_TRELLISQUANT; /* Level 5 */
+ if( xvid_flags & CODEC_FLAG_AC_PRED )
+ x->vop_flags |= XVID_VOP_HQACPRED; /* Level 6 */
+ if( xvid_flags & CODEC_FLAG_GRAY )
+ x->vop_flags |= XVID_VOP_GREYSCALE;
+
+ /* Decide which ME quality setting to use */
+ x->me_flags = 0;
+ switch( avctx->me_method ) {
+ case ME_FULL: /* Quality 6 */
+ x->me_flags |= XVID_ME_EXTSEARCH16
+ | XVID_ME_EXTSEARCH8;
+
+ case ME_EPZS: /* Quality 4 */
+ x->me_flags |= XVID_ME_ADVANCEDDIAMOND8
+ | XVID_ME_HALFPELREFINE8
+ | XVID_ME_CHROMA_PVOP
+ | XVID_ME_CHROMA_BVOP;
+
+ case ME_LOG: /* Quality 2 */
+ case ME_PHODS:
+ case ME_X1:
+ x->me_flags |= XVID_ME_ADVANCEDDIAMOND16
+ | XVID_ME_HALFPELREFINE16;
+
+ case ME_ZERO: /* Quality 0 */
+ default:
+ break;
+ }
+
+ /* Decide how we should decide blocks */
+ switch( avctx->mb_decision ) {
+ case 2:
+ x->vop_flags |= XVID_VOP_MODEDECISION_RD;
+ x->me_flags |= XVID_ME_HALFPELREFINE8_RD
+ | XVID_ME_QUARTERPELREFINE8_RD
+ | XVID_ME_EXTSEARCH_RD
+ | XVID_ME_CHECKPREDICTION_RD;
+ case 1:
+ if( !(x->vop_flags & XVID_VOP_MODEDECISION_RD) )
+ x->vop_flags |= XVID_VOP_FAST_MODEDECISION_RD;
+ x->me_flags |= XVID_ME_HALFPELREFINE16_RD
+ | XVID_ME_QUARTERPELREFINE16_RD;
+
+ default:
+ break;
+ }
+
+ /* Bring in VOL flags from ffmpeg command-line */
+ x->vol_flags = 0;
+ if( xvid_flags & CODEC_FLAG_GMC ) {
+ x->vol_flags |= XVID_VOL_GMC;
+ x->me_flags |= XVID_ME_GME_REFINE;
+ }
+ if( xvid_flags & CODEC_FLAG_QPEL ) {
+ x->vol_flags |= XVID_VOL_QUARTERPEL;
+ x->me_flags |= XVID_ME_QUARTERPELREFINE16;
+ if( x->vop_flags & XVID_VOP_INTER4V )
+ x->me_flags |= XVID_ME_QUARTERPELREFINE8;
+ }
+
+ memset(&xvid_gbl_init, 0, sizeof(xvid_gbl_init));
+ xvid_gbl_init.version = XVID_VERSION;
+ xvid_gbl_init.debug = 0;
+
+#ifdef ARCH_POWERPC
+ /* XviD's PPC support is borked, use libavcodec to detect */
+#if HAVE_ALTIVEC==1
+ if( has_altivec() ) {
+ xvid_gbl_init.cpu_flags = XVID_CPU_FORCE | XVID_CPU_ALTIVEC;
+ } else
+#endif
+ xvid_gbl_init.cpu_flags = XVID_CPU_FORCE;
+#else
+ /* XviD can detect on x86 */
+ xvid_gbl_init.cpu_flags = 0;
+#endif
+
+ /* Initialize */
+ xvid_global(NULL, XVID_GBL_INIT, &xvid_gbl_init, NULL);
+
+ /* Create the encoder reference */
+ memset(&xvid_enc_create, 0, sizeof(xvid_enc_create));
+ xvid_enc_create.version = XVID_VERSION;
+
+ /* Store the desired frame size */
+ xvid_enc_create.width = x->xsize = avctx->width;
+ xvid_enc_create.height = x->ysize = avctx->height;
+
+ /* XviD can determine the proper profile to use */
+ /* xvid_enc_create.profile = XVID_PROFILE_S_L3; */
+
+ /* We don't use zones or threads */
+ xvid_enc_create.zones = NULL;
+ xvid_enc_create.num_zones = 0;
+ xvid_enc_create.num_threads = 0;
+
+ xvid_enc_create.plugins = plugins;
+ xvid_enc_create.num_plugins = 0;
+
+ /* Initialize Buffers */
+ x->twopassbuffer = NULL;
+ x->old_twopassbuffer = NULL;
+ x->twopassfile = NULL;
+
+ if( xvid_flags & CODEC_FLAG_PASS1 ) {
+ memset(&rc2pass1, 0, sizeof(xvid_ff_pass1_t));
+ rc2pass1.version = XVID_VERSION;
+ rc2pass1.context = x;
+ x->twopassbuffer = av_malloc(BUFFER_SIZE);
+ x->old_twopassbuffer = av_malloc(BUFFER_SIZE);
+ if( x->twopassbuffer == NULL || x->old_twopassbuffer == NULL ) {
+ av_log(avctx, AV_LOG_ERROR,
+ "XviD: Cannot allocate 2-pass log buffers\n");
+ return -1;
+ }
+ x->twopassbuffer[0] = x->old_twopassbuffer[0] = 0;
+
+ plugins[xvid_enc_create.num_plugins].func = xvid_ff_2pass;
+ plugins[xvid_enc_create.num_plugins].param = &rc2pass1;
+ xvid_enc_create.num_plugins++;
+ } else if( xvid_flags & CODEC_FLAG_PASS2 ) {
+ memset(&rc2pass2, 0, sizeof(xvid_plugin_2pass2_t));
+ rc2pass2.version = XVID_VERSION;
+ rc2pass2.bitrate = avctx->bit_rate;
+
+ fd = av_tempfile("xvidff.", &(x->twopassfile));
+ if( fd == -1 ) {
+ av_log(avctx, AV_LOG_ERROR,
+ "XviD: Cannot write 2-pass pipe\n");
+ return -1;
+ }
+
+ if( avctx->stats_in == NULL ) {
+ av_log(avctx, AV_LOG_ERROR,
+ "XviD: No 2-pass information loaded for second pass\n");
+ return -1;
+ }
+
+ if( strlen(avctx->stats_in) >
+ write(fd, avctx->stats_in, strlen(avctx->stats_in)) ) {
+ close(fd);
+ av_log(avctx, AV_LOG_ERROR,
+ "XviD: Cannot write to 2-pass pipe\n");
+ return -1;
+ }
+
+ close(fd);
+ rc2pass2.filename = x->twopassfile;
+ plugins[xvid_enc_create.num_plugins].func = xvid_plugin_2pass2;
+ plugins[xvid_enc_create.num_plugins].param = &rc2pass2;
+ xvid_enc_create.num_plugins++;
+ } else if( !(xvid_flags & CODEC_FLAG_QSCALE) ) {
+ /* Single Pass Bitrate Control! */
+ memset(&single, 0, sizeof(xvid_plugin_single_t));
+ single.version = XVID_VERSION;
+ single.bitrate = avctx->bit_rate;
+
+ plugins[xvid_enc_create.num_plugins].func = xvid_plugin_single;
+ plugins[xvid_enc_create.num_plugins].param = &single;
+ xvid_enc_create.num_plugins++;
+ }
+
+ /* Luminance Masking */
+ if( 0.0 != avctx->lumi_masking ) {
+ plugins[xvid_enc_create.num_plugins].func = xvid_plugin_lumimasking;
+ plugins[xvid_enc_create.num_plugins].param = NULL;
+ xvid_enc_create.num_plugins++;
+ }
+
+ /* Frame Rate and Key Frames */
+ xvid_correct_framerate(avctx);
+ xvid_enc_create.fincr = avctx->time_base.num;
+ xvid_enc_create.fbase = avctx->time_base.den;
+ if( avctx->gop_size > 0 )
+ xvid_enc_create.max_key_interval = avctx->gop_size;
+ else
+ xvid_enc_create.max_key_interval = 240; /* XviD's best default */
+
+ /* Quants */
+ if( xvid_flags & CODEC_FLAG_QSCALE ) x->qscale = 1;
+ else x->qscale = 0;
+
+ xvid_enc_create.min_quant[0] = avctx->qmin;
+ xvid_enc_create.min_quant[1] = avctx->qmin;
+ xvid_enc_create.min_quant[2] = avctx->qmin;
+ xvid_enc_create.max_quant[0] = avctx->qmax;
+ xvid_enc_create.max_quant[1] = avctx->qmax;
+ xvid_enc_create.max_quant[2] = avctx->qmax;
+
+ /* Quant Matrices */
+ x->intra_matrix = x->inter_matrix = NULL;
+ if( avctx->mpeg_quant )
+ x->vol_flags |= XVID_VOL_MPEGQUANT;
+ if( (avctx->intra_matrix || avctx->inter_matrix) ) {
+ x->vol_flags |= XVID_VOL_MPEGQUANT;
+
+ if( avctx->intra_matrix ) {
+ intra = avctx->intra_matrix;
+ x->intra_matrix = av_malloc(sizeof(unsigned char) * 64);
+ } else
+ intra = NULL;
+ if( avctx->inter_matrix ) {
+ inter = avctx->inter_matrix;
+ x->inter_matrix = av_malloc(sizeof(unsigned char) * 64);
+ } else
+ inter = NULL;
+
+ for( i = 0; i < 64; i++ ) {
+ if( intra )
+ x->intra_matrix[i] = (unsigned char)intra[i];
+ if( inter )
+ x->inter_matrix[i] = (unsigned char)inter[i];
+ }
+ }
+
+ /* Misc Settings */
+ xvid_enc_create.frame_drop_ratio = 0;
+ xvid_enc_create.global = 0;
+ if( xvid_flags & CODEC_FLAG_CLOSED_GOP )
+ xvid_enc_create.global |= XVID_GLOBAL_CLOSED_GOP;
+
+ /* Determines which codec mode we are operating in */
+ avctx->extradata = NULL;
+ avctx->extradata_size = 0;
+ if( xvid_flags & CODEC_FLAG_GLOBAL_HEADER ) {
+ /* In this case, we are claiming to be MPEG4 */
+ x->quicktime_format = 1;
+ avctx->codec_id = CODEC_ID_MPEG4;
+ } else {
+ /* We are claiming to be XviD */
+ x->quicktime_format = 0;
+ if(!avctx->codec_tag)
+ avctx->codec_tag = ff_get_fourcc("xvid");
+ }
+
+ /* Bframes */
+ xvid_enc_create.max_bframes = avctx->max_b_frames;
+ xvid_enc_create.bquant_offset = 100 * avctx->b_quant_offset;
+ xvid_enc_create.bquant_ratio = 100 * avctx->b_quant_factor;
+ if( avctx->max_b_frames > 0 && !x->quicktime_format ) xvid_enc_create.global |= XVID_GLOBAL_PACKED;
+
+ /* Create encoder context */
+ xerr = xvid_encore(NULL, XVID_ENC_CREATE, &xvid_enc_create, NULL);
+ if( xerr ) {
+ av_log(avctx, AV_LOG_ERROR, "XviD: Could not create encoder reference\n");
+ return -1;
+ }
+
+ x->encoder_handle = xvid_enc_create.handle;
+ avctx->coded_frame = &x->encoded_picture;
+
+ return 0;
+}
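As a quick illustration of how the plugin array above ends up populated (the concrete values are an assumed example, not part of the patch): for a CODEC_FLAG_PASS1 run with a non-zero lumi_masking, the chain of if blocks leaves

    plugins[0].func = xvid_ff_2pass;           plugins[0].param = &rc2pass1;
    plugins[1].func = xvid_plugin_lumimasking; plugins[1].param = NULL;
    xvid_enc_create.num_plugins = 2;

so libxvidcore calls back into xvid_ff_2pass() for first-pass statistics while running its own luminance-masking plugin alongside it.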
+
+/**
+ * Encodes a single frame.
+ *
+ * @param avctx AVCodecContext pointer to context
+ * @param frame Pointer to encoded frame buffer
+ * @param buf_size Size of encoded frame buffer
+ * @param data Pointer to AVFrame of unencoded frame
+ * @return Returns 0 on success, -1 on failure
+ */
+int ff_xvid_encode_frame(AVCodecContext *avctx,
+ unsigned char *frame, int buf_size, void *data) {
+ int xerr, i;
+ char *tmp;
+ xvid_context_t *x = avctx->priv_data;
+ AVFrame *picture = data;
+ AVFrame *p = &(x->encoded_picture);
+
+ xvid_enc_frame_t xvid_enc_frame;
+ xvid_enc_stats_t xvid_enc_stats;
+
+ /* Start setting up the frame */
+ memset(&xvid_enc_frame, 0, sizeof(xvid_enc_frame));
+ xvid_enc_frame.version = XVID_VERSION;
+ memset(&xvid_enc_stats, 0, sizeof(xvid_enc_stats));
+ xvid_enc_stats.version = XVID_VERSION;
+ *p = *picture;
+
+ /* Let XviD know where to put the frame. */
+ xvid_enc_frame.bitstream = frame;
+ xvid_enc_frame.length = buf_size;
+
+ /* Initialize input image fields */
+ if( avctx->pix_fmt != PIX_FMT_YUV420P ) {
+ av_log(avctx, AV_LOG_ERROR, "XviD: Color spaces other than 420p not supported\n");
+ return -1;
+ }
+
+ xvid_enc_frame.input.csp = XVID_CSP_PLANAR; /* YUV420P */
+
+ for( i = 0; i < 4; i++ ) {
+ xvid_enc_frame.input.plane[i] = picture->data[i];
+ xvid_enc_frame.input.stride[i] = picture->linesize[i];
+ }
+
+ /* Encoder Flags */
+ xvid_enc_frame.vop_flags = x->vop_flags;
+ xvid_enc_frame.vol_flags = x->vol_flags;
+ xvid_enc_frame.motion = x->me_flags;
+ xvid_enc_frame.type = XVID_TYPE_AUTO;
+
+ /* Quant Setting */
+ if( x->qscale ) xvid_enc_frame.quant = picture->quality / FF_QP2LAMBDA;
+ else xvid_enc_frame.quant = 0;
+
+ /* Matrices */
+ xvid_enc_frame.quant_intra_matrix = x->intra_matrix;
+ xvid_enc_frame.quant_inter_matrix = x->inter_matrix;
+
+ /* Encode */
+ xerr = xvid_encore(x->encoder_handle, XVID_ENC_ENCODE,
+ &xvid_enc_frame, &xvid_enc_stats);
+
+ /* Two-pass log buffer swapping */
+ avctx->stats_out = NULL;
+ if( x->twopassbuffer ) {
+ tmp = x->old_twopassbuffer;
+ x->old_twopassbuffer = x->twopassbuffer;
+ x->twopassbuffer = tmp;
+ x->twopassbuffer[0] = 0;
+ if( x->old_twopassbuffer[0] != 0 ) {
+ avctx->stats_out = x->old_twopassbuffer;
+ }
+ }
+
+ if( 0 <= xerr ) {
+ p->quality = xvid_enc_stats.quant * FF_QP2LAMBDA;
+ if( xvid_enc_stats.type == XVID_TYPE_PVOP )
+ p->pict_type = FF_P_TYPE;
+ else if( xvid_enc_stats.type == XVID_TYPE_BVOP )
+ p->pict_type = FF_B_TYPE;
+ else if( xvid_enc_stats.type == XVID_TYPE_SVOP )
+ p->pict_type = FF_S_TYPE;
+ else
+ p->pict_type = FF_I_TYPE;
+ if( xvid_enc_frame.out_flags & XVID_KEYFRAME ) {
+ p->key_frame = 1;
+ if( x->quicktime_format )
+ return xvid_strip_vol_header(avctx, frame,
+ xvid_enc_stats.hlength, xerr);
+ } else
+ p->key_frame = 0;
+
+ return xerr;
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "XviD: Encoding Error Occurred: %i\n", xerr);
+ return -1;
+ }
+}
+
+/**
+ * Destroys the private context for the encoder.
+ * All buffers are freed, and the XviD encoder context is destroyed.
+ *
+ * @param avctx AVCodecContext pointer to context
+ * @return Returns 0, success guaranteed
+ */
+int ff_xvid_encode_close(AVCodecContext *avctx) {
+ xvid_context_t *x = avctx->priv_data;
+
+ xvid_encore(x->encoder_handle, XVID_ENC_DESTROY, NULL, NULL);
+
+ if( avctx->extradata != NULL )
+ av_free(avctx->extradata);
+ if( x->twopassbuffer != NULL ) {
+ av_free(x->twopassbuffer);
+ av_free(x->old_twopassbuffer);
+ }
+ if( x->twopassfile != NULL )
+ av_free(x->twopassfile);
+ if( x->intra_matrix != NULL )
+ av_free(x->intra_matrix);
+ if( x->inter_matrix != NULL )
+ av_free(x->inter_matrix);
+
+ return 0;
+}
+
+/**
+ * Routine to create a global VO/VOL header for MP4 container.
+ * What we do here is extract the header from the XviD bitstream
+ * as it is encoded. We also strip the repeated headers from the
+ * bitstream when a global header is requested for MPEG-4 ISO
+ * compliance.
+ *
+ * @param avctx AVCodecContext pointer to context
+ * @param frame Pointer to encoded frame data
+ * @param header_len Length of header to search
+ * @param frame_len Length of encoded frame data
+ * @return Returns new length of frame data
+ */
+int xvid_strip_vol_header(AVCodecContext *avctx,
+ unsigned char *frame,
+ unsigned int header_len,
+ unsigned int frame_len) {
+ int vo_len = 0, i;
+
+ for( i = 0; i < header_len - 3; i++ ) {
+ if( frame[i] == 0x00 &&
+ frame[i+1] == 0x00 &&
+ frame[i+2] == 0x01 &&
+ frame[i+3] == 0xB6 ) {
+ vo_len = i;
+ break;
+ }
+ }
+
+ if( vo_len > 0 ) {
+ /* We need to store the header, so extract it */
+ if( avctx->extradata == NULL ) {
+ avctx->extradata = av_malloc(vo_len);
+ memcpy(avctx->extradata, frame, vo_len);
+ avctx->extradata_size = vo_len;
+ }
+ /* Less dangerous now, memmove properly copies the two
+ chunks of overlapping data */
+ memmove(frame, &(frame[vo_len]), frame_len - vo_len);
+ return frame_len - vo_len;
+ } else
+ return frame_len;
+}
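To make the stripping above concrete, here is a sketch of the bitstream layout it assumes (only the 00 00 01 B6 VOP start code is taken from the code; everything else is schematic): a keyframe begins with the global VOS/VOL headers, and the picture data proper starts at the VOP start code.

    before:  [ VOS/VOL header bytes ... ] [ 00 00 01 B6  VOP data ... ]
    after:   avctx->extradata = the vo_len leading header bytes
             frame            = [ 00 00 01 B6  VOP data ... ], length frame_len - vo_len

The global header is thus emitted once through extradata, while each packet keeps only the VOP.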
+
+/**
+ * Routine to correct a possibly erroneous framerate being fed to us.
+ * XviD currently chokes on framerates where the ticks per frame is
+ * extremely large. This function works to correct problems in this area
+ * by estimating a new framerate and taking the simpler fraction of
+ * the two presented.
+ *
+ * @param avctx Context that contains the framerate to correct.
+ */
+void xvid_correct_framerate(AVCodecContext *avctx) {
+ int frate, fbase;
+ int est_frate, est_fbase;
+ int gcd;
+ float est_fps, fps;
+
+ frate = avctx->time_base.den;
+ fbase = avctx->time_base.num;
+
+ gcd = ff_gcd(frate, fbase);
+ if( gcd > 1 ) {
+ frate /= gcd;
+ fbase /= gcd;
+ }
+
+ if( frate <= 65000 && fbase <= 65000 ) {
+ avctx->time_base.den = frate;
+ avctx->time_base.num = fbase;
+ return;
+ }
+
+ fps = (float)frate / (float)fbase;
+ est_fps = roundf(fps * 1000.0) / 1000.0;
+
+ est_frate = (int)est_fps;
+ if( est_fps > (int)est_fps ) {
+ est_frate = (est_frate + 1) * 1000;
+ est_fbase = (int)roundf((float)est_frate / est_fps);
+ } else
+ est_fbase = 1;
+
+ gcd = ff_gcd(est_frate, est_fbase);
+ if( gcd > 1 ) {
+ est_frate /= gcd;
+ est_fbase /= gcd;
+ }
+
+ if( fbase > est_fbase ) {
+ avctx->time_base.den = est_frate;
+ avctx->time_base.num = est_fbase;
+ av_log(avctx, AV_LOG_DEBUG,
+ "XviD: framerate re-estimated: %.2f, %.3f%% correction\n",
+ est_fps, (((est_fps - fps)/fps) * 100.0));
+ } else {
+ avctx->time_base.den = frate;
+ avctx->time_base.num = fbase;
+ }
+}
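A worked example of the correction (the input numbers are just an assumed case): with avctx->time_base = 33333/1000000 (about 30 fps, but 33333 ticks per frame), the GCD is 1, so the fraction cannot be reduced and frate exceeds the 65000 limit. fps becomes 1000000/33333 ≈ 30.0003, which rounds to est_fps = 30.000; since that is not greater than its integer part, est_frate = 30 and est_fbase = 1, and the time base is rewritten as 1/30 with a logged correction of roughly -0.001%.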
+
+/*
+ * XviD 2-Pass Kludge Section
+ *
+ * XviD's default 2-pass doesn't allow us to create data as we need to, so
+ * this section spends time replacing the first pass plugin so we can write
+ * statistic information in the format libavcodec requests. We have another kludge
+ * that allows us to pass data to the second pass in XviD without a custom
+ * rate-control plugin.
+ */
+
+/**
+ * Initializes the two-pass plugin and context.
+ *
+ * @param param Input construction parameter structure
+ * @param handle Private context handle
+ * @return Returns XVID_ERR_xxxx on failure, or 0 on success.
+ */
+static int xvid_ff_2pass_create(xvid_plg_create_t * param,
+ void ** handle) {
+ xvid_ff_pass1_t *x = (xvid_ff_pass1_t *)param->param;
+ char *log = x->context->twopassbuffer;
+
+ /* Do a quick bounds check */
+ if( log == NULL )
+ return XVID_ERR_FAIL;
+
+ /* We use snprintf() here because it lets us safely prevent a buffer overflow */
+ log[0] = 0;
+ snprintf(log, BUFFER_REMAINING(log),
+ "# ffmpeg 2-pass log file, using xvid codec\n");
+ snprintf(BUFFER_CAT(log), BUFFER_REMAINING(log),
+ "# Do not modify. libxvidcore version: %d.%d.%d\n\n",
+ XVID_VERSION_MAJOR(XVID_VERSION),
+ XVID_VERSION_MINOR(XVID_VERSION),
+ XVID_VERSION_PATCH(XVID_VERSION));
+
+ *handle = x->context;
+ return 0;
+}
+
+/**
+ * Destroys the two-pass plugin context.
+ *
+ * @param ref Context pointer for the plugin
+ * @param param Destroy context
+ * @return Returns 0, success guaranteed
+ */
+static int xvid_ff_2pass_destroy(xvid_context_t *ref,
+ xvid_plg_destroy_t *param) {
+ /* Currently cannot think of anything to do on destruction */
+ /* Still, the framework should be here for reference/use */
+ if( ref->twopassbuffer != NULL )
+ ref->twopassbuffer[0] = 0;
+ return 0;
+}
+
+/**
+ * Enables fast encode mode during the first pass.
+ *
+ * @param ref Context pointer for the plugin
+ * @param param Frame data
+ * @return Returns 0, success guaranteed
+ */
+static int xvid_ff_2pass_before(xvid_context_t *ref,
+ xvid_plg_data_t *param) {
+ int motion_remove;
+ int motion_replacements;
+ int vop_remove;
+
+ /* Nothing to do here, result is changed too much */
+ if( param->zone && param->zone->mode == XVID_ZONE_QUANT )
+ return 0;
+
+ /* We can implement a 'turbo' first pass mode here */
+ param->quant = 2;
+
+ /* Init values */
+ motion_remove = ~XVID_ME_CHROMA_PVOP &
+ ~XVID_ME_CHROMA_BVOP &
+ ~XVID_ME_EXTSEARCH16 &
+ ~XVID_ME_ADVANCEDDIAMOND16;
+ motion_replacements = XVID_ME_FAST_MODEINTERPOLATE |
+ XVID_ME_SKIP_DELTASEARCH |
+ XVID_ME_FASTREFINE16 |
+ XVID_ME_BFRAME_EARLYSTOP;
+ vop_remove = ~XVID_VOP_MODEDECISION_RD &
+ ~XVID_VOP_FAST_MODEDECISION_RD &
+ ~XVID_VOP_TRELLISQUANT &
+ ~XVID_VOP_INTER4V &
+ ~XVID_VOP_HQACPRED;
+
+ param->vol_flags &= ~XVID_VOL_GMC;
+ param->vop_flags &= vop_remove;
+ param->motion_flags &= motion_remove;
+ param->motion_flags |= motion_replacements;
+
+ return 0;
+}
+
+/**
+ * Captures statistic data and writes it during first pass.
+ *
+ * @param ref Context pointer for the plugin
+ * @param param Statistic data
+ * @return Returns XVID_ERR_xxxx on failure, or 0 on success
+ */
+static int xvid_ff_2pass_after(xvid_context_t *ref,
+ xvid_plg_data_t *param) {
+ char *log = ref->twopassbuffer;
+ char *frame_types = " ipbs";
+ char frame_type;
+
+ /* Quick bounds check */
+ if( log == NULL )
+ return XVID_ERR_FAIL;
+
+ /* Convert the type given to us into a character */
+ if( param->type < 5 && param->type > 0 ) {
+ frame_type = frame_types[param->type];
+ } else {
+ return XVID_ERR_FAIL;
+ }
+
+ snprintf(BUFFER_CAT(log), BUFFER_REMAINING(log),
+ "%c %d %d %d %d %d %d\n",
+ frame_type, param->stats.quant, param->stats.kblks, param->stats.mblks,
+ param->stats.ublks, param->stats.length, param->stats.hlength);
+
+ return 0;
+}
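For reference, the first-pass log that these two snprintf() calls build up looks roughly like this (the numeric values and the library version are made up for illustration; the field order is type, quant, kblks, mblks, ublks, length, hlength):

    # ffmpeg 2-pass log file, using xvid codec
    # Do not modify. libxvidcore version: 1.1.2

    i 2 396 0 0 14982 52
    p 2 11 371 14 6154 37
    b 2 0 208 188 2710 29

One line is appended per encoded frame, and ff_xvid_encode_frame() hands the finished buffer back to libavcodec through avctx->stats_out.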
+
+/**
+ * Dispatch function for our custom plugin.
+ * This handles the dispatch for the XviD plugin. It passes data
+ * on to other functions for actual processing.
+ *
+ * @param ref Context pointer for the plugin
+ * @param cmd The task given for us to complete
+ * @param p1 First parameter (varies)
+ * @param p2 Second parameter (varies)
+ * @return Returns XVID_ERR_xxxx on failure, or 0 on success
+ */
+int xvid_ff_2pass(void *ref, int cmd, void *p1, void *p2) {
+ switch( cmd ) {
+ case XVID_PLG_INFO:
+ case XVID_PLG_FRAME:
+ return 0;
+
+ case XVID_PLG_BEFORE:
+ return xvid_ff_2pass_before(ref, p1);
+
+ case XVID_PLG_CREATE:
+ return xvid_ff_2pass_create(p1, p2);
+
+ case XVID_PLG_AFTER:
+ return xvid_ff_2pass_after(ref, p1);
+
+ case XVID_PLG_DESTROY:
+ return xvid_ff_2pass_destroy(ref, p1);
+
+ default:
+ return XVID_ERR_FAIL;
+ }
+}
+
+/**
+ * XviD codec definition for libavcodec.
+ */
+AVCodec xvid_encoder = {
+ "xvid",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_XVID,
+ sizeof(xvid_context_t),
+ ff_xvid_encode_init,
+ ff_xvid_encode_frame,
+ ff_xvid_encode_close,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+};
diff --git a/contrib/ffmpeg/libavcodec/xvmcvideo.c b/contrib/ffmpeg/libavcodec/xvmcvideo.c
new file mode 100644
index 000000000..4a0677f6e
--- /dev/null
+++ b/contrib/ffmpeg/libavcodec/xvmcvideo.c
@@ -0,0 +1,318 @@
+/*
+ * XVideo Motion Compensation
+ * Copyright (c) 2003 Ivan Kalvachev
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <limits.h>
+
+//avcodec include
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#ifdef USE_FASTMEMCPY
+#include "libvo/fastmemcpy.h"
+#endif
+
+#ifdef HAVE_XVMC
+
+//The X11 includes are pulled in via xvmc_render.h;
+//by replacing it with a non-X11 equivalent,
+//XvMC emulation could be performed
+
+#include "xvmc_render.h"
+
+//#include "xvmc_debug.h"
+
+//set s->block
+inline void XVMC_init_block(MpegEncContext *s){
+xvmc_render_state_t * render;
+ render = (xvmc_render_state_t*)s->current_picture.data[2];
+ assert(render != NULL);
+ if( (render == NULL) || (render->magic != MP_XVMC_RENDER_MAGIC) ){
+ assert(0);
+ return;//make sure that this is render packet
+ }
+ s->block =(DCTELEM *)(render->data_blocks+(render->next_free_data_block_num)*64);
+}
+
+void XVMC_pack_pblocks(MpegEncContext *s, int cbp){
+int i,j;
+const int mb_block_count = 4+(1<<s->chroma_format);
+
+ j=0;
+ cbp<<= 12-mb_block_count;
+ for(i=0; i<mb_block_count; i++){
+ if(cbp & (1<<11)) {
+ s->pblocks[i] = (short *)(&s->block[(j++)]);
+ }else{
+ s->pblocks[i] = NULL;
+ }
+ cbp+=cbp;
+// printf("s->pblocks[%d]=%p ,s->block=%p cbp=%d\n",i,s->pblocks[i],s->block,cbp);
+ }
+}
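A worked example of the bit juggling above, assuming 4:2:0 (s->chroma_format == 1, so mb_block_count == 6): a cbp of 0b110010 means blocks 0, 1 and 4 are coded. After the left shift by 12 - 6 = 6, bit 11 holds the flag for block 0, and each cbp += cbp moves the next block's flag into bit 11, so pblocks[0], pblocks[1] and pblocks[4] end up pointing at three consecutive 64-coefficient slots in s->block while the remaining entries are set to NULL.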
+
+//These functions should be called on every new field and/or frame.
+//They should be safe even if they are called several times for the same field!
+int XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx){
+xvmc_render_state_t * render,* last, * next;
+
+ assert(avctx != NULL);
+
+ render = (xvmc_render_state_t*)s->current_picture.data[2];
+ assert(render != NULL);
+ if( (render == NULL) || (render->magic != MP_XVMC_RENDER_MAGIC) )
+ return -1;//make sure that this is render packet
+
+ render->picture_structure = s->picture_structure;
+ render->flags = (s->first_field)? 0: XVMC_SECOND_FIELD;
+
+//make sure that all data is drawn by XVMC_end_frame
+ assert(render->filled_mv_blocks_num==0);
+
+ render->p_future_surface = NULL;
+ render->p_past_surface = NULL;
+
+ switch(s->pict_type){
+ case I_TYPE:
+ return 0;// no prediction from other frames
+ case B_TYPE:
+ next = (xvmc_render_state_t*)s->next_picture.data[2];
+ assert(next!=NULL);
+ assert(next->state & MP_XVMC_STATE_PREDICTION);
+ if(next == NULL) return -1;
+ if(next->magic != MP_XVMC_RENDER_MAGIC) return -1;
+ render->p_future_surface = next->p_surface;
+ //no return here, going to set forward prediction
+ case P_TYPE:
+ last = (xvmc_render_state_t*)s->last_picture.data[2];
+ if(last == NULL)// && !s->first_field)
+ last = render;//predict second field from the first
+ if(last->magic != MP_XVMC_RENDER_MAGIC) return -1;
+ assert(last->state & MP_XVMC_STATE_PREDICTION);
+ render->p_past_surface = last->p_surface;
+ return 0;
+ }
+
+return -1;
+}
+
+void XVMC_field_end(MpegEncContext *s){
+xvmc_render_state_t * render;
+ render = (xvmc_render_state_t*)s->current_picture.data[2];
+ assert(render != NULL);
+
+ if(render->filled_mv_blocks_num > 0){
+// printf("xvmcvideo.c: rendering %d left blocks after last slice!!!\n",render->filled_mv_blocks_num );
+ ff_draw_horiz_band(s,0,0);
+ }
+}
+
+void XVMC_decode_mb(MpegEncContext *s){
+XvMCMacroBlock * mv_block;
+xvmc_render_state_t * render;
+int i,cbp,blocks_per_mb;
+
+const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
+
+
+ if(s->encoding){
+ av_log(s->avctx, AV_LOG_ERROR, "XVMC doesn't support encoding!!!\n");
+ return;
+ }
+
+ //from MPV_decode_mb(),
+ /* update DC predictors for P macroblocks */
+ if (!s->mb_intra) {
+ s->last_dc[0] =
+ s->last_dc[1] =
+ s->last_dc[2] = 128 << s->intra_dc_precision;
+ }
+
+ //MC doesn't skip blocks
+ s->mb_skipped = 0;
+
+
+ // do I need to export quant when I could not perform postprocessing?
+ // anyway, it doesn't hurt
+ s->current_picture.qscale_table[mb_xy] = s->qscale;
+
+//START OF XVMC specific code
+ render = (xvmc_render_state_t*)s->current_picture.data[2];
+ assert(render!=NULL);
+ assert(render->magic==MP_XVMC_RENDER_MAGIC);
+ assert(render->mv_blocks);
+
+ //take the next free macroblock
+ mv_block = &render->mv_blocks[render->start_mv_blocks_num +
+ render->filled_mv_blocks_num ];
+
+// memset(mv_block,0,sizeof(XvMCMacroBlock));
+
+ mv_block->x = s->mb_x;
+ mv_block->y = s->mb_y;
+ mv_block->dct_type = s->interlaced_dct;//XVMC_DCT_TYPE_FRAME/FIELD;
+// mv_block->motion_type = 0; //zero to silence warnings
+ if(s->mb_intra){
+ mv_block->macroblock_type = XVMC_MB_TYPE_INTRA;//no MC, all done
+ }else{
+ mv_block->macroblock_type = XVMC_MB_TYPE_PATTERN;
+
+ if(s->mv_dir & MV_DIR_FORWARD){
+ mv_block->macroblock_type|= XVMC_MB_TYPE_MOTION_FORWARD;
+ //pmv[n][dir][xy]=mv[dir][n][xy]
+ mv_block->PMV[0][0][0] = s->mv[0][0][0];
+ mv_block->PMV[0][0][1] = s->mv[0][0][1];
+ mv_block->PMV[1][0][0] = s->mv[0][1][0];
+ mv_block->PMV[1][0][1] = s->mv[0][1][1];
+ }
+ if(s->mv_dir & MV_DIR_BACKWARD){
+ mv_block->macroblock_type|=XVMC_MB_TYPE_MOTION_BACKWARD;
+ mv_block->PMV[0][1][0] = s->mv[1][0][0];
+ mv_block->PMV[0][1][1] = s->mv[1][0][1];
+ mv_block->PMV[1][1][0] = s->mv[1][1][0];
+ mv_block->PMV[1][1][1] = s->mv[1][1][1];
+ }
+
+ switch(s->mv_type){
+ case MV_TYPE_16X16:
+ mv_block->motion_type = XVMC_PREDICTION_FRAME;
+ break;
+ case MV_TYPE_16X8:
+ mv_block->motion_type = XVMC_PREDICTION_16x8;
+ break;
+ case MV_TYPE_FIELD:
+ mv_block->motion_type = XVMC_PREDICTION_FIELD;
+ if(s->picture_structure == PICT_FRAME){
+ mv_block->PMV[0][0][1]<<=1;
+ mv_block->PMV[1][0][1]<<=1;
+ mv_block->PMV[0][1][1]<<=1;
+ mv_block->PMV[1][1][1]<<=1;
+ }
+ break;
+ case MV_TYPE_DMV:
+ mv_block->motion_type = XVMC_PREDICTION_DUAL_PRIME;
+ if(s->picture_structure == PICT_FRAME){
+
+ mv_block->PMV[0][0][0] = s->mv[0][0][0];//top from top
+ mv_block->PMV[0][0][1] = s->mv[0][0][1]<<1;
+
+ mv_block->PMV[0][1][0] = s->mv[0][0][0];//bottom from bottom
+ mv_block->PMV[0][1][1] = s->mv[0][0][1]<<1;
+
+ mv_block->PMV[1][0][0] = s->mv[0][2][0];//dmv00, top from bottom
+ mv_block->PMV[1][0][1] = s->mv[0][2][1]<<1;//dmv01
+
+ mv_block->PMV[1][1][0] = s->mv[0][3][0];//dmv10, bottom from top
+ mv_block->PMV[1][1][1] = s->mv[0][3][1]<<1;//dmv11
+
+ }else{
+ mv_block->PMV[0][1][0] = s->mv[0][2][0];//dmv00
+ mv_block->PMV[0][1][1] = s->mv[0][2][1];//dmv01
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ mv_block->motion_vertical_field_select = 0;
+
+//set correct field references
+ if(s->mv_type == MV_TYPE_FIELD || s->mv_type == MV_TYPE_16X8){
+ if( s->field_select[0][0] ) mv_block->motion_vertical_field_select|=1;
+ if( s->field_select[1][0] ) mv_block->motion_vertical_field_select|=2;
+ if( s->field_select[0][1] ) mv_block->motion_vertical_field_select|=4;
+ if( s->field_select[1][1] ) mv_block->motion_vertical_field_select|=8;
+ }
+ }//!intra
+//time to handle data blocks;
+ mv_block->index = render->next_free_data_block_num;
+
+ blocks_per_mb = 6;
+ if( s->chroma_format >= 2){
+ blocks_per_mb = 4 + (1 << (s->chroma_format));
+ }
+
+// calculate cbp
+ cbp = 0;
+ for(i=0; i<blocks_per_mb; i++) {
+ cbp+= cbp;
+ if(s->block_last_index[i] >= 0)
+ cbp++;
+ }
+
+ if(s->flags & CODEC_FLAG_GRAY){
+ if(s->mb_intra){//intra frames are always full chroma block
+ for(i=4; i<blocks_per_mb; i++){
+ memset(s->pblocks[i],0,sizeof(short)*8*8);//so we need to clear them
+ if(!render->unsigned_intra)
+ s->pblocks[i][0] = 1<<10;
+ }
+ }else{
+ cbp&= 0xf << (blocks_per_mb - 4);
+ blocks_per_mb = 4;//Luminance blocks only
+ }
+ }
+ mv_block->coded_block_pattern = cbp;
+ if(cbp == 0)
+ mv_block->macroblock_type &= ~XVMC_MB_TYPE_PATTERN;
+
+ for(i=0; i<blocks_per_mb; i++){
+ if(s->block_last_index[i] >= 0){
+ // I do not have unsigned_intra MOCO to test; hope it is OK
+ if( (s->mb_intra) && ( render->idct || (!render->idct && !render->unsigned_intra)) )
+ s->pblocks[i][0]-=1<<10;
+ if(!render->idct){
+ s->dsp.idct(s->pblocks[i]);
+ //!!TODO!clip!!!
+ }
+//copy blocks only if the codec doesn't support pblocks reordering
+ if(s->avctx->xvmc_acceleration == 1){
+ memcpy(&render->data_blocks[(render->next_free_data_block_num)*64],
+ s->pblocks[i],sizeof(short)*8*8);
+ }else{
+/* if(s->pblocks[i] != &render->data_blocks[
+ (render->next_free_data_block_num)*64]){
+ printf("ERROR mb(%d,%d) s->pblocks[i]=%p data_block[]=%p\n",
+ s->mb_x,s->mb_y, s->pblocks[i],
+ &render->data_blocks[(render->next_free_data_block_num)*64]);
+ }*/
+ }
+ render->next_free_data_block_num++;
+ }
+ }
+ render->filled_mv_blocks_num++;
+
+ assert(render->filled_mv_blocks_num <= render->total_number_of_mv_blocks);
+ assert(render->next_free_data_block_num <= render->total_number_of_data_blocks);
+
+
+ if(render->filled_mv_blocks_num >= render->total_number_of_mv_blocks)
+ ff_draw_horiz_band(s,0,0);
+
+// DumpRenderInfo(render);
+// DumpMBlockInfo(mv_block);
+
+}
+
+#endif
diff --git a/src/libffmpeg/libavcodec/zmbv.c b/contrib/ffmpeg/libavcodec/zmbv.c
index fd8497dd3..fe3745e09 100644
--- a/src/libffmpeg/libavcodec/zmbv.c
+++ b/contrib/ffmpeg/libavcodec/zmbv.c
@@ -2,18 +2,20 @@
* Zip Motion Blocks Video (ZMBV) decoder
* Copyright (c) 2006 Konstantin Shishkov
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -616,7 +618,7 @@ static int decode_init(AVCodecContext *avctx)
c->width = avctx->width;
c->height = avctx->height;
- if (avcodec_check_dimensions(avctx, avctx->height, avctx->width) < 0) {
+ if (avcodec_check_dimensions(avctx, avctx->width, avctx->height) < 0) {
return 1;
}
c->bpp = avctx->bits_per_sample;
@@ -671,10 +673,8 @@ static int decode_end(AVCodecContext *avctx)
#ifdef CONFIG_ZLIB
inflateEnd(&(c->zstream));
#endif
- if(c->cur)
- av_freep(&c->cur);
- if(c->prev)
- av_freep(&c->prev);
+ av_freep(&c->cur);
+ av_freep(&c->prev);
return 0;
}
diff --git a/contrib/ffmpeg/libavformat/4xm.c b/contrib/ffmpeg/libavformat/4xm.c
new file mode 100644
index 000000000..12e7d9ee4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/4xm.c
@@ -0,0 +1,331 @@
+/*
+ * 4X Technologies .4xm File Demuxer (no muxer)
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file 4xm.c
+ * 4X Technologies file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the .4xm file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+#include "avformat.h"
+
+#define RIFF_TAG MKTAG('R', 'I', 'F', 'F')
+#define _4XMV_TAG MKTAG('4', 'X', 'M', 'V')
+#define LIST_TAG MKTAG('L', 'I', 'S', 'T')
+#define HEAD_TAG MKTAG('H', 'E', 'A', 'D')
+#define TRK__TAG MKTAG('T', 'R', 'K', '_')
+#define MOVI_TAG MKTAG('M', 'O', 'V', 'I')
+#define VTRK_TAG MKTAG('V', 'T', 'R', 'K')
+#define STRK_TAG MKTAG('S', 'T', 'R', 'K')
+#define std__TAG MKTAG('s', 't', 'd', '_')
+#define name_TAG MKTAG('n', 'a', 'm', 'e')
+#define vtrk_TAG MKTAG('v', 't', 'r', 'k')
+#define strk_TAG MKTAG('s', 't', 'r', 'k')
+#define ifrm_TAG MKTAG('i', 'f', 'r', 'm')
+#define pfrm_TAG MKTAG('p', 'f', 'r', 'm')
+#define cfrm_TAG MKTAG('c', 'f', 'r', 'm')
+#define snd__TAG MKTAG('s', 'n', 'd', '_')
+
+#define vtrk_SIZE 0x44
+#define strk_SIZE 0x28
+
+#define GET_LIST_HEADER() \
+ fourcc_tag = get_le32(pb); \
+ size = get_le32(pb); \
+ if (fourcc_tag != LIST_TAG) \
+ return AVERROR_INVALIDDATA; \
+ fourcc_tag = get_le32(pb);
+
+typedef struct AudioTrack {
+ int sample_rate;
+ int bits;
+ int channels;
+ int stream_index;
+ int adpcm;
+} AudioTrack;
+
+typedef struct FourxmDemuxContext {
+ int width;
+ int height;
+ int video_stream_index;
+ int track_count;
+ AudioTrack *tracks;
+ int selected_track;
+
+ int64_t audio_pts;
+ int64_t video_pts;
+ float fps;
+} FourxmDemuxContext;
+
+static int fourxm_probe(AVProbeData *p)
+{
+ if (p->buf_size < 12)
+ return 0;
+
+ if ((LE_32(&p->buf[0]) != RIFF_TAG) ||
+ (LE_32(&p->buf[8]) != _4XMV_TAG))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
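For orientation, the container layout that the probe and GET_LIST_HEADER() expect looks like this (offsets in bytes; the size fields are little-endian, and everything past offset 8 is only a sketch of the usual structure):

    0   'RIFF'
    4   <file size>
    8   '4XMV'
    12  'LIST' <size> 'HEAD'   <- checked by the first GET_LIST_HEADER() in read_header
        ... vtrk / strk / std_ sub-chunks ...
        'LIST' <size> 'MOVI'   <- frame data follows here

The probe only inspects offsets 0 and 8, which is why a 12-byte buffer is the minimum it will accept.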
+
+static int fourxm_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ unsigned int fourcc_tag;
+ unsigned int size;
+ int header_size;
+ FourxmDemuxContext *fourxm = (FourxmDemuxContext *)s->priv_data;
+ unsigned char *header;
+ int i;
+ int current_track = -1;
+ AVStream *st;
+
+ fourxm->track_count = 0;
+ fourxm->tracks = NULL;
+ fourxm->selected_track = 0;
+ fourxm->fps = 1.0;
+
+ /* skip the first 3 32-bit numbers */
+ url_fseek(pb, 12, SEEK_CUR);
+
+ /* check for LIST-HEAD */
+ GET_LIST_HEADER();
+ header_size = size - 4;
+ if (fourcc_tag != HEAD_TAG)
+ return AVERROR_INVALIDDATA;
+
+ /* allocate space for the header and load the whole thing */
+ header = av_malloc(header_size);
+ if (!header)
+ return AVERROR_NOMEM;
+ if (get_buffer(pb, header, header_size) != header_size)
+ return AVERROR_IO;
+
+ /* take the lazy approach and search for any and all vtrk and strk chunks */
+ for (i = 0; i < header_size - 8; i++) {
+ fourcc_tag = LE_32(&header[i]);
+ size = LE_32(&header[i + 4]);
+
+ if (fourcc_tag == std__TAG) {
+ fourxm->fps = av_int2flt(LE_32(&header[i + 12]));
+ } else if (fourcc_tag == vtrk_TAG) {
+ /* check that there is enough data */
+ if (size != vtrk_SIZE) {
+ av_free(header);
+ return AVERROR_INVALIDDATA;
+ }
+ fourxm->width = LE_32(&header[i + 36]);
+ fourxm->height = LE_32(&header[i + 40]);
+ i += 8 + size;
+
+ /* allocate a new AVStream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 60, 1, fourxm->fps);
+
+ fourxm->video_stream_index = st->index;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_4XM;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = fourxm->width;
+ st->codec->height = fourxm->height;
+
+ } else if (fourcc_tag == strk_TAG) {
+ /* check that there is enough data */
+ if (size != strk_SIZE) {
+ av_free(header);
+ return AVERROR_INVALIDDATA;
+ }
+ current_track = LE_32(&header[i + 8]);
+ if (current_track + 1 > fourxm->track_count) {
+ fourxm->track_count = current_track + 1;
+ if((unsigned)fourxm->track_count >= UINT_MAX / sizeof(AudioTrack))
+ return -1;
+ fourxm->tracks = av_realloc(fourxm->tracks,
+ fourxm->track_count * sizeof(AudioTrack));
+ if (!fourxm->tracks) {
+ av_free(header);
+ return AVERROR_NOMEM;
+ }
+ }
+ fourxm->tracks[current_track].adpcm = LE_32(&header[i + 12]);
+ fourxm->tracks[current_track].channels = LE_32(&header[i + 36]);
+ fourxm->tracks[current_track].sample_rate = LE_32(&header[i + 40]);
+ fourxm->tracks[current_track].bits = LE_32(&header[i + 44]);
+ i += 8 + size;
+
+ /* allocate a new AVStream */
+ st = av_new_stream(s, current_track);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 60, 1, fourxm->tracks[current_track].sample_rate);
+
+ fourxm->tracks[current_track].stream_index = st->index;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = 0;
+ st->codec->channels = fourxm->tracks[current_track].channels;
+ st->codec->sample_rate = fourxm->tracks[current_track].sample_rate;
+ st->codec->bits_per_sample = fourxm->tracks[current_track].bits;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+ if (fourxm->tracks[current_track].adpcm)
+ st->codec->codec_id = CODEC_ID_ADPCM_4XM;
+ else if (st->codec->bits_per_sample == 8)
+ st->codec->codec_id = CODEC_ID_PCM_U8;
+ else
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ }
+ }
+
+ av_free(header);
+
+ /* skip over the LIST-MOVI chunk (which is where the stream data should be) */
+ GET_LIST_HEADER();
+ if (fourcc_tag != MOVI_TAG)
+ return AVERROR_INVALIDDATA;
+
+ /* initialize context members */
+ fourxm->video_pts = -1; /* first frame will push to 0 */
+ fourxm->audio_pts = 0;
+
+ return 0;
+}
+
+static int fourxm_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ FourxmDemuxContext *fourxm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned int fourcc_tag;
+ unsigned int size, out_size;
+ int ret = 0;
+ int track_number;
+ int packet_read = 0;
+ unsigned char header[8];
+ int audio_frame_count;
+
+ while (!packet_read) {
+
+ if ((ret = get_buffer(&s->pb, header, 8)) < 0)
+ return ret;
+ fourcc_tag = LE_32(&header[0]);
+ size = LE_32(&header[4]);
+ if (url_feof(pb))
+ return AVERROR_IO;
+ switch (fourcc_tag) {
+
+ case LIST_TAG:
+ /* this is a good time to bump the video pts */
+ fourxm->video_pts ++;
+
+ /* skip the LIST-* tag and move on to the next fourcc */
+ get_le32(pb);
+ break;
+
+ case ifrm_TAG:
+ case pfrm_TAG:
+ case cfrm_TAG:{
+
+ /* allocate 8 more bytes than 'size' to account for fourcc
+ * and size */
+ if (size + 8 < size || av_new_packet(pkt, size + 8))
+ return AVERROR_IO;
+ pkt->stream_index = fourxm->video_stream_index;
+ pkt->pts = fourxm->video_pts;
+ pkt->pos = url_ftell(&s->pb);
+ memcpy(pkt->data, header, 8);
+ ret = get_buffer(&s->pb, &pkt->data[8], size);
+
+ if (ret < 0)
+ av_free_packet(pkt);
+ else
+ packet_read = 1;
+ break;
+ }
+
+ case snd__TAG:
+ track_number = get_le32(pb);
+ out_size= get_le32(pb);
+ size-=8;
+
+ if (track_number == fourxm->selected_track) {
+ ret= av_get_packet(&s->pb, pkt, size);
+ if(ret<0)
+ return AVERROR_IO;
+ pkt->stream_index =
+ fourxm->tracks[fourxm->selected_track].stream_index;
+ pkt->pts = fourxm->audio_pts;
+ packet_read = 1;
+
+ /* pts accounting */
+ audio_frame_count = size;
+ if (fourxm->tracks[fourxm->selected_track].adpcm)
+ audio_frame_count -=
+ 2 * (fourxm->tracks[fourxm->selected_track].channels);
+ audio_frame_count /=
+ fourxm->tracks[fourxm->selected_track].channels;
+ if (fourxm->tracks[fourxm->selected_track].adpcm)
+ audio_frame_count *= 2;
+ else
+ audio_frame_count /=
+ (fourxm->tracks[fourxm->selected_track].bits / 8);
+ fourxm->audio_pts += audio_frame_count;
+
+ } else {
+ url_fseek(pb, size, SEEK_CUR);
+ }
+ break;
+
+ default:
+ url_fseek(pb, size, SEEK_CUR);
+ break;
+ }
+ }
+ return ret;
+}
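The pts accounting at the end of the snd_ branch can be sanity-checked with an assumed example: for a 16-bit stereo PCM track and a 4096-byte sound chunk, audio_frame_count starts at 4096, becomes 2048 after dividing by the 2 channels, and 1024 after dividing by the 2 bytes per sample, so audio_pts advances by 1024 samples, matching the per-track sample_rate time base set in read_header.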
+
+static int fourxm_read_close(AVFormatContext *s)
+{
+ FourxmDemuxContext *fourxm = (FourxmDemuxContext *)s->priv_data;
+
+ av_free(fourxm->tracks);
+
+ return 0;
+}
+
+AVInputFormat fourxm_demuxer = {
+ "4xm",
+ "4X Technologies format",
+ sizeof(FourxmDemuxContext),
+ fourxm_probe,
+ fourxm_read_header,
+ fourxm_read_packet,
+ fourxm_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/Makefile b/contrib/ffmpeg/libavformat/Makefile
new file mode 100644
index 000000000..fd2ac2a29
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/Makefile
@@ -0,0 +1,197 @@
+#
+# libavformat Makefile
+# (c) 2000-2003 Fabrice Bellard
+#
+include ../config.mak
+
+CFLAGS+=-I$(SRC_PATH)/libavcodec
+
+OBJS= utils.o cutils.o os_support.o allformats.o
+
+HEADERS = avformat.h avio.h rtp.h rtsp.h rtspcodes.h
+
+# muxers/demuxers
+OBJS-$(CONFIG_FOURXM_DEMUXER) += 4xm.o
+OBJS-$(CONFIG_ADTS_MUXER) += adtsenc.o
+OBJS-$(CONFIG_AIFF_DEMUXER) += aiff.o riff.o
+OBJS-$(CONFIG_AIFF_MUXER) += aiff.o riff.o
+OBJS-$(CONFIG_AMR_DEMUXER) += amr.o
+OBJS-$(CONFIG_AMR_MUXER) += amr.o
+OBJS-$(CONFIG_ASF_DEMUXER) += asf.o riff.o
+OBJS-$(CONFIG_ASF_MUXER) += asf-enc.o riff.o
+OBJS-$(CONFIG_ASF_STREAM_MUXER) += asf-enc.o riff.o
+OBJS-$(CONFIG_AU_DEMUXER) += au.o riff.o
+OBJS-$(CONFIG_AU_MUXER) += au.o riff.o
+OBJS-$(CONFIG_AVI_DEMUXER) += avidec.o riff.o
+OBJS-$(CONFIG_AVI_MUXER) += avienc.o riff.o
+OBJS-$(CONFIG_AVISYNTH) += avisynth.o
+OBJS-$(CONFIG_AVS_DEMUXER) += avs.o vocdec.o voc.o riff.o
+OBJS-$(CONFIG_CRC_MUXER) += crc.o
+OBJS-$(CONFIG_FRAMECRC_MUXER) += crc.o
+OBJS-$(CONFIG_DAUD_DEMUXER) += daud.o
+OBJS-$(CONFIG_DSICIN_DEMUXER) += dsicin.o
+OBJS-$(CONFIG_DV_DEMUXER) += dv.o
+OBJS-$(CONFIG_DV_MUXER) += dvenc.o
+OBJS-$(CONFIG_EA_DEMUXER) += electronicarts.o
+OBJS-$(CONFIG_FFM_DEMUXER) += ffm.o
+OBJS-$(CONFIG_FFM_MUXER) += ffm.o
+OBJS-$(CONFIG_FLIC_DEMUXER) += flic.o
+OBJS-$(CONFIG_FLV_DEMUXER) += flvdec.o
+OBJS-$(CONFIG_FLV_MUXER) += flvenc.o
+OBJS-$(CONFIG_GIF_MUXER) += gif.o
+OBJS-$(CONFIG_GIF_DEMUXER) += gifdec.o
+OBJS-$(CONFIG_GXF_DEMUXER) += gxf.o
+OBJS-$(CONFIG_IDCIN_DEMUXER) += idcin.o
+OBJS-$(CONFIG_ROQ_DEMUXER) += idroq.o
+OBJS-$(CONFIG_IMAGE2_DEMUXER) += img2.o
+OBJS-$(CONFIG_IMAGE2PIPE_DEMUXER) += img2.o
+OBJS-$(CONFIG_IMAGE2_MUXER) += img2.o
+OBJS-$(CONFIG_IMAGE2PIPE_MUXER) += img2.o
+OBJS-$(CONFIG_IPMOVIE_DEMUXER) += ipmovie.o
+OBJS-$(CONFIG_MATROSKA_DEMUXER) += matroska.o riff.o
+OBJS-$(CONFIG_MM_DEMUXER) += mm.o
+OBJS-$(CONFIG_MMF_DEMUXER) += mmf.o riff.o
+OBJS-$(CONFIG_MMF_MUXER) += mmf.o riff.o
+OBJS-$(CONFIG_MOV_DEMUXER) += mov.o riff.o isom.o
+OBJS-$(CONFIG_MOV_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_MTV_DEMUXER) += mtv.o
+OBJS-$(CONFIG_TGP_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_MP4_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_PSP_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_TG2_MUXER) += movenc.o riff.o isom.o
+OBJS-$(CONFIG_MP3_DEMUXER) += mp3.o
+OBJS-$(CONFIG_MP2_MUXER) += mp3.o
+OBJS-$(CONFIG_MP3_MUXER) += mp3.o
+OBJS-$(CONFIG_MPEG1SYSTEM_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEG1VCD_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEG2VOB_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEG2SVCD_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEG2DVD_MUXER) += mpeg.o
+OBJS-$(CONFIG_MPEGPS_DEMUXER) += mpeg.o
+OBJS-$(CONFIG_MPEGTS_DEMUXER) += mpegts.o
+OBJS-$(CONFIG_MPEGTS_MUXER) += mpegtsenc.o
+OBJS-$(CONFIG_MPJPEG_MUXER) += mpjpeg.o
+OBJS-$(CONFIG_MXF_DEMUXER) += mxf.o
+OBJS-$(CONFIG_NSV_DEMUXER) += nsvdec.o riff.o
+OBJS-$(CONFIG_NUV_DEMUXER) += nuv.o riff.o
+OBJS-$(CONFIG_OGG_DEMUXER) += ogg2.o \
+ oggparsevorbis.o \
+ oggparsetheora.o \
+ oggparseflac.o \
+ oggparseogm.o \
+ riff.o
+OBJS-$(CONFIG_STR_DEMUXER) += psxstr.o
+OBJS-$(CONFIG_SHORTEN_DEMUXER) += raw.o
+OBJS-$(CONFIG_FLAC_DEMUXER) += raw.o
+OBJS-$(CONFIG_FLAC_MUXER) += raw.o
+OBJS-$(CONFIG_AC3_DEMUXER) += raw.o
+OBJS-$(CONFIG_AC3_MUXER) += raw.o
+OBJS-$(CONFIG_DTS_DEMUXER) += raw.o
+OBJS-$(CONFIG_AAC_DEMUXER) += raw.o
+OBJS-$(CONFIG_H261_DEMUXER) += raw.o
+OBJS-$(CONFIG_H261_MUXER) += raw.o
+OBJS-$(CONFIG_H263_DEMUXER) += raw.o
+OBJS-$(CONFIG_H263_MUXER) += raw.o
+OBJS-$(CONFIG_M4V_DEMUXER) += raw.o
+OBJS-$(CONFIG_M4V_MUXER) += raw.o
+OBJS-$(CONFIG_H264_DEMUXER) += raw.o
+OBJS-$(CONFIG_H264_MUXER) += raw.o
+OBJS-$(CONFIG_MPEGVIDEO_DEMUXER) += raw.o
+OBJS-$(CONFIG_MPEG1VIDEO_MUXER) += raw.o
+OBJS-$(CONFIG_MPEG2VIDEO_MUXER) += raw.o
+OBJS-$(CONFIG_MJPEG_DEMUXER) += raw.o
+OBJS-$(CONFIG_INGENIENT_DEMUXER) += raw.o
+OBJS-$(CONFIG_MJPEG_MUXER) += raw.o
+OBJS-$(CONFIG_RAWVIDEO_DEMUXER) += raw.o
+OBJS-$(CONFIG_RAWVIDEO_MUXER) += raw.o
+OBJS-$(CONFIG_NULL_MUXER) += raw.o
+OBJS-$(CONFIG_RM_DEMUXER) += rm.o
+OBJS-$(CONFIG_RM_MUXER) += rm.o
+OBJS-$(CONFIG_SEGAFILM_DEMUXER) += segafilm.o
+OBJS-$(CONFIG_VMD_DEMUXER) += sierravmd.o
+OBJS-$(CONFIG_SMACKER_DEMUXER) += smacker.o
+OBJS-$(CONFIG_SOL_DEMUXER) += sol.o
+OBJS-$(CONFIG_SWF_DEMUXER) += swf.o
+OBJS-$(CONFIG_SWF_MUXER) += swf.o
+OBJS-$(CONFIG_TIERTEXSEQ_DEMUXER) += tiertexseq.o
+OBJS-$(CONFIG_TTA_DEMUXER) += tta.o
+OBJS-$(CONFIG_VOC_DEMUXER) += vocdec.o voc.o riff.o
+OBJS-$(CONFIG_VOC_MUXER) += vocenc.o voc.o riff.o
+OBJS-$(CONFIG_WAV_DEMUXER) += wav.o riff.o
+OBJS-$(CONFIG_WAV_MUXER) += wav.o riff.o
+OBJS-$(CONFIG_WC3_DEMUXER) += wc3movie.o
+OBJS-$(CONFIG_WSAUD_DEMUXER) += westwood.o
+OBJS-$(CONFIG_WSVQA_DEMUXER) += westwood.o
+OBJS-$(CONFIG_WV_DEMUXER) += wv.o
+OBJS-$(CONFIG_YUV4MPEGPIPE_MUXER) += yuv4mpeg.o
+OBJS-$(CONFIG_YUV4MPEGPIPE_DEMUXER) += yuv4mpeg.o
+
+OBJS+= framehook.o
+
+ifeq ($(CONFIG_VIDEO4LINUX),yes)
+OBJS-$(CONFIG_VIDEO_GRAB_DEVICE_DEMUXER) += grab.o
+endif
+
+ifeq ($(CONFIG_VIDEO4LINUX2),yes)
+OBJS-$(CONFIG_V4L2_DEMUXER) += v4l2.o
+endif
+
+ifeq ($(CONFIG_BKTR),yes)
+OBJS-$(CONFIG_VIDEO_GRAB_DEVICE_DEMUXER) += grab_bktr.o
+endif
+
+ifeq ($(CONFIG_DV1394),yes)
+OBJS-$(CONFIG_DV1394_DEMUXER) += dv1394.o
+endif
+
+ifeq ($(CONFIG_DC1394),yes)
+OBJS-$(CONFIG_DC1394_DEMUXER) += dc1394.o
+endif
+
+ifeq ($(CONFIG_AUDIO_OSS),yes)
+OBJS-$(CONFIG_AUDIO_DEMUXER) += audio.o
+OBJS-$(CONFIG_AUDIO_MUXER) += audio.o
+endif
+
+EXTRALIBS := -L$(BUILD_ROOT)/libavutil -lavutil$(BUILDSUF) \
+ -lavcodec$(BUILDSUF) -L$(BUILD_ROOT)/libavcodec $(EXTRALIBS)
+
+ifeq ($(CONFIG_AUDIO_BEOS),yes)
+CPPOBJS+= beosaudio.o
+endif
+
+# protocols I/O
+OBJS+= avio.o aviobuf.o
+
+ifeq ($(CONFIG_PROTOCOLS),yes)
+OBJS+= file.o
+ifeq ($(CONFIG_NETWORK),yes)
+OBJS+= udp.o tcp.o http.o rtsp.o rtp.o rtpproto.o mpegts.o base64.o rtp_h264.o
+endif
+endif
+
+ifeq ($(CONFIG_LIBNUT),yes)
+OBJS-$(CONFIG_NUT_DEMUXER) += libnut.o riff.o
+OBJS-$(CONFIG_NUT_MUXER) += libnut.o riff.o
+else
+OBJS-$(CONFIG_NUT_DEMUXER) += nutdec.o riff.o
+#OBJS-$(CONFIG_NUT_MUXER) += nutenc.o riff.o
+endif
+
+ifeq ($(CONFIG_LIBOGG),yes)
+OBJS-$(CONFIG_OGG_MUXER) += ogg.o
+endif
+
+ifeq ($(CONFIG_GPL),yes)
+OBJS-$(CONFIG_GXF_MUXER) += gxfenc.o
+endif
+
+OBJS += $(OBJS-yes)
+
+NAME=avformat
+ifeq ($(BUILD_SHARED),yes)
+LIBVERSION=$(LAVFVERSION)
+LIBMAJOR=$(LAVFMAJOR)
+endif
+
+include ../common.mak
diff --git a/contrib/ffmpeg/libavformat/adtsenc.c b/contrib/ffmpeg/libavformat/adtsenc.c
new file mode 100644
index 000000000..1ef683838
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/adtsenc.c
@@ -0,0 +1,123 @@
+/*
+ * ADTS muxer.
+ * Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@smartjog.com>
+ * Mans Rullgard <mru@inprovide.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "bitstream.h"
+
+#define ADTS_HEADER_SIZE 7
+
+typedef struct {
+ int write_adts;
+ int objecttype;
+ int sample_rate_index;
+ int channel_conf;
+} ADTSContext;
+
+static int decode_extradata(ADTSContext *adts, uint8_t *buf, int size)
+{
+ GetBitContext gb;
+
+ init_get_bits(&gb, buf, size * 8);
+ adts->objecttype = get_bits(&gb, 5) - 1;
+ adts->sample_rate_index = get_bits(&gb, 4);
+ adts->channel_conf = get_bits(&gb, 4);
+
+ adts->write_adts = 1;
+
+ return 0;
+}
+
+static int adts_write_header(AVFormatContext *s)
+{
+ ADTSContext *adts = s->priv_data;
+ AVCodecContext *avc = s->streams[0]->codec;
+
+ if(avc->extradata_size > 0)
+ decode_extradata(adts, avc->extradata, avc->extradata_size);
+
+ return 0;
+}
+
+static int adts_write_frame_header(AVFormatContext *s, int size)
+{
+ ADTSContext *ctx = s->priv_data;
+ PutBitContext pb;
+ uint8_t buf[ADTS_HEADER_SIZE];
+
+ init_put_bits(&pb, buf, ADTS_HEADER_SIZE);
+
+ /* adts_fixed_header */
+ put_bits(&pb, 12, 0xfff); /* syncword */
+ put_bits(&pb, 1, 0); /* ID */
+ put_bits(&pb, 2, 0); /* layer */
+ put_bits(&pb, 1, 1); /* protection_absent */
+ put_bits(&pb, 2, ctx->objecttype); /* profile_objecttype */
+ put_bits(&pb, 4, ctx->sample_rate_index);
+ put_bits(&pb, 1, 0); /* private_bit */
+ put_bits(&pb, 3, ctx->channel_conf); /* channel_configuration */
+ put_bits(&pb, 1, 0); /* original_copy */
+ put_bits(&pb, 1, 0); /* home */
+
+ /* adts_variable_header */
+ put_bits(&pb, 1, 0); /* copyright_identification_bit */
+ put_bits(&pb, 1, 0); /* copyright_identification_start */
+ put_bits(&pb, 13, ADTS_HEADER_SIZE + size); /* aac_frame_length */
+ put_bits(&pb, 11, 0x7ff); /* adts_buffer_fullness */
+ put_bits(&pb, 2, 0); /* number_of_raw_data_blocks_in_frame */
+
+ flush_put_bits(&pb);
+ put_buffer(&s->pb, buf, ADTS_HEADER_SIZE);
+
+ return 0;
+}
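A worked example of the 7-byte header the routine above emits, assuming AAC LC (objecttype 1), 44100 Hz (sample_rate_index 4), stereo (channel_conf 2) and a 200-byte raw AAC frame: aac_frame_length is 7 + 200 = 207, and the bit fields concatenate to

    FF F1 50 80 19 FF FC

i.e. the 0xFFF syncword, protection_absent set, profile/rate/channels packed into the next two bytes, the 13-bit length 207 split across bytes 3-5, and the buffer-fullness field fixed at 0x7FF.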
+
+static int adts_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int adts_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ADTSContext *adts = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+
+ if (!pkt->size)
+ return 0;
+ if(adts->write_adts)
+ adts_write_frame_header(s, pkt->size);
+ put_buffer(pb, pkt->data, pkt->size);
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+AVOutputFormat adts_muxer = {
+ "adts",
+ "ADTS AAC",
+ "audio/aac",
+ "aac",
+ sizeof(ADTSContext),
+ CODEC_ID_AAC,
+ CODEC_ID_NONE,
+ adts_write_header,
+ adts_write_packet,
+ adts_write_trailer,
+};
diff --git a/contrib/ffmpeg/libavformat/aiff.c b/contrib/ffmpeg/libavformat/aiff.c
new file mode 100644
index 000000000..e4cf66c3b
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/aiff.c
@@ -0,0 +1,436 @@
+/*
+ * AIFF/AIFF-C muxer and demuxer
+ * Copyright (c) 2006 Patrick Guimond
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+#include "intfloat_readwrite.h"
+
+static const CodecTag codec_aiff_tags[] = {
+ { CODEC_ID_PCM_S16BE, MKTAG('N','O','N','E') },
+ { CODEC_ID_PCM_S8, MKTAG('N','O','N','E') },
+ { CODEC_ID_PCM_S24BE, MKTAG('N','O','N','E') },
+ { CODEC_ID_PCM_S32BE, MKTAG('N','O','N','E') },
+ { CODEC_ID_PCM_ALAW, MKTAG('a','l','a','w') },
+ { CODEC_ID_PCM_ALAW, MKTAG('A','L','A','W') },
+ { CODEC_ID_PCM_MULAW, MKTAG('u','l','a','w') },
+ { CODEC_ID_PCM_MULAW, MKTAG('U','L','A','W') },
+ { CODEC_ID_MACE3, MKTAG('M','A','C','3') },
+ { CODEC_ID_MACE6, MKTAG('M','A','C','6') },
+ { CODEC_ID_GSM, MKTAG('G','S','M',' ') },
+ { CODEC_ID_ADPCM_G726, MKTAG('G','7','2','6') },
+ { 0, 0 },
+};
+
+#define AIFF 0
+#define AIFF_C_VERSION1 0xA2805140
+
+static int aiff_codec_get_id (int bps)
+{
+ if (bps <= 8)
+ return CODEC_ID_PCM_S8;
+ if (bps <= 16)
+ return CODEC_ID_PCM_S16BE;
+ if (bps <= 24)
+ return CODEC_ID_PCM_S24BE;
+ if (bps <= 32)
+ return CODEC_ID_PCM_S32BE;
+
+ /* bigger than 32 isn't allowed */
+ return 0;
+}
+
+/* returns the size of the found tag */
+static int get_tag(ByteIOContext *pb, uint32_t * tag)
+{
+ int size;
+
+ if (url_feof(pb))
+ return AVERROR_IO;
+
+ *tag = get_le32(pb);
+ size = get_be32(pb);
+
+ if (size < 0)
+ size = 0x7fffffff;
+
+ return size;
+}
+
+/* Metadata string read */
+static void get_meta(ByteIOContext *pb, char * str, int strsize, int size)
+{
+ int res;
+
+ if (size > strsize-1)
+ res = get_buffer(pb, (uint8_t*)str, strsize-1);
+ else
+ res = get_buffer(pb, (uint8_t*)str, size);
+
+ if (res < 0)
+ return;
+
+ str[res] = 0;
+ if (size & 1)
+ size++;
+ size -= res;
+ if (size)
+ url_fskip(pb, size);
+}
+
+/* Returns the number of sound data frames or negative on error */
+static unsigned int get_aiff_header(ByteIOContext *pb, AVCodecContext *codec,
+ int size, unsigned version)
+{
+ AVExtFloat ext;
+ double sample_rate;
+ unsigned int num_frames;
+
+
+ if (size & 1)
+ size++;
+
+ codec->codec_type = CODEC_TYPE_AUDIO;
+ codec->channels = get_be16(pb);
+ num_frames = get_be32(pb);
+ codec->bits_per_sample = get_be16(pb);
+
+ get_buffer(pb, (uint8_t*)&ext, sizeof(ext));/* Sample rate is in */
+ sample_rate = av_ext2dbl(ext); /* 80 bits BE IEEE extended float */
+ codec->sample_rate = sample_rate;
+ size -= 18;
+
+ /* Got an AIFF-C? */
+ if (version == AIFF_C_VERSION1) {
+ codec->codec_tag = get_le32(pb);
+ codec->codec_id = codec_get_id (codec_aiff_tags, codec->codec_tag);
+
+ if (codec->codec_id == CODEC_ID_PCM_S16BE) {
+ codec->codec_id = aiff_codec_get_id (codec->bits_per_sample);
+ codec->bits_per_sample = av_get_bits_per_sample(codec->codec_id);
+ }
+
+ size -= 4;
+ } else {
+ /* Need the codec type */
+ codec->codec_id = aiff_codec_get_id (codec->bits_per_sample);
+ codec->bits_per_sample = av_get_bits_per_sample(codec->codec_id);
+ }
+
+ if (!codec->codec_id)
+ return AVERROR_INVALIDDATA;
+
+ /* Block align needs to be computed in all cases, as the definition
+ * is specific to applications -> here we use the WAVE format definition */
+ codec->block_align = (codec->bits_per_sample * codec->channels) >> 3;
+
+ codec->bit_rate = codec->sample_rate * (codec->block_align << 3);
+
+ /* Chunk is over */
+ if (size)
+ url_fseek(pb, size, SEEK_CUR);
+
+ return num_frames;
+}
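A quick worked example of the derived fields, assuming 16-bit stereo PCM at 44100 Hz: block_align = (16 * 2) >> 3 = 4 bytes per sample frame, and bit_rate = 44100 * (4 << 3) = 1411200 bit/s, the usual CD-audio rate.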
+
+#ifdef CONFIG_MUXERS
+typedef struct {
+ offset_t form;
+ offset_t frames;
+ offset_t ssnd;
+} AIFFOutputContext;
+
+static int aiff_write_header(AVFormatContext *s)
+{
+ AIFFOutputContext *aiff = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc = s->streams[0]->codec;
+ AVExtFloat sample_rate;
+
+ /* First verify if format is ok */
+ enc->codec_tag = codec_get_tag(codec_aiff_tags, enc->codec_id);
+ if (!enc->codec_tag) {
+ av_free(aiff);
+ return -1;
+ }
+
+ /* FORM AIFF header */
+ put_tag(pb, "FORM");
+ aiff->form = url_ftell(pb);
+ put_be32(pb, 0); /* file length */
+ put_tag(pb, "AIFC");
+
+ /* Version chunk */
+ put_tag(pb, "FVER");
+ put_be32(pb, 4);
+ put_be32(pb, 0xA2805140);
+
+ /* Common chunk */
+ put_tag(pb, "COMM");
+ put_be32(pb, 24); /* size */
+ put_be16(pb, enc->channels); /* Number of channels */
+
+ aiff->frames = url_ftell(pb);
+ put_be32(pb, 0); /* Number of frames */
+
+ if (!enc->bits_per_sample)
+ enc->bits_per_sample = av_get_bits_per_sample(enc->codec_id);
+ if (!enc->bits_per_sample) {
+ av_log(s, AV_LOG_ERROR, "could not compute bits per sample\n");
+ return -1;
+ }
+ if (!enc->block_align)
+ enc->block_align = (enc->bits_per_sample * enc->channels) >> 3;
+
+ put_be16(pb, enc->bits_per_sample); /* Sample size */
+
+ sample_rate = av_dbl2ext((double)enc->sample_rate);
+ put_buffer(pb, (uint8_t*)&sample_rate, sizeof(sample_rate));
+
+ put_le32(pb, enc->codec_tag);
+ put_be16(pb, 0);
+
+ /* Sound data chunk */
+ put_tag(pb, "SSND");
+ aiff->ssnd = url_ftell(pb); /* Sound chunk size */
+ put_be32(pb, 0); /* Sound samples data size */
+ put_be32(pb, 0); /* Data offset */
+ put_be32(pb, 0); /* Block-size (block align) */
+
+ av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);
+
+ /* Data is starting here */
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int aiff_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+static int aiff_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ AIFFOutputContext *aiff = s->priv_data;
+ AVCodecContext *enc = s->streams[0]->codec;
+
+ /* Chunk sizes must be even */
+ offset_t file_size, end_size;
+ end_size = file_size = url_ftell(pb);
+ if (file_size & 1) {
+ put_byte(pb, 0);
+ end_size++;
+ }
+
+ if (!url_is_streamed(&s->pb)) {
+ /* File length */
+ url_fseek(pb, aiff->form, SEEK_SET);
+ put_be32(pb, (uint32_t)(file_size - aiff->form - 4));
+
+ /* Number of sample frames */
+ url_fseek(pb, aiff->frames, SEEK_SET);
+ put_be32(pb, ((uint32_t)(file_size-aiff->ssnd-12))/enc->block_align);
+
+ /* Sound Data chunk size */
+ url_fseek(pb, aiff->ssnd, SEEK_SET);
+ put_be32(pb, (uint32_t)(file_size - aiff->ssnd - 4));
+
+ /* return to the end */
+ url_fseek(pb, end_size, SEEK_SET);
+
+ put_flush_packet(pb);
+ }
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+static int aiff_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size < 16)
+ return 0;
+ if (p->buf[0] == 'F' && p->buf[1] == 'O' &&
+ p->buf[2] == 'R' && p->buf[3] == 'M' &&
+ p->buf[8] == 'A' && p->buf[9] == 'I' &&
+ p->buf[10] == 'F' && (p->buf[11] == 'F' || p->buf[11] == 'C'))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* aiff input */
+static int aiff_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int size, filesize, offset;
+ uint32_t tag;
+ unsigned version = AIFF_C_VERSION1;
+ ByteIOContext *pb = &s->pb;
+ AVStream * st = s->streams[0];
+
+ /* check FORM header */
+ filesize = get_tag(pb, &tag);
+ if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M'))
+ return AVERROR_INVALIDDATA;
+
+ /* AIFF data type */
+ tag = get_le32(pb);
+ if (tag == MKTAG('A', 'I', 'F', 'F')) /* Got an AIFF file */
+ version = AIFF;
+ else if (tag != MKTAG('A', 'I', 'F', 'C')) /* An AIFF-C file then */
+ return AVERROR_INVALIDDATA;
+
+ filesize -= 4;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ while (filesize > 0) {
+ /* parse different chunks */
+ size = get_tag(pb, &tag);
+ if (size < 0)
+ return size;
+
+ filesize -= size + 8;
+
+ switch (tag) {
+ case MKTAG('C', 'O', 'M', 'M'): /* Common chunk */
+ /* Then for the complete header info */
+ st->nb_frames = get_aiff_header (pb, st->codec, size, version);
+ if (st->nb_frames < 0)
+ return st->nb_frames;
+ break;
+
+ case MKTAG('F', 'V', 'E', 'R'): /* Version chunk */
+ version = get_be32(pb);
+ break;
+
+ case MKTAG('N', 'A', 'M', 'E'): /* Sample name chunk */
+ get_meta (pb, s->title, sizeof(s->title), size);
+ break;
+
+ case MKTAG('A', 'U', 'T', 'H'): /* Author chunk */
+ get_meta (pb, s->author, sizeof(s->author), size);
+ break;
+
+ case MKTAG('(', 'c', ')', ' '): /* Copyright chunk */
+ get_meta (pb, s->copyright, sizeof(s->copyright), size);
+ break;
+
+ case MKTAG('A', 'N', 'N', 'O'): /* Annotation chunk */
+ get_meta (pb, s->comment, sizeof(s->comment), size);
+ break;
+
+ case MKTAG('S', 'S', 'N', 'D'): /* Sampled sound chunk */
+ get_be32(pb); /* Block align... don't care */
+ offset = get_be32(pb); /* Offset of sound data */
+ goto got_sound;
+
+ default: /* Jump */
+ if (size & 1) /* Always even aligned */
+ size++;
+ url_fskip (pb, size);
+ }
+ }
+
+ /* Reached the end of the chunk list without finding an SSND chunk */
+ return AVERROR_INVALIDDATA;
+
+got_sound:
+ /* Now positioned, get the sound data start and end */
+ if (st->nb_frames)
+ s->file_size = st->nb_frames * st->codec->block_align;
+
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+ st->start_time = 0;
+ st->duration = st->nb_frames;
+
+ /* Position the stream at the first block */
+ url_fskip(pb, offset);
+
+ return 0;
+}
+
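+/* maximum packet read size in bytes; reads are rounded down to a whole number of blocks */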
+#define MAX_SIZE 4096
+
+static int aiff_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ AVStream *st = s->streams[0];
+ int res;
+
+ /* End of stream may be reached */
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+
+ /* Now for that packet */
+ res = av_get_packet(&s->pb, pkt, (MAX_SIZE / st->codec->block_align) * st->codec->block_align);
+ if (res < 0)
+ return res;
+
+ /* Only one stream in an AIFF file */
+ pkt->stream_index = 0;
+ return 0;
+}
+
+static int aiff_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int aiff_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ return pcm_read_seek(s, stream_index, timestamp, flags);
+}
+
+#ifdef CONFIG_AIFF_DEMUXER
+AVInputFormat aiff_demuxer = {
+ "aiff",
+ "Audio IFF",
+ 0,
+ aiff_probe,
+ aiff_read_header,
+ aiff_read_packet,
+ aiff_read_close,
+ aiff_read_seek,
+};
+#endif
+
+#ifdef CONFIG_AIFF_MUXER
+AVOutputFormat aiff_muxer = {
+ "aiff",
+ "Audio IFF",
+ "audio/aiff",
+ "aif,aiff,afc,aifc",
+ sizeof(AIFFOutputContext),
+ CODEC_ID_PCM_S16BE,
+ CODEC_ID_NONE,
+ aiff_write_header,
+ aiff_write_packet,
+ aiff_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/allformats.c b/contrib/ffmpeg/libavformat/allformats.c
new file mode 100644
index 000000000..f4b16adff
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/allformats.c
@@ -0,0 +1,182 @@
+/*
+ * Register all the formats and protocols
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+
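+/* Each macro registers the given (de)muxer only when the matching ENABLE_* constant from the build configuration is non-zero. */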
+#define REGISTER_MUXER(X,x) \
+ if(ENABLE_##X##_MUXER) av_register_output_format(&x##_muxer)
+#define REGISTER_DEMUXER(X,x) \
+ if(ENABLE_##X##_DEMUXER) av_register_input_format(&x##_demuxer)
+#define REGISTER_MUXDEMUX(X,x) REGISTER_MUXER(X,x); REGISTER_DEMUXER(X,x)
+
+/* If you do not call this function, then you can select exactly which
+ formats you want to support */
+
+/**
+ * Initialize libavcodec and register all the codecs and formats.
+ */
+void av_register_all(void)
+{
+ static int inited = 0;
+
+ if (inited != 0)
+ return;
+ inited = 1;
+
+ avcodec_init();
+ avcodec_register_all();
+
+ REGISTER_DEMUXER (AAC, aac);
+ REGISTER_MUXDEMUX(AC3, ac3);
+ REGISTER_MUXER (ADTS, adts);
+ REGISTER_MUXDEMUX(AIFF, aiff);
+ REGISTER_MUXDEMUX(AMR, amr);
+ REGISTER_MUXDEMUX(ASF, asf);
+ REGISTER_MUXER (ASF_STREAM, asf_stream);
+ REGISTER_MUXDEMUX(AU, au);
+#if defined(CONFIG_AUDIO_OSS) || defined(CONFIG_AUDIO_BEOS)
+ REGISTER_MUXDEMUX(AUDIO, audio);
+#endif
+ REGISTER_MUXDEMUX(AVI, avi);
+#ifdef CONFIG_AVISYNTH
+ av_register_input_format(&avisynth_demuxer);
+#endif
+ REGISTER_DEMUXER (AVS, avs);
+ REGISTER_MUXER (CRC, crc);
+ REGISTER_DEMUXER (DAUD, daud);
+#ifdef CONFIG_DC1394
+ REGISTER_DEMUXER (DC1394, dc1394);
+#endif
+ REGISTER_DEMUXER (DSICIN, dsicin);
+ REGISTER_DEMUXER (DTS, dts);
+ REGISTER_MUXDEMUX(DV, dv);
+#ifdef CONFIG_DV1394
+ REGISTER_DEMUXER (DV1394, dv1394);
+#endif
+ REGISTER_DEMUXER (EA, ea);
+ REGISTER_MUXDEMUX(FFM, ffm);
+ REGISTER_MUXDEMUX(FLAC, flac);
+ REGISTER_DEMUXER (FLIC, flic);
+ REGISTER_MUXDEMUX(FLV, flv);
+ REGISTER_DEMUXER (FOURXM, fourxm);
+ REGISTER_MUXER (FRAMECRC, framecrc);
+ REGISTER_MUXDEMUX(GIF, gif);
+ REGISTER_DEMUXER (GXF, gxf);
+#ifdef CONFIG_GPL
+ REGISTER_MUXER (GXF, gxf);
+#endif
+ REGISTER_MUXDEMUX(H261, h261);
+ REGISTER_MUXDEMUX(H263, h263);
+ REGISTER_MUXDEMUX(H264, h264);
+ REGISTER_DEMUXER (IDCIN, idcin);
+ REGISTER_MUXDEMUX(IMAGE2, image2);
+ REGISTER_MUXDEMUX(IMAGE2PIPE, image2pipe);
+ REGISTER_DEMUXER (INGENIENT, ingenient);
+ REGISTER_DEMUXER (IPMOVIE, ipmovie);
+ REGISTER_MUXDEMUX(M4V, m4v);
+ REGISTER_DEMUXER (MATROSKA, matroska);
+ REGISTER_MUXDEMUX(MJPEG, mjpeg);
+ REGISTER_DEMUXER (MM, mm);
+ REGISTER_MUXDEMUX(MMF, mmf);
+ REGISTER_MUXDEMUX(MOV, mov);
+ REGISTER_MUXER (MP2, mp2);
+ REGISTER_MUXDEMUX(MP3, mp3);
+ REGISTER_MUXER (MP4, mp4);
+ REGISTER_MUXER (MPEG1SYSTEM, mpeg1system);
+ REGISTER_MUXER (MPEG1VCD, mpeg1vcd);
+ REGISTER_MUXER (MPEG1VIDEO, mpeg1video);
+ REGISTER_MUXER (MPEG2DVD, mpeg2dvd);
+ REGISTER_MUXER (MPEG2SVCD, mpeg2svcd);
+ REGISTER_MUXER (MPEG2VIDEO, mpeg2video);
+ REGISTER_MUXER (MPEG2VOB, mpeg2vob);
+ REGISTER_DEMUXER (MPEGPS, mpegps);
+ REGISTER_MUXDEMUX(MPEGTS, mpegts);
+ REGISTER_DEMUXER (MPEGVIDEO, mpegvideo);
+ REGISTER_MUXER (MPJPEG, mpjpeg);
+ REGISTER_DEMUXER (MTV, mtv);
+ REGISTER_DEMUXER (MXF, mxf);
+ REGISTER_DEMUXER (NSV, nsv);
+ REGISTER_MUXER (NULL, null);
+ REGISTER_DEMUXER (NUT, nut);
+#ifdef CONFIG_LIBNUT
+ REGISTER_MUXER (NUT, nut);
+#endif
+ REGISTER_DEMUXER (NUV, nuv);
+ REGISTER_DEMUXER (OGG, ogg);
+#ifdef CONFIG_LIBOGG
+ REGISTER_MUXER (OGG, ogg);
+#endif
+ REGISTER_MUXDEMUX(PCM_ALAW, pcm_alaw);
+ REGISTER_MUXDEMUX(PCM_MULAW, pcm_mulaw);
+ REGISTER_MUXDEMUX(PCM_S16BE, pcm_s16be);
+ REGISTER_MUXDEMUX(PCM_S16LE, pcm_s16le);
+ REGISTER_MUXDEMUX(PCM_S8, pcm_s8);
+ REGISTER_MUXDEMUX(PCM_U16BE, pcm_u16be);
+ REGISTER_MUXDEMUX(PCM_U16LE, pcm_u16le);
+ REGISTER_MUXDEMUX(PCM_U8, pcm_u8);
+ REGISTER_MUXER (PSP, psp);
+ REGISTER_MUXDEMUX(RAWVIDEO, rawvideo);
+ REGISTER_MUXDEMUX(RM, rm);
+ REGISTER_DEMUXER (ROQ, roq);
+#ifdef CONFIG_NETWORK
+ REGISTER_DEMUXER (REDIR, redir);
+ REGISTER_MUXER (RTP, rtp);
+ REGISTER_DEMUXER (RTSP, rtsp);
+ REGISTER_DEMUXER (SDP, sdp);
+ av_register_rtp_dynamic_payload_handlers();
+#endif
+ REGISTER_DEMUXER (SEGAFILM, segafilm);
+ REGISTER_DEMUXER (SHORTEN, shorten);
+ REGISTER_DEMUXER (SMACKER, smacker);
+ REGISTER_DEMUXER (SOL, sol);
+ REGISTER_DEMUXER (STR, str);
+ REGISTER_MUXDEMUX(SWF, swf);
+ REGISTER_MUXER (TG2, tg2);
+ REGISTER_MUXER (TGP, tgp);
+ REGISTER_DEMUXER (TIERTEXSEQ, tiertexseq);
+ REGISTER_DEMUXER (TTA, tta);
+#ifdef CONFIG_VIDEO4LINUX2
+ REGISTER_DEMUXER (V4L2, v4l2);
+#endif
+#if defined(CONFIG_VIDEO4LINUX) || defined(CONFIG_BKTR)
+ REGISTER_DEMUXER (VIDEO_GRAB_DEVICE, video_grab_device);
+#endif
+ REGISTER_DEMUXER (VMD, vmd);
+ REGISTER_MUXDEMUX(VOC, voc);
+ REGISTER_MUXDEMUX(WAV, wav);
+ REGISTER_DEMUXER (WC3, wc3);
+ REGISTER_DEMUXER (WSAUD, wsaud);
+ REGISTER_DEMUXER (WSVQA, wsvqa);
+ REGISTER_DEMUXER (WV, wv);
+ REGISTER_MUXDEMUX(YUV4MPEGPIPE, yuv4mpegpipe);
+
+#ifdef CONFIG_PROTOCOLS
+ /* file protocols */
+ register_protocol(&file_protocol);
+ register_protocol(&pipe_protocol);
+#ifdef CONFIG_NETWORK
+ register_protocol(&udp_protocol);
+ register_protocol(&rtp_protocol);
+ register_protocol(&tcp_protocol);
+ register_protocol(&http_protocol);
+#endif
+#endif
+}
diff --git a/contrib/ffmpeg/libavformat/allformats.h b/contrib/ffmpeg/libavformat/allformats.h
new file mode 100644
index 000000000..a138841c9
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/allformats.h
@@ -0,0 +1,176 @@
+/*
+ * Register all the formats and protocols.
+ * copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef ALLFORMATS_H
+#define ALLFORMATS_H
+
+extern AVInputFormat fourxm_demuxer;
+extern AVOutputFormat adts_muxer;
+extern AVInputFormat aiff_demuxer;
+extern AVOutputFormat aiff_muxer;
+extern AVInputFormat amr_demuxer;
+extern AVOutputFormat amr_muxer;
+extern AVInputFormat asf_demuxer;
+extern AVOutputFormat asf_muxer;
+extern AVOutputFormat asf_stream_muxer;
+extern AVInputFormat au_demuxer;
+extern AVOutputFormat au_muxer;
+extern AVInputFormat audio_demuxer;
+extern AVOutputFormat audio_muxer;
+extern AVInputFormat avi_demuxer;
+extern AVOutputFormat avi_muxer;
+extern AVInputFormat avisynth_demuxer;
+extern AVInputFormat avs_demuxer;
+extern AVOutputFormat crc_muxer;
+extern AVOutputFormat framecrc_muxer;
+extern AVInputFormat daud_demuxer;
+extern AVInputFormat dc1394_demuxer;
+extern AVInputFormat dsicin_demuxer;
+extern AVInputFormat dv1394_demuxer;
+extern AVInputFormat dv_demuxer;
+extern AVOutputFormat dv_muxer;
+extern AVInputFormat ea_demuxer;
+extern AVInputFormat ffm_demuxer;
+extern AVOutputFormat ffm_muxer;
+extern AVInputFormat flic_demuxer;
+extern AVInputFormat flv_demuxer;
+extern AVOutputFormat flv_muxer;
+extern AVOutputFormat gif_muxer;
+extern AVInputFormat gif_demuxer;
+extern AVInputFormat video_grab_device_demuxer;
+extern AVInputFormat gxf_demuxer;
+extern AVOutputFormat gxf_muxer;
+extern AVInputFormat idcin_demuxer;
+extern AVInputFormat roq_demuxer;
+extern AVInputFormat image2_demuxer;
+extern AVInputFormat image2pipe_demuxer;
+extern AVOutputFormat image2_muxer;
+extern AVOutputFormat image2pipe_muxer;
+extern AVInputFormat image_demuxer;
+extern AVInputFormat imagepipe_demuxer;
+extern AVOutputFormat image_muxer;
+extern AVOutputFormat imagepipe_muxer;
+extern AVInputFormat ipmovie_demuxer;
+extern AVInputFormat matroska_demuxer;
+extern AVInputFormat mm_demuxer;
+extern AVInputFormat mmf_demuxer;
+extern AVOutputFormat mmf_muxer;
+extern AVInputFormat mov_demuxer;
+extern AVOutputFormat mov_muxer;
+extern AVOutputFormat tgp_muxer;
+extern AVOutputFormat mp4_muxer;
+extern AVOutputFormat psp_muxer;
+extern AVOutputFormat tg2_muxer;
+extern AVInputFormat mp3_demuxer;
+extern AVOutputFormat mp2_muxer;
+extern AVOutputFormat mp3_muxer;
+extern AVOutputFormat mpeg1system_muxer;
+extern AVOutputFormat mpeg1vcd_muxer;
+extern AVOutputFormat mpeg2vob_muxer;
+extern AVOutputFormat mpeg2svcd_muxer;
+extern AVOutputFormat mpeg2dvd_muxer;
+extern AVInputFormat mpegps_demuxer;
+extern AVInputFormat mpegts_demuxer;
+extern AVOutputFormat mpegts_muxer;
+extern AVOutputFormat mpjpeg_muxer;
+extern AVInputFormat mtv_demuxer;
+extern AVInputFormat mxf_demuxer;
+extern AVInputFormat nsv_demuxer;
+extern AVInputFormat nut_demuxer;
+extern AVOutputFormat nut_muxer;
+extern AVInputFormat nuv_demuxer;
+extern AVInputFormat ogg_demuxer;
+extern AVOutputFormat ogg_muxer;
+extern AVInputFormat str_demuxer;
+extern AVInputFormat shorten_demuxer;
+extern AVInputFormat flac_demuxer;
+extern AVOutputFormat flac_muxer;
+extern AVInputFormat ac3_demuxer;
+extern AVOutputFormat ac3_muxer;
+extern AVInputFormat dts_demuxer;
+extern AVInputFormat aac_demuxer;
+extern AVInputFormat h261_demuxer;
+extern AVOutputFormat h261_muxer;
+extern AVInputFormat h263_demuxer;
+extern AVOutputFormat h263_muxer;
+extern AVInputFormat m4v_demuxer;
+extern AVOutputFormat m4v_muxer;
+extern AVInputFormat h264_demuxer;
+extern AVOutputFormat h264_muxer;
+extern AVInputFormat mpegvideo_demuxer;
+extern AVOutputFormat mpeg1video_muxer;
+extern AVOutputFormat mpeg2video_muxer;
+extern AVInputFormat mjpeg_demuxer;
+extern AVInputFormat ingenient_demuxer;
+extern AVOutputFormat mjpeg_muxer;
+extern AVInputFormat pcm_s16le_demuxer;
+extern AVOutputFormat pcm_s16le_muxer;
+extern AVInputFormat pcm_s16be_demuxer;
+extern AVOutputFormat pcm_s16be_muxer;
+extern AVInputFormat pcm_u16le_demuxer;
+extern AVOutputFormat pcm_u16le_muxer;
+extern AVInputFormat pcm_u16be_demuxer;
+extern AVOutputFormat pcm_u16be_muxer;
+extern AVInputFormat pcm_s8_demuxer;
+extern AVOutputFormat pcm_s8_muxer;
+extern AVInputFormat pcm_u8_demuxer;
+extern AVOutputFormat pcm_u8_muxer;
+extern AVInputFormat pcm_mulaw_demuxer;
+extern AVOutputFormat pcm_mulaw_muxer;
+extern AVInputFormat pcm_alaw_demuxer;
+extern AVOutputFormat pcm_alaw_muxer;
+extern AVInputFormat rawvideo_demuxer;
+extern AVOutputFormat rawvideo_muxer;
+extern AVOutputFormat null_muxer;
+extern AVInputFormat rm_demuxer;
+extern AVOutputFormat rm_muxer;
+extern AVInputFormat sdp_demuxer;
+extern AVInputFormat redir_demuxer;
+extern AVInputFormat segafilm_demuxer;
+extern AVInputFormat vmd_demuxer;
+extern AVInputFormat smacker_demuxer;
+extern AVInputFormat sol_demuxer;
+extern AVInputFormat swf_demuxer;
+extern AVOutputFormat swf_muxer;
+extern AVInputFormat tta_demuxer;
+extern AVInputFormat v4l2_demuxer;
+extern AVInputFormat voc_demuxer;
+extern AVOutputFormat voc_muxer;
+extern AVInputFormat wav_demuxer;
+extern AVOutputFormat wav_muxer;
+extern AVInputFormat wc3_demuxer;
+extern AVInputFormat wsaud_demuxer;
+extern AVInputFormat wsvqa_demuxer;
+extern AVInputFormat wv_demuxer;
+extern AVOutputFormat yuv4mpegpipe_muxer;
+extern AVInputFormat yuv4mpegpipe_demuxer;
+extern AVInputFormat tiertexseq_demuxer;
+
+/* raw.c */
+int pcm_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags);
+
+/* rtsp.c */
+int redir_open(AVFormatContext **ic_ptr, ByteIOContext *f);
+/* rtp.c */
+void av_register_rtp_dynamic_payload_handlers();
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/amr.c b/contrib/ffmpeg/libavformat/amr.c
new file mode 100644
index 000000000..635a898fa
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/amr.c
@@ -0,0 +1,201 @@
+/*
+ * amr file format
+ * Copyright (c) 2001 ffmpeg project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+Write and read amr data according to RFC3267, http://www.ietf.org/rfc/rfc3267.txt?number=3267
+
+Only mono files are supported.
+
+*/
+#include "avformat.h"
+
+static const char AMR_header [] = "#!AMR\n";
+static const char AMRWB_header [] = "#!AMR-WB\n";
+
+#ifdef CONFIG_MUXERS
+static int amr_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc = s->streams[0]->codec;
+
+ s->priv_data = NULL;
+
+ if (enc->codec_id == CODEC_ID_AMR_NB)
+ {
+ put_tag(pb, AMR_header); /* magic number */
+ }
+ else if(enc->codec_id == CODEC_ID_AMR_WB)
+ {
+ put_tag(pb, AMRWB_header); /* magic number */
+ }
+ else
+ {
+ return -1;
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int amr_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ put_buffer(&s->pb, pkt->data, pkt->size);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int amr_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+#endif /* CONFIG_MUXERS */
+
+static int amr_probe(AVProbeData *p)
+{
+ //Only check for the "#!AMR" prefix, which matches both AMR-NB and AMR-WB.
+ //This will also match the multichannel variants "#!AMR_MC1.0\n" and
+ //"#!AMR-WB_MC1.0\n" (not supported).
+
+ if (p->buf_size < 5)
+ return 0;
+ if(memcmp(p->buf,AMR_header,5)==0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* amr input */
+static int amr_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ uint8_t header[9];
+
+ get_buffer(pb, header, 6);
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ {
+ return AVERROR_NOMEM;
+ }
+ if(memcmp(header,AMR_header,6)!=0)
+ {
+ get_buffer(pb, header+6, 3);
+ if(memcmp(header,AMRWB_header,9)!=0)
+ {
+ return -1;
+ }
+
+ st->codec->codec_tag = MKTAG('s', 'a', 'w', 'b');
+ st->codec->codec_id = CODEC_ID_AMR_WB;
+ st->codec->sample_rate = 16000;
+ }
+ else
+ {
+ st->codec->codec_tag = MKTAG('s', 'a', 'm', 'r');
+ st->codec->codec_id = CODEC_ID_AMR_NB;
+ st->codec->sample_rate = 8000;
+ }
+ st->codec->channels = 1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+
+ return 0;
+}
+
+static int amr_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ AVCodecContext *enc = s->streams[0]->codec;
+ int read, size, toc, mode;
+
+ if (url_feof(&s->pb))
+ {
+ return AVERROR_IO;
+ }
+
+//FIXME this is wrong; this should rather be done in an AVParser
+ toc=get_byte(&s->pb);
+ mode = (toc >> 3) & 0x0F;
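+ /* the frame type field of the TOC byte selects the fixed frame size for this mode */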
+
+ if (enc->codec_id == CODEC_ID_AMR_NB)
+ {
+ static const uint8_t packed_size[16] = {12, 13, 15, 17, 19, 20, 26, 31, 5, 0, 0, 0, 0, 0, 0, 0};
+
+ size=packed_size[mode]+1;
+ }
+ else if(enc->codec_id == CODEC_ID_AMR_WB)
+ {
+ static uint8_t packed_size[16] = {18, 24, 33, 37, 41, 47, 51, 59, 61, 6, 6, 0, 0, 0, 1, 1};
+
+ size=packed_size[mode];
+ }
+ else
+ {
+ assert(0);
+ }
+
+ if ( (size==0) || av_new_packet(pkt, size))
+ {
+ return AVERROR_IO;
+ }
+
+ pkt->stream_index = 0;
+ pkt->pos= url_ftell(&s->pb);
+ pkt->data[0]=toc;
+ pkt->duration= enc->codec_id == CODEC_ID_AMR_NB ? 160 : 320;
+ read = get_buffer(&s->pb, pkt->data+1, size-1);
+
+ if (read != size-1)
+ {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_AMR_DEMUXER
+AVInputFormat amr_demuxer = {
+ "amr",
+ "3gpp amr file format",
+ 0, /*priv_data_size*/
+ amr_probe,
+ amr_read_header,
+ amr_read_packet,
+ NULL,
+};
+#endif
+
+#ifdef CONFIG_AMR_MUXER
+AVOutputFormat amr_muxer = {
+ "amr",
+ "3gpp amr file format",
+ "audio/amr",
+ "amr",
+ 0,
+ CODEC_ID_AMR_NB,
+ CODEC_ID_NONE,
+ amr_write_header,
+ amr_write_packet,
+ amr_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/asf-enc.c b/contrib/ffmpeg/libavformat/asf-enc.c
new file mode 100644
index 000000000..3ef67507f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/asf-enc.c
@@ -0,0 +1,866 @@
+/*
+ * ASF compatible muxer
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+#include "asf.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+
+#define ASF_INDEXED_INTERVAL 10000000
+#define ASF_INDEX_BLOCK 600
+
+#define ASF_PACKET_ERROR_CORRECTION_DATA_SIZE 0x2
+#define ASF_PACKET_ERROR_CORRECTION_FLAGS (\
+ ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT | \
+ ASF_PACKET_ERROR_CORRECTION_DATA_SIZE\
+ )
+
+#if (ASF_PACKET_ERROR_CORRECTION_FLAGS != 0)
+# define ASF_PACKET_ERROR_CORRECTION_FLAGS_FIELD_SIZE 1
+#else
+# define ASF_PACKET_ERROR_CORRECTION_FLAGS_FIELD_SIZE 0
+#endif
+
+#define ASF_PPI_PROPERTY_FLAGS (\
+ ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE | \
+ ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD | \
+ ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE | \
+ ASF_PL_FLAG_STREAM_NUMBER_LENGTH_FIELD_IS_BYTE \
+ )
+
+#define ASF_PPI_LENGTH_TYPE_FLAGS 0
+
+#define ASF_PAYLOAD_FLAGS ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD
+
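+/* The #if chains below derive the on-disk width (0, 1, 2 or 4 bytes) of each packet and payload header field from the flag constants chosen above, so the size macros further down stay consistent with the flags actually written. */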
+#if (ASF_PPI_FLAG_SEQUENCE_FIELD_IS_BYTE == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE))
+# define ASF_PPI_SEQUENCE_FIELD_SIZE 1
+#endif
+#if (ASF_PPI_FLAG_SEQUENCE_FIELD_IS_WORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE))
+# define ASF_PPI_SEQUENCE_FIELD_SIZE 2
+#endif
+#if (ASF_PPI_FLAG_SEQUENCE_FIELD_IS_DWORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_SEQUENCE_FIELD_SIZE))
+# define ASF_PPI_SEQUENCE_FIELD_SIZE 4
+#endif
+#ifndef ASF_PPI_SEQUENCE_FIELD_SIZE
+# define ASF_PPI_SEQUENCE_FIELD_SIZE 0
+#endif
+
+
+#if (ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_BYTE == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 1
+#endif
+#if (ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_WORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 2
+#endif
+#if (ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_DWORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 4
+#endif
+#ifndef ASF_PPI_PACKET_LENGTH_FIELD_SIZE
+# define ASF_PPI_PACKET_LENGTH_FIELD_SIZE 0
+#endif
+
+#if (ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 1
+#endif
+#if (ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 2
+#endif
+#if (ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_DWORD == (ASF_PPI_LENGTH_TYPE_FLAGS & ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE))
+# define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 4
+#endif
+#ifndef ASF_PPI_PADDING_LENGTH_FIELD_SIZE
+# define ASF_PPI_PADDING_LENGTH_FIELD_SIZE 0
+#endif
+
+#if (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 1
+#endif
+#if (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_WORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 2
+#endif
+#if (ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_DWORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 4
+#endif
+#ifndef ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE
+# define ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE 0
+#endif
+
+#if (ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_BYTE == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 1
+#endif
+#if (ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_WORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 2
+#endif
+#if (ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 4
+#endif
+#ifndef ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE
+# define ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE 0
+#endif
+
+#if (ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 1
+#endif
+#if (ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_WORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 2
+#endif
+#if (ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_DWORD == (ASF_PPI_PROPERTY_FLAGS & ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 4
+#endif
+#ifndef ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE
+# define ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE 0
+#endif
+
+#if (ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_BYTE == (ASF_PAYLOAD_FLAGS & ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_LENGTH_FIELD_SIZE 1
+#endif
+#if (ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD == (ASF_PAYLOAD_FLAGS & ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE))
+# define ASF_PAYLOAD_LENGTH_FIELD_SIZE 2
+#endif
+#ifndef ASF_PAYLOAD_LENGTH_FIELD_SIZE
+# define ASF_PAYLOAD_LENGTH_FIELD_SIZE 0
+#endif
+
+#define PACKET_HEADER_MIN_SIZE (\
+ ASF_PACKET_ERROR_CORRECTION_FLAGS_FIELD_SIZE + \
+ ASF_PACKET_ERROR_CORRECTION_DATA_SIZE + \
+ 1 + /*Length Type Flags*/ \
+ 1 + /*Property Flags*/ \
+ ASF_PPI_PACKET_LENGTH_FIELD_SIZE + \
+ ASF_PPI_SEQUENCE_FIELD_SIZE + \
+ ASF_PPI_PADDING_LENGTH_FIELD_SIZE + \
+ 4 + /*Send Time Field*/ \
+ 2 /*Duration Field*/ \
+ )
+
+
+// Replicated Data shall be at least 8 bytes long.
+#define ASF_PAYLOAD_REPLICATED_DATA_LENGTH 0x08
+
+#define PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD (\
+ 1 + /*Stream Number*/ \
+ ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE + \
+ ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE + \
+ ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE + \
+ ASF_PAYLOAD_REPLICATED_DATA_LENGTH \
+ )
+
+#define PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS (\
+ 1 + /*Stream Number*/ \
+ ASF_PAYLOAD_MEDIA_OBJECT_NUMBER_FIELD_SIZE + \
+ ASF_PAYLOAD_OFFSET_INTO_MEDIA_OBJECT_FIELD_SIZE + \
+ ASF_PAYLOAD_REPLICATED_DATA_LENGTH_FIELD_SIZE + \
+ ASF_PAYLOAD_REPLICATED_DATA_LENGTH + \
+ ASF_PAYLOAD_LENGTH_FIELD_SIZE \
+ )
+
+#define SINGLE_PAYLOAD_DATA_LENGTH (\
+ PACKET_SIZE - \
+ PACKET_HEADER_MIN_SIZE - \
+ PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD \
+ )
+
+#define MULTI_PAYLOAD_CONSTANT (\
+ PACKET_SIZE - \
+ PACKET_HEADER_MIN_SIZE - \
+ 1 - /*Payload Flags*/ \
+ 2*PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS \
+ )
+
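+/* preroll, in milliseconds */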
+static int preroll_time = 2000;
+
+static const uint8_t error_spread_ADPCM_G726[] = { 0x01, 0x90, 0x01, 0x90, 0x01, 0x01, 0x00, 0x00 };
+
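+/* GUIDs are written with the first three fields little-endian and the last eight bytes in storage order, matching the usual Microsoft GUID layout. */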
+static void put_guid(ByteIOContext *s, const GUID *g)
+{
+ int i;
+
+ put_le32(s, g->v1);
+ put_le16(s, g->v2);
+ put_le16(s, g->v3);
+ for(i=0;i<8;i++)
+ put_byte(s, g->v4[i]);
+}
+
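+/* put_str16 and put_str16_nolen write the tag as 16-bit little-endian code units, one per input byte, including the terminating NUL; put_str16 also prefixes the code-unit count. */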
+static void put_str16(ByteIOContext *s, const char *tag)
+{
+ int c;
+
+ put_le16(s,strlen(tag) + 1);
+ for(;;) {
+ c = (uint8_t)*tag++;
+ put_le16(s, c);
+ if (c == '\0')
+ break;
+ }
+}
+
+static void put_str16_nolen(ByteIOContext *s, const char *tag)
+{
+ int c;
+
+ for(;;) {
+ c = (uint8_t)*tag++;
+ put_le16(s, c);
+ if (c == '\0')
+ break;
+ }
+}
+
+static int64_t put_header(ByteIOContext *pb, const GUID *g)
+{
+ int64_t pos;
+
+ pos = url_ftell(pb);
+ put_guid(pb, g);
+ put_le64(pb, 24);
+ return pos;
+}
+
+/* update header size */
+static void end_header(ByteIOContext *pb, int64_t pos)
+{
+ int64_t pos1;
+
+ pos1 = url_ftell(pb);
+ url_fseek(pb, pos + 16, SEEK_SET);
+ put_le64(pb, pos1 - pos);
+ url_fseek(pb, pos1, SEEK_SET);
+}
+
+/* write an asf chunk (only used in streaming case) */
+static void put_chunk(AVFormatContext *s, int type, int payload_length, int flags)
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int length;
+
+ length = payload_length + 8;
+ put_le16(pb, type);
+ put_le16(pb, length); //size
+ put_le32(pb, asf->seqno);//sequence number
+ put_le16(pb, flags); /* unknown bytes */
+ put_le16(pb, length); //size_confirm
+ asf->seqno++;
+}
+
+/* convert from Unix time (seconds since 1970) to Windows FILETIME (100 ns units since 1601-01-01) */
+static int64_t unix_to_file_time(int ti)
+{
+ int64_t t;
+
+ t = ti * int64_t_C(10000000);
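+ /* offset between the 1601-01-01 and 1970-01-01 epochs, in 100 ns units */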
+ t += int64_t_C(116444736000000000);
+ return t;
+}
+
+/* write the header (written twice when not streamed: the trailer rewrites it with the final sizes) */
+static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data_chunk_size)
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int header_size, n, extra_size, extra_size2, wav_extra_size, file_time;
+ int has_title;
+ AVCodecContext *enc;
+ int64_t header_offset, cur_pos, hpos;
+ int bit_rate;
+ int64_t duration;
+
+ duration = asf->duration + preroll_time * 10000;
+ has_title = (s->title[0] || s->author[0] || s->copyright[0] || s->comment[0]);
+
+ bit_rate = 0;
+ for(n=0;n<s->nb_streams;n++) {
+ enc = s->streams[n]->codec;
+
+ av_set_pts_info(s->streams[n], 32, 1, 1000); /* 32 bit pts in ms */
+
+ bit_rate += enc->bit_rate;
+ }
+
+ if (asf->is_streamed) {
+ put_chunk(s, 0x4824, 0, 0xc00); /* start of stream (length will be patched later) */
+ }
+
+ put_guid(pb, &asf_header);
+ put_le64(pb, -1); /* header length, will be patched after */
+ put_le32(pb, 3 + has_title + s->nb_streams); /* number of chunks in header */
+ put_byte(pb, 1); /* ??? */
+ put_byte(pb, 2); /* ??? */
+
+ /* file header */
+ header_offset = url_ftell(pb);
+ hpos = put_header(pb, &file_header);
+ put_guid(pb, &my_guid);
+ put_le64(pb, file_size);
+ file_time = 0;
+ put_le64(pb, unix_to_file_time(file_time));
+ put_le64(pb, asf->nb_packets); /* number of packets */
+ put_le64(pb, duration); /* end time stamp (in 100ns units) */
+ put_le64(pb, duration); /* duration (in 100ns units) */
+ put_le32(pb, preroll_time); /* start time stamp */
+ put_le32(pb, 0); /* ??? */
+ put_le32(pb, asf->is_streamed ? 1 : 0); /* ??? */
+ put_le32(pb, asf->packet_size); /* packet size */
+ put_le32(pb, asf->packet_size); /* packet size */
+ put_le32(pb, bit_rate); /* Nominal data rate in bps */
+ end_header(pb, hpos);
+
+ /* unknown headers */
+ hpos = put_header(pb, &head1_guid);
+ put_guid(pb, &head2_guid);
+ put_le32(pb, 6);
+ put_le16(pb, 0);
+ end_header(pb, hpos);
+
+ /* title and other infos */
+ if (has_title) {
+ hpos = put_header(pb, &comment_header);
+ if ( s->title[0] ) { put_le16(pb, 2 * (strlen(s->title ) + 1)); } else { put_le16(pb, 0); }
+ if ( s->author[0] ) { put_le16(pb, 2 * (strlen(s->author ) + 1)); } else { put_le16(pb, 0); }
+ if ( s->copyright[0] ) { put_le16(pb, 2 * (strlen(s->copyright) + 1)); } else { put_le16(pb, 0); }
+ if ( s->comment[0] ) { put_le16(pb, 2 * (strlen(s->comment ) + 1)); } else { put_le16(pb, 0); }
+ put_le16(pb, 0);
+ if ( s->title[0] ) put_str16_nolen(pb, s->title);
+ if ( s->author[0] ) put_str16_nolen(pb, s->author);
+ if ( s->copyright[0] ) put_str16_nolen(pb, s->copyright);
+ if ( s->comment[0] ) put_str16_nolen(pb, s->comment);
+ end_header(pb, hpos);
+ }
+
+ /* stream headers */
+ for(n=0;n<s->nb_streams;n++) {
+ int64_t es_pos;
+ const uint8_t *er_spr = NULL;
+ int er_spr_len = 0;
+ // ASFStream *stream = &asf->streams[n];
+
+ enc = s->streams[n]->codec;
+ asf->streams[n].num = n + 1;
+ asf->streams[n].seq = 0;
+
+
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ if (enc->codec_id == CODEC_ID_ADPCM_G726) {
+ er_spr = error_spread_ADPCM_G726;
+ er_spr_len = sizeof(error_spread_ADPCM_G726);
+ }
+ }
+
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ wav_extra_size = 0;
+ extra_size = 18 + wav_extra_size;
+ extra_size2 = er_spr_len;
+ break;
+ default:
+ case CODEC_TYPE_VIDEO:
+ wav_extra_size = enc->extradata_size;
+ extra_size = 0x33 + wav_extra_size;
+ extra_size2 = 0;
+ break;
+ }
+
+ hpos = put_header(pb, &stream_header);
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ put_guid(pb, &audio_stream);
+ if ((er_spr != NULL) && (er_spr_len != 0)) {
+ put_guid(pb, &audio_conceal_spread);
+ } else {
+ put_guid(pb, &video_conceal_none);
+ }
+ } else {
+ put_guid(pb, &video_stream);
+ put_guid(pb, &video_conceal_none);
+ }
+ put_le64(pb, 0); /* ??? */
+ es_pos = url_ftell(pb);
+ put_le32(pb, extra_size); /* wav header len */
+ put_le32(pb, extra_size2); /* additional data len */
+ put_le16(pb, n + 1); /* stream number */
+ put_le32(pb, 0); /* ??? */
+
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ /* WAVEFORMATEX header */
+ int wavsize = put_wav_header(pb, enc);
+ if ((enc->codec_id != CODEC_ID_MP3) && (enc->codec_id != CODEC_ID_MP2) && (enc->codec_id != CODEC_ID_ADPCM_IMA_WAV) && (enc->extradata_size==0)) {
+ wavsize += 2;
+ put_le16(pb, 0);
+ }
+
+ if (wavsize < 0)
+ return -1;
+ if (wavsize != extra_size) {
+ cur_pos = url_ftell(pb);
+ url_fseek(pb, es_pos, SEEK_SET);
+ put_le32(pb, wavsize); /* wav header len */
+ url_fseek(pb, cur_pos, SEEK_SET);
+ }
+ /* ERROR Correction */
+ if ((er_spr != NULL) && (er_spr_len != 0))
+ put_buffer(pb, er_spr, er_spr_len);
+ } else {
+ put_le32(pb, enc->width);
+ put_le32(pb, enc->height);
+ put_byte(pb, 2); /* ??? */
+ put_le16(pb, 40 + enc->extradata_size); /* size */
+
+ /* BITMAPINFOHEADER header */
+ put_bmp_header(pb, enc, codec_bmp_tags, 1);
+ }
+ end_header(pb, hpos);
+ }
+
+ /* media comments */
+
+ hpos = put_header(pb, &codec_comment_header);
+ put_guid(pb, &codec_comment1_header);
+ put_le32(pb, s->nb_streams);
+ for(n=0;n<s->nb_streams;n++) {
+ AVCodec *p;
+
+ enc = s->streams[n]->codec;
+ p = avcodec_find_encoder(enc->codec_id);
+
+ put_le16(pb, asf->streams[n].num);
+ put_str16(pb, p ? p->name : enc->codec_name);
+ put_le16(pb, 0); /* no parameters */
+
+
+ /* id */
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ put_le16(pb, 2);
+ if(!enc->codec_tag)
+ enc->codec_tag = codec_get_tag(codec_wav_tags, enc->codec_id);
+ if(!enc->codec_tag)
+ return -1;
+ put_le16(pb, enc->codec_tag);
+ } else {
+ put_le16(pb, 4);
+ if(!enc->codec_tag)
+ enc->codec_tag = codec_get_tag(codec_bmp_tags, enc->codec_id);
+ if(!enc->codec_tag)
+ return -1;
+ put_le32(pb, enc->codec_tag);
+ }
+ }
+ end_header(pb, hpos);
+
+ /* patch the header size fields */
+
+ cur_pos = url_ftell(pb);
+ header_size = cur_pos - header_offset;
+ if (asf->is_streamed) {
+ header_size += 8 + 30 + 50;
+
+ url_fseek(pb, header_offset - 10 - 30, SEEK_SET);
+ put_le16(pb, header_size);
+ url_fseek(pb, header_offset - 2 - 30, SEEK_SET);
+ put_le16(pb, header_size);
+
+ header_size -= 8 + 30 + 50;
+ }
+ header_size += 24 + 6;
+ url_fseek(pb, header_offset - 14, SEEK_SET);
+ put_le64(pb, header_size);
+ url_fseek(pb, cur_pos, SEEK_SET);
+
+ /* movie chunk, followed by packets of packet_size */
+ asf->data_offset = cur_pos;
+ put_guid(pb, &data_header);
+ put_le64(pb, data_chunk_size);
+ put_guid(pb, &my_guid);
+ put_le64(pb, asf->nb_packets); /* nb packets */
+ put_byte(pb, 1); /* ??? */
+ put_byte(pb, 1); /* ??? */
+ return 0;
+}
+
+static int asf_write_header(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+
+ asf->packet_size = PACKET_SIZE;
+ asf->nb_packets = 0;
+
+ asf->last_indexed_pts = 0;
+ asf->index_ptr = (ASFIndex*)av_malloc( sizeof(ASFIndex) * ASF_INDEX_BLOCK );
+ asf->nb_index_memory_alloc = ASF_INDEX_BLOCK;
+ asf->nb_index_count = 0;
+ asf->maximum_packet = 0;
+
+ if (asf_write_header1(s, 0, 50) < 0) {
+ //av_free(asf);
+ return -1;
+ }
+
+ put_flush_packet(&s->pb);
+
+ asf->packet_nb_payloads = 0;
+ asf->prev_packet_sent_time = 0;
+ asf->packet_timestamp_start = -1;
+ asf->packet_timestamp_end = -1;
+ init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
+ NULL, NULL, NULL, NULL);
+
+ return 0;
+}
+
+static int asf_write_stream_header(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+
+ asf->is_streamed = 1;
+
+ return asf_write_header(s);
+}
+
+static int put_payload_parsing_info(
+ AVFormatContext *s,
+ unsigned int sendtime,
+ unsigned int duration,
+ int nb_payloads,
+ int padsize
+ )
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ppi_size, i;
+ unsigned char *start_ppi_ptr = pb->buf_ptr;
+
+ int iLengthTypeFlags = ASF_PPI_LENGTH_TYPE_FLAGS;
+
+ put_byte(pb, ASF_PACKET_ERROR_CORRECTION_FLAGS);
+ for (i = 0; i < ASF_PACKET_ERROR_CORRECTION_DATA_SIZE; i++){
+ put_byte(pb, 0x0);
+ }
+
+ if (asf->multi_payloads_present)
+ iLengthTypeFlags |= ASF_PPI_FLAG_MULTIPLE_PAYLOADS_PRESENT;
+
+ if (padsize > 0) {
+ if (padsize < 256)
+ iLengthTypeFlags |= ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE;
+ else
+ iLengthTypeFlags |= ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD;
+ }
+ put_byte(pb, iLengthTypeFlags);
+
+ put_byte(pb, ASF_PPI_PROPERTY_FLAGS);
+
+ if (iLengthTypeFlags & ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD)
+ put_le16(pb, padsize - 2);
+ if (iLengthTypeFlags & ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE)
+ put_byte(pb, padsize - 1);
+
+ put_le32(pb, sendtime);
+ put_le16(pb, duration);
+ if (asf->multi_payloads_present)
+ put_byte(pb, nb_payloads | ASF_PAYLOAD_FLAGS);
+
+ ppi_size = pb->buf_ptr - start_ppi_ptr;
+
+ return ppi_size;
+}
+
+static void flush_packet(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+ int packet_hdr_size, packet_filled_size;
+
+ if (asf->is_streamed) {
+ put_chunk(s, 0x4424, asf->packet_size, 0);
+ }
+
+ packet_hdr_size = put_payload_parsing_info(
+ s,
+ asf->packet_timestamp_start,
+ asf->packet_timestamp_end - asf->packet_timestamp_start,
+ asf->packet_nb_payloads,
+ asf->packet_size_left
+ );
+
+ packet_filled_size = PACKET_SIZE - packet_hdr_size - asf->packet_size_left;
+ memset(asf->packet_buf + packet_filled_size, 0, asf->packet_size_left);
+
+ put_buffer(&s->pb, asf->packet_buf, asf->packet_size - packet_hdr_size);
+
+ put_flush_packet(&s->pb);
+ asf->nb_packets++;
+ asf->packet_nb_payloads = 0;
+ asf->prev_packet_sent_time = asf->packet_timestamp_start;
+ asf->packet_timestamp_start = -1;
+ asf->packet_timestamp_end = -1;
+ init_put_byte(&asf->pb, asf->packet_buf, asf->packet_size, 1,
+ NULL, NULL, NULL, NULL);
+}
+
+static void put_payload_header(
+ AVFormatContext *s,
+ ASFStream *stream,
+ int presentation_time,
+ int m_obj_size,
+ int m_obj_offset,
+ int payload_len,
+ int flags
+ )
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &asf->pb;
+ int val;
+
+ val = stream->num;
+ if (flags & PKT_FLAG_KEY)
+ val |= ASF_PL_FLAG_KEY_FRAME;
+ put_byte(pb, val);
+
+ put_byte(pb, stream->seq); //Media object number
+ put_le32(pb, m_obj_offset); //Offset Into Media Object
+
+ // Replicated Data shall be at least 8 bytes long.
+ // The first 4 bytes of data shall contain the
+ // Size of the Media Object that the payload belongs to.
+ // The next 4 bytes of data shall contain the
+ // Presentation Time for the media object that the payload belongs to.
+ put_byte(pb, ASF_PAYLOAD_REPLICATED_DATA_LENGTH);
+
+ put_le32(pb, m_obj_size); //Replicated Data - Media Object Size
+ put_le32(pb, presentation_time);//Replicated Data - Presentation Time
+
+ if (asf->multi_payloads_present){
+ put_le16(pb, payload_len); //payload length
+ }
+}
+
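+/* Split one media object (frame) into payloads, filling the current packet and flushing packets as they become full. */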
+static void put_frame(
+ AVFormatContext *s,
+ ASFStream *stream,
+ int timestamp,
+ const uint8_t *buf,
+ int m_obj_size,
+ int flags
+ )
+{
+ ASFContext *asf = s->priv_data;
+ int m_obj_offset, payload_len, frag_len1;
+
+ m_obj_offset = 0;
+ while (m_obj_offset < m_obj_size) {
+ payload_len = m_obj_size - m_obj_offset;
+ if (asf->packet_timestamp_start == -1) {
+ asf->multi_payloads_present = (payload_len < MULTI_PAYLOAD_CONSTANT);
+
+ if (asf->multi_payloads_present){
+ asf->packet_size_left = PACKET_SIZE; //For debug
+ asf->packet_size_left = PACKET_SIZE - PACKET_HEADER_MIN_SIZE - 1;
+ frag_len1 = MULTI_PAYLOAD_CONSTANT - 1;
+ }
+ else {
+ asf->packet_size_left = PACKET_SIZE - PACKET_HEADER_MIN_SIZE;
+ frag_len1 = SINGLE_PAYLOAD_DATA_LENGTH;
+ }
+ if (asf->prev_packet_sent_time > timestamp)
+ asf->packet_timestamp_start = asf->prev_packet_sent_time;
+ else
+ asf->packet_timestamp_start = timestamp;
+ }
+ else {
+ // multi payloads
+ frag_len1 = asf->packet_size_left - PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS;
+
+ if (asf->prev_packet_sent_time > timestamp)
+ asf->packet_timestamp_start = asf->prev_packet_sent_time;
+ else if (asf->packet_timestamp_start >= timestamp)
+ asf->packet_timestamp_start = timestamp;
+ }
+ if (frag_len1 > 0) {
+ if (payload_len > frag_len1)
+ payload_len = frag_len1;
+ else if (payload_len == (frag_len1 - 1))
+ payload_len = frag_len1 - 2; //additional byte need to put padding length
+
+ put_payload_header(s, stream, timestamp+preroll_time, m_obj_size, m_obj_offset, payload_len, flags);
+ put_buffer(&asf->pb, buf, payload_len);
+
+ if (asf->multi_payloads_present)
+ asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS);
+ else
+ asf->packet_size_left -= (payload_len + PAYLOAD_HEADER_SIZE_SINGLE_PAYLOAD);
+ asf->packet_timestamp_end = timestamp;
+
+ asf->packet_nb_payloads++;
+ } else {
+ payload_len = 0;
+ }
+ m_obj_offset += payload_len;
+ buf += payload_len;
+
+ if (!asf->multi_payloads_present)
+ flush_packet(s);
+ else if (asf->packet_size_left <= (PAYLOAD_HEADER_SIZE_MULTIPLE_PAYLOADS + 1))
+ flush_packet(s);
+ }
+ stream->seq++;
+}
+
+static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ASFContext *asf = s->priv_data;
+ ASFStream *stream;
+ int64_t duration;
+ AVCodecContext *codec;
+ int64_t packet_st,pts;
+ int start_sec,i;
+
+ codec = s->streams[pkt->stream_index]->codec;
+ stream = &asf->streams[pkt->stream_index];
+
+ //XXX /FIXME use duration from AVPacket (quick hack by)
+ pts = (pkt->pts != AV_NOPTS_VALUE) ? pkt->pts : pkt->dts;
+ if (pts == AV_NOPTS_VALUE) {
+ if (codec->codec_type == CODEC_TYPE_AUDIO) {
+ duration = (codec->frame_number * (int64_t)codec->frame_size * int64_t_C(10000000)) /
+ codec->sample_rate;
+ } else {
+ duration = av_rescale(codec->frame_number * (int64_t)codec->time_base.num, 10000000, codec->time_base.den);
+ }
+ } else {
+ duration = pts * 10000;
+ }
+ if (duration > asf->duration)
+ asf->duration = duration;
+
+ packet_st = asf->nb_packets;
+ put_frame(s, stream, pkt->pts, pkt->data, pkt->size, pkt->flags);
+
+ /* check index */
+ if ((!asf->is_streamed) && (codec->codec_type == CODEC_TYPE_VIDEO) && (pkt->flags & PKT_FLAG_KEY)) {
+ start_sec = (int)(duration / int64_t_C(10000000));
+ if (start_sec != (int)(asf->last_indexed_pts / int64_t_C(10000000))) {
+ for(i=asf->nb_index_count;i<start_sec;i++) {
+ if (i>=asf->nb_index_memory_alloc) {
+ asf->nb_index_memory_alloc += ASF_INDEX_BLOCK;
+ asf->index_ptr = (ASFIndex*)av_realloc( asf->index_ptr, sizeof(ASFIndex) * asf->nb_index_memory_alloc );
+ }
+ // store
+ asf->index_ptr[i].packet_number = (uint32_t)packet_st;
+ asf->index_ptr[i].packet_count = (uint16_t)(asf->nb_packets-packet_st);
+ if (asf->maximum_packet < (uint16_t)(asf->nb_packets-packet_st))
+ asf->maximum_packet = (uint16_t)(asf->nb_packets-packet_st);
+ }
+ asf->nb_index_count = start_sec;
+ asf->last_indexed_pts = duration;
+ }
+ }
+ return 0;
+}
+
+//
+static int asf_write_index(AVFormatContext *s, ASFIndex *index, uint16_t max, uint32_t count)
+{
+ ByteIOContext *pb = &s->pb;
+ int i;
+
+ put_guid(pb, &simple_index_header);
+ put_le64(pb, 24 + 16 + 8 + 4 + 4 + (4 + 2)*count);
+ put_guid(pb, &my_guid);
+ put_le64(pb, ASF_INDEXED_INTERVAL);
+ put_le32(pb, max);
+ put_le32(pb, count);
+ for(i=0; i<count; i++) {
+ put_le32(pb, index[i].packet_number);
+ put_le16(pb, index[i].packet_count);
+ }
+
+ return 0;
+}
+
+static int asf_write_trailer(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+ int64_t file_size,data_size;
+
+ /* flush the current packet */
+ if (asf->pb.buf_ptr > asf->pb.buffer)
+ flush_packet(s);
+
+ /* write index */
+ data_size = url_ftell(&s->pb);
+ if ((!asf->is_streamed) && (asf->nb_index_count != 0)) {
+ asf_write_index(s, asf->index_ptr, asf->maximum_packet, asf->nb_index_count);
+ }
+ put_flush_packet(&s->pb);
+
+ if (asf->is_streamed) {
+ put_chunk(s, 0x4524, 0, 0); /* end of stream */
+ } else {
+ /* rewrite an updated header */
+ file_size = url_ftell(&s->pb);
+ url_fseek(&s->pb, 0, SEEK_SET);
+ asf_write_header1(s, file_size, data_size - asf->data_offset);
+ }
+
+ put_flush_packet(&s->pb);
+ av_free(asf->index_ptr);
+ return 0;
+}
+
+#ifdef CONFIG_ASF_MUXER
+AVOutputFormat asf_muxer = {
+ "asf",
+ "asf format",
+ "video/x-ms-asf",
+ "asf,wmv,wma",
+ sizeof(ASFContext),
+#ifdef CONFIG_MP3LAME
+ CODEC_ID_MP3,
+#else
+ CODEC_ID_MP2,
+#endif
+ CODEC_ID_MSMPEG4V3,
+ asf_write_header,
+ asf_write_packet,
+ asf_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+
+#ifdef CONFIG_ASF_STREAM_MUXER
+AVOutputFormat asf_stream_muxer = {
+ "asf_stream",
+ "asf format",
+ "video/x-ms-asf",
+ "asf,wmv,wma",
+ sizeof(ASFContext),
+#ifdef CONFIG_MP3LAME
+ CODEC_ID_MP3,
+#else
+ CODEC_ID_MP2,
+#endif
+ CODEC_ID_MSMPEG4V3,
+ asf_write_stream_header,
+ asf_write_packet,
+ asf_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif //CONFIG_ASF_STREAM_MUXER
diff --git a/contrib/ffmpeg/libavformat/asf.c b/contrib/ffmpeg/libavformat/asf.c
new file mode 100644
index 000000000..f63e4b695
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/asf.c
@@ -0,0 +1,951 @@
+/*
+ * ASF compatible demuxer
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+#include "mpegaudio.h"
+#include "asf.h"
+#include "common.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#define FRAME_HEADER_SIZE 17
+// Fix Me! FRAME_HEADER_SIZE may be different.
+
+static const GUID index_guid = {
+ 0x33000890, 0xe5b1, 0x11cf, { 0x89, 0xf4, 0x00, 0xa0, 0xc9, 0x03, 0x49, 0xcb },
+};
+
+/**********************************/
+/* decoding */
+
+//#define DEBUG
+
+#ifdef DEBUG
+#define PRINT_IF_GUID(g,cmp) \
+if (!memcmp(g, &cmp, sizeof(GUID))) \
+ printf("(GUID: %s) ", #cmp)
+
+static void print_guid(const GUID *g)
+{
+ int i;
+ PRINT_IF_GUID(g, asf_header);
+ else PRINT_IF_GUID(g, file_header);
+ else PRINT_IF_GUID(g, stream_header);
+ else PRINT_IF_GUID(g, audio_stream);
+ else PRINT_IF_GUID(g, audio_conceal_none);
+ else PRINT_IF_GUID(g, video_stream);
+ else PRINT_IF_GUID(g, video_conceal_none);
+ else PRINT_IF_GUID(g, command_stream);
+ else PRINT_IF_GUID(g, comment_header);
+ else PRINT_IF_GUID(g, codec_comment_header);
+ else PRINT_IF_GUID(g, codec_comment1_header);
+ else PRINT_IF_GUID(g, data_header);
+ else PRINT_IF_GUID(g, index_guid);
+ else PRINT_IF_GUID(g, head1_guid);
+ else PRINT_IF_GUID(g, head2_guid);
+ else PRINT_IF_GUID(g, my_guid);
+ else PRINT_IF_GUID(g, ext_stream_header);
+ else PRINT_IF_GUID(g, extended_content_header);
+ else PRINT_IF_GUID(g, ext_stream_embed_stream_header);
+ else PRINT_IF_GUID(g, ext_stream_audio_stream);
+ else
+ printf("(GUID: unknown) ");
+ printf("0x%08x, 0x%04x, 0x%04x, {", g->v1, g->v2, g->v3);
+ for(i=0;i<8;i++)
+ printf(" 0x%02x,", g->v4[i]);
+ printf("}\n");
+}
+#undef PRINT_IF_GUID
+#endif
+
+static void get_guid(ByteIOContext *s, GUID *g)
+{
+ int i;
+
+ g->v1 = get_le32(s);
+ g->v2 = get_le16(s);
+ g->v3 = get_le16(s);
+ for(i=0;i<8;i++)
+ g->v4[i] = get_byte(s);
+}
+
+#if 0
+static void get_str16(ByteIOContext *pb, char *buf, int buf_size)
+{
+ int len, c;
+ char *q;
+
+ len = get_le16(pb);
+ q = buf;
+ while (len > 0) {
+ c = get_le16(pb);
+ if ((q - buf) < buf_size - 1)
+ *q++ = c;
+ len--;
+ }
+ *q = '\0';
+}
+#endif
+
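+/* Read len bytes of UTF-16LE from pb and store the string as UTF-8 in buf, truncated to buf_size. */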
+static void get_str16_nolen(ByteIOContext *pb, int len, char *buf, int buf_size)
+{
+ char* q = buf;
+ len /= 2;
+ while (len--) {
+ uint8_t tmp;
+ PUT_UTF8(get_le16(pb), tmp, if (q - buf < buf_size - 1) *q++ = tmp;)
+ }
+ *q = '\0';
+}
+
+static int asf_probe(AVProbeData *pd)
+{
+ GUID g;
+ const unsigned char *p;
+ int i;
+
+ /* check file header */
+ if (pd->buf_size <= 32)
+ return 0;
+ p = pd->buf;
+ g.v1 = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
+ p += 4;
+ g.v2 = p[0] | (p[1] << 8);
+ p += 2;
+ g.v3 = p[0] | (p[1] << 8);
+ p += 2;
+ for(i=0;i<8;i++)
+ g.v4[i] = *p++;
+
+ if (!memcmp(&g, &asf_header, sizeof(GUID)))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int asf_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ ASFContext *asf = s->priv_data;
+ GUID g;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ ASFStream *asf_st;
+ int size, i;
+ int64_t gsize;
+
+ get_guid(pb, &g);
+ if (memcmp(&g, &asf_header, sizeof(GUID)))
+ goto fail;
+ get_le64(pb);
+ get_le32(pb);
+ get_byte(pb);
+ get_byte(pb);
+ memset(&asf->asfid2avid, -1, sizeof(asf->asfid2avid));
+ for(;;) {
+ get_guid(pb, &g);
+ gsize = get_le64(pb);
+#ifdef DEBUG
+ printf("%08"PRIx64": ", url_ftell(pb) - 24);
+ print_guid(&g);
+ printf(" size=0x%"PRIx64"\n", gsize);
+#endif
+ if (gsize < 24)
+ goto fail;
+ if (!memcmp(&g, &file_header, sizeof(GUID))) {
+ get_guid(pb, &asf->hdr.guid);
+ asf->hdr.file_size = get_le64(pb);
+ asf->hdr.create_time = get_le64(pb);
+ asf->hdr.packets_count = get_le64(pb);
+ asf->hdr.send_time = get_le64(pb);
+ asf->hdr.play_time = get_le64(pb);
+ asf->hdr.preroll = get_le32(pb);
+ asf->hdr.ignore = get_le32(pb);
+ asf->hdr.flags = get_le32(pb);
+ asf->hdr.min_pktsize = get_le32(pb);
+ asf->hdr.max_pktsize = get_le32(pb);
+ asf->hdr.max_bitrate = get_le32(pb);
+ asf->packet_size = asf->hdr.max_pktsize;
+ asf->nb_packets = asf->hdr.packets_count;
+ } else if (!memcmp(&g, &stream_header, sizeof(GUID))) {
+ int type, type_specific_size, sizeX;
+ uint64_t total_size;
+ unsigned int tag1;
+ int64_t pos1, pos2;
+ int test_for_ext_stream_audio;
+
+ pos1 = url_ftell(pb);
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ goto fail;
+ av_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
+ asf_st = av_mallocz(sizeof(ASFStream));
+ if (!asf_st)
+ goto fail;
+ st->priv_data = asf_st;
+ st->start_time = asf->hdr.preroll;
+ st->duration = asf->hdr.send_time /
+ (10000000 / 1000) - st->start_time;
+ get_guid(pb, &g);
+
+ test_for_ext_stream_audio = 0;
+ if (!memcmp(&g, &audio_stream, sizeof(GUID))) {
+ type = CODEC_TYPE_AUDIO;
+ } else if (!memcmp(&g, &video_stream, sizeof(GUID))) {
+ type = CODEC_TYPE_VIDEO;
+ } else if (!memcmp(&g, &command_stream, sizeof(GUID))) {
+ type = CODEC_TYPE_UNKNOWN;
+ } else if (!memcmp(&g, &ext_stream_embed_stream_header, sizeof(GUID))) {
+ test_for_ext_stream_audio = 1;
+ type = CODEC_TYPE_UNKNOWN;
+ } else {
+ goto fail;
+ }
+ get_guid(pb, &g);
+ total_size = get_le64(pb);
+ type_specific_size = get_le32(pb);
+ get_le32(pb);
+ st->id = get_le16(pb) & 0x7f; /* stream id */
+ // mapping of asf ID to AV stream ID;
+ asf->asfid2avid[st->id] = s->nb_streams - 1;
+
+ get_le32(pb);
+
+ if (test_for_ext_stream_audio) {
+ get_guid(pb, &g);
+ if (!memcmp(&g, &ext_stream_audio_stream, sizeof(GUID))) {
+ type = CODEC_TYPE_AUDIO;
+ get_guid(pb, &g);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_guid(pb, &g);
+ get_le32(pb);
+ }
+ }
+
+ st->codec->codec_type = type;
+ if (type == CODEC_TYPE_AUDIO) {
+ get_wav_header(pb, st->codec, type_specific_size);
+ st->need_parsing = 1;
+ /* We have to init the frame size at some point .... */
+ pos2 = url_ftell(pb);
+ if (gsize > (pos2 + 8 - pos1 + 24)) {
+ asf_st->ds_span = get_byte(pb);
+ asf_st->ds_packet_size = get_le16(pb);
+ asf_st->ds_chunk_size = get_le16(pb);
+ asf_st->ds_data_size = get_le16(pb);
+ asf_st->ds_silence_data = get_byte(pb);
+ }
+ //printf("Descrambling: ps:%d cs:%d ds:%d s:%d sd:%d\n",
+ // asf_st->ds_packet_size, asf_st->ds_chunk_size,
+ // asf_st->ds_data_size, asf_st->ds_span, asf_st->ds_silence_data);
+ if (asf_st->ds_span > 1) {
+ if (!asf_st->ds_chunk_size
+ || (asf_st->ds_packet_size/asf_st->ds_chunk_size <= 1))
+ asf_st->ds_span = 0; // disable descrambling
+ }
+ switch (st->codec->codec_id) {
+ case CODEC_ID_MP3:
+ st->codec->frame_size = MPA_FRAME_SIZE;
+ break;
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S16BE:
+ case CODEC_ID_PCM_U16LE:
+ case CODEC_ID_PCM_U16BE:
+ case CODEC_ID_PCM_S8:
+ case CODEC_ID_PCM_U8:
+ case CODEC_ID_PCM_ALAW:
+ case CODEC_ID_PCM_MULAW:
+ st->codec->frame_size = 1;
+ break;
+ default:
+ /* This is probably wrong, but it prevents a crash later */
+ st->codec->frame_size = 1;
+ break;
+ }
+ } else if (type == CODEC_TYPE_VIDEO) {
+ get_le32(pb);
+ get_le32(pb);
+ get_byte(pb);
+ size = get_le16(pb); /* size */
+ sizeX= get_le32(pb); /* size */
+ st->codec->width = get_le32(pb);
+ st->codec->height = get_le32(pb);
+ /* not available for asf */
+ get_le16(pb); /* panes */
+ st->codec->bits_per_sample = get_le16(pb); /* depth */
+ tag1 = get_le32(pb);
+ url_fskip(pb, 20);
+// av_log(NULL, AV_LOG_DEBUG, "size:%d tsize:%d sizeX:%d\n", size, total_size, sizeX);
+ size= sizeX;
+ if (size > 40) {
+ st->codec->extradata_size = size - 40;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
+ }
+
+ /* Extract palette from extradata if bpp <= 8 */
+ /* This code assumes that extradata contains only palette */
+ /* This is true for all paletted codecs implemented in ffmpeg */
+ if (st->codec->extradata_size && (st->codec->bits_per_sample <= 8)) {
+ st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
+#ifdef WORDS_BIGENDIAN
+ for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
+ st->codec->palctrl->palette[i] = bswap_32(((uint32_t*)st->codec->extradata)[i]);
+#else
+ memcpy(st->codec->palctrl->palette, st->codec->extradata,
+ FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
+#endif
+ st->codec->palctrl->palette_changed = 1;
+ }
+
+ st->codec->codec_tag = tag1;
+ st->codec->codec_id = codec_get_id(codec_bmp_tags, tag1);
+ if(tag1 == MKTAG('D', 'V', 'R', ' '))
+ st->need_parsing = 1;
+ }
+ pos2 = url_ftell(pb);
+ url_fskip(pb, gsize - (pos2 - pos1 + 24));
+ } else if (!memcmp(&g, &data_header, sizeof(GUID))) {
+ asf->data_object_offset = url_ftell(pb);
+ if (gsize != (uint64_t)-1 && gsize >= 24) {
+ asf->data_object_size = gsize - 24;
+ } else {
+ asf->data_object_size = (uint64_t)-1;
+ }
+ break;
+ } else if (!memcmp(&g, &comment_header, sizeof(GUID))) {
+ int len1, len2, len3, len4, len5;
+
+ len1 = get_le16(pb);
+ len2 = get_le16(pb);
+ len3 = get_le16(pb);
+ len4 = get_le16(pb);
+ len5 = get_le16(pb);
+ get_str16_nolen(pb, len1, s->title, sizeof(s->title));
+ get_str16_nolen(pb, len2, s->author, sizeof(s->author));
+ get_str16_nolen(pb, len3, s->copyright, sizeof(s->copyright));
+ get_str16_nolen(pb, len4, s->comment, sizeof(s->comment));
+ url_fskip(pb, len5);
+ } else if (!memcmp(&g, &extended_content_header, sizeof(GUID))) {
+ int desc_count, i;
+
+ desc_count = get_le16(pb);
+ for(i=0;i<desc_count;i++)
+ {
+ int name_len,value_type,value_len;
+ uint64_t value_num = 0;
+ char *name, *value;
+
+ name_len = get_le16(pb);
+ name = (char *)av_malloc(name_len * 2);
+ get_str16_nolen(pb, name_len, name, name_len * 2);
+ value_type = get_le16(pb);
+ value_len = get_le16(pb);
+ if ((value_type == 0) || (value_type == 1)) // unicode or byte
+ {
+ value = (char *)av_malloc(value_len * 2);
+ get_str16_nolen(pb, value_len, value,
+ value_len * 2);
+ if (strcmp(name,"WM/AlbumTitle")==0) { pstrcpy(s->album, sizeof(s->album), value); }
+ av_free(value);
+ }
+ if ((value_type >= 2) && (value_type <= 5)) // boolean or DWORD or QWORD or WORD
+ {
+ if (value_type==2) value_num = get_le32(pb);
+ if (value_type==3) value_num = get_le32(pb);
+ if (value_type==4) value_num = get_le64(pb);
+ if (value_type==5) value_num = get_le16(pb);
+ if (strcmp(name,"WM/Track")==0) s->track = value_num + 1;
+ if (strcmp(name,"WM/TrackNumber")==0) s->track = value_num;
+ }
+ av_free(name);
+ }
+ } else if (!memcmp(&g, &ext_stream_header, sizeof(GUID))) {
+ int ext_len, payload_ext_ct, stream_ct;
+ uint32_t ext_d;
+ int64_t pos_ex_st;
+ pos_ex_st = url_ftell(pb);
+
+ get_le64(pb);
+ get_le64(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le32(pb);
+ get_le16(pb);
+ get_le16(pb);
+ get_le64(pb);
+ stream_ct = get_le16(pb);
+ payload_ext_ct = get_le16(pb);
+
+ for (i=0; i<stream_ct; i++){
+ get_le16(pb);
+ ext_len = get_le16(pb);
+ url_fseek(pb, ext_len, SEEK_CUR);
+ }
+
+ for (i=0; i<payload_ext_ct; i++){
+ get_guid(pb, &g);
+ ext_d=get_le16(pb);
+ ext_len=get_le32(pb);
+ url_fseek(pb, ext_len, SEEK_CUR);
+ }
+
+            // there could be an optional stream properties object to follow
+            // if so, the next iteration will pick it up
+ } else if (!memcmp(&g, &head1_guid, sizeof(GUID))) {
+ int v1, v2;
+ get_guid(pb, &g);
+ v1 = get_le32(pb);
+ v2 = get_le16(pb);
+#if 0
+ } else if (!memcmp(&g, &codec_comment_header, sizeof(GUID))) {
+ int len, v1, n, num;
+ char str[256], *q;
+ char tag[16];
+
+ get_guid(pb, &g);
+ print_guid(&g);
+
+ n = get_le32(pb);
+ for(i=0;i<n;i++) {
+ num = get_le16(pb); /* stream number */
+ get_str16(pb, str, sizeof(str));
+ get_str16(pb, str, sizeof(str));
+ len = get_le16(pb);
+ q = tag;
+ while (len > 0) {
+ v1 = get_byte(pb);
+ if ((q - tag) < sizeof(tag) - 1)
+ *q++ = v1;
+ len--;
+ }
+ *q = '\0';
+ }
+#endif
+ } else if (url_feof(pb)) {
+ goto fail;
+ } else {
+ url_fseek(pb, gsize - 24, SEEK_CUR);
+ }
+ }
+ get_guid(pb, &g);
+ get_le64(pb);
+ get_byte(pb);
+ get_byte(pb);
+ if (url_feof(pb))
+ goto fail;
+ asf->data_offset = url_ftell(pb);
+ asf->packet_size_left = 0;
+
+ return 0;
+
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st = s->streams[i];
+ if (st) {
+ av_free(st->priv_data);
+ av_free(st->codec->extradata);
+ }
+ av_free(st);
+ }
+ return -1;
+}
+
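+/* Read a field whose width is selected by a 2-bit flag (3 -> 32 bits, 2 -> 16 bits,
+   1 -> 8 bits, 0 -> use the supplied default) and count the bytes consumed in rsize. */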
+#define DO_2BITS(bits, var, defval) \
+ switch (bits & 3) \
+ { \
+ case 3: var = get_le32(pb); rsize += 4; break; \
+ case 2: var = get_le16(pb); rsize += 2; break; \
+ case 1: var = get_byte(pb); rsize++; break; \
+ default: var = defval; break; \
+ }
+
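+/* Parse the error correction byte and the payload parsing information (packet
+   length, padding size, send time, segment count) at the start of a data packet. */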
+static int asf_get_packet(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t packet_length, padsize;
+ int rsize = 9;
+ int c;
+
+ assert((url_ftell(&s->pb) - s->data_offset) % asf->packet_size == 0);
+
+ c = get_byte(pb);
+ if (c != 0x82) {
+ if (!url_feof(pb))
+ av_log(s, AV_LOG_ERROR, "ff asf bad header %x at:%"PRId64"\n", c, url_ftell(pb));
+ }
+ if ((c & 0x0f) == 2) { // always true for now
+ if (get_le16(pb) != 0) {
+ if (!url_feof(pb))
+ av_log(s, AV_LOG_ERROR, "ff asf bad non zero\n");
+ return AVERROR_IO;
+ }
+ rsize+=2;
+/* }else{
+ if (!url_feof(pb))
+ printf("ff asf bad header %x at:%"PRId64"\n", c, url_ftell(pb));
+ return AVERROR_IO;*/
+ }
+
+ asf->packet_flags = get_byte(pb);
+ asf->packet_property = get_byte(pb);
+
+ DO_2BITS(asf->packet_flags >> 5, packet_length, asf->packet_size);
+ DO_2BITS(asf->packet_flags >> 1, padsize, 0); // sequence ignored
+ DO_2BITS(asf->packet_flags >> 3, padsize, 0); // padding length
+
+ //the following checks prevent overflows and infinite loops
+ if(packet_length >= (1U<<29)){
+ av_log(s, AV_LOG_ERROR, "invalid packet_length %d at:%"PRId64"\n", packet_length, url_ftell(pb));
+ return 0; // FIXME this should be -1
+ }
+ if(padsize >= (1U<<29)){
+ av_log(s, AV_LOG_ERROR, "invalid padsize %d at:%"PRId64"\n", padsize, url_ftell(pb));
+ return 0; // FIXME this should be -1
+ }
+
+ asf->packet_timestamp = get_le32(pb);
+ get_le16(pb); /* duration */
+    // rsize now accounts for the minimum of 11 bytes that must be present
+
+ if (asf->packet_flags & 0x01) {
+ asf->packet_segsizetype = get_byte(pb); rsize++;
+ asf->packet_segments = asf->packet_segsizetype & 0x3f;
+ } else {
+ asf->packet_segments = 1;
+ asf->packet_segsizetype = 0x80;
+ }
+ asf->packet_size_left = packet_length - padsize - rsize;
+ if (packet_length < asf->hdr.min_pktsize)
+ padsize += asf->hdr.min_pktsize - packet_length;
+ asf->packet_padsize = padsize;
+#ifdef DEBUG
+ printf("packet: size=%d padsize=%d left=%d\n", asf->packet_size, asf->packet_padsize, asf->packet_size_left);
+#endif
+ return 0;
+}
+
+static int asf_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ASFContext *asf = s->priv_data;
+ ASFStream *asf_st = 0;
+ ByteIOContext *pb = &s->pb;
+ //static int pc = 0;
+ for (;;) {
+ int rsize = 0;
+ if (asf->packet_size_left < FRAME_HEADER_SIZE
+ || asf->packet_segments < 1) {
+ //asf->packet_size_left <= asf->packet_padsize) {
+ int ret = asf->packet_size_left + asf->packet_padsize;
+ //printf("PacketLeftSize:%d Pad:%d Pos:%"PRId64"\n", asf->packet_size_left, asf->packet_padsize, url_ftell(pb));
+ if((url_ftell(&s->pb) + ret - s->data_offset) % asf->packet_size)
+ ret += asf->packet_size - ((url_ftell(&s->pb) + ret - s->data_offset) % asf->packet_size);
+ assert(ret>=0);
+ /* fail safe */
+ url_fskip(pb, ret);
+ asf->packet_pos= url_ftell(&s->pb);
+ if (asf->data_object_size != (uint64_t)-1 &&
+ (asf->packet_pos - asf->data_object_offset >= asf->data_object_size))
+ return AVERROR_IO; /* Do not exceed the size of the data object */
+ ret = asf_get_packet(s);
+ //printf("READ ASF PACKET %d r:%d c:%d\n", ret, asf->packet_size_left, pc++);
+ if (ret < 0 || url_feof(pb))
+ return AVERROR_IO;
+ asf->packet_time_start = 0;
+ continue;
+ }
+ if (asf->packet_time_start == 0) {
+ /* read frame header */
+ int num = get_byte(pb);
+ asf->packet_segments--;
+ rsize++;
+ asf->packet_key_frame = (num & 0x80) >> 7;
+ asf->stream_index = asf->asfid2avid[num & 0x7f];
+ // sequence should be ignored!
+ DO_2BITS(asf->packet_property >> 4, asf->packet_seq, 0);
+ DO_2BITS(asf->packet_property >> 2, asf->packet_frag_offset, 0);
+ DO_2BITS(asf->packet_property, asf->packet_replic_size, 0);
+//printf("key:%d stream:%d seq:%d offset:%d replic_size:%d\n", asf->packet_key_frame, asf->stream_index, asf->packet_seq, //asf->packet_frag_offset, asf->packet_replic_size);
+ if (asf->packet_replic_size > 1) {
+ assert(asf->packet_replic_size >= 8);
+                // it should always be at least 8 bytes - FIXME validate
+ asf->packet_obj_size = get_le32(pb);
+ asf->packet_frag_timestamp = get_le32(pb); // timestamp
+ if (asf->packet_replic_size > 8)
+ url_fskip(pb, asf->packet_replic_size - 8);
+ rsize += asf->packet_replic_size; // FIXME - check validity
+ } else if (asf->packet_replic_size==1){
+                // multipacket - frag_offset is the beginning timestamp
+ asf->packet_time_start = asf->packet_frag_offset;
+ asf->packet_frag_offset = 0;
+ asf->packet_frag_timestamp = asf->packet_timestamp;
+
+ asf->packet_time_delta = get_byte(pb);
+ rsize++;
+ }else{
+ assert(asf->packet_replic_size==0);
+ }
+ if (asf->packet_flags & 0x01) {
+ DO_2BITS(asf->packet_segsizetype >> 6, asf->packet_frag_size, 0); // 0 is illegal
+#undef DO_2BITS
+ //printf("Fragsize %d\n", asf->packet_frag_size);
+ } else {
+ asf->packet_frag_size = asf->packet_size_left - rsize;
+ //printf("Using rest %d %d %d\n", asf->packet_frag_size, asf->packet_size_left, rsize);
+ }
+ if (asf->packet_replic_size == 1) {
+ asf->packet_multi_size = asf->packet_frag_size;
+ if (asf->packet_multi_size > asf->packet_size_left) {
+ asf->packet_segments = 0;
+ continue;
+ }
+ }
+ asf->packet_size_left -= rsize;
+ //printf("___objsize____ %d %d rs:%d\n", asf->packet_obj_size, asf->packet_frag_offset, rsize);
+
+ if (asf->stream_index < 0
+ || s->streams[asf->stream_index]->discard >= AVDISCARD_ALL
+ || (!asf->packet_key_frame && s->streams[asf->stream_index]->discard >= AVDISCARD_NONKEY)
+ ) {
+ asf->packet_time_start = 0;
+ /* unhandled packet (should not happen) */
+ url_fskip(pb, asf->packet_frag_size);
+ asf->packet_size_left -= asf->packet_frag_size;
+ if(asf->stream_index < 0)
+ av_log(s, AV_LOG_ERROR, "ff asf skip %d %d\n", asf->packet_frag_size, num & 0x7f);
+ continue;
+ }
+ asf->asf_st = s->streams[asf->stream_index]->priv_data;
+ }
+ asf_st = asf->asf_st;
+
+ if ((asf->packet_frag_offset != asf_st->frag_offset
+ || (asf->packet_frag_offset
+ && asf->packet_seq != asf_st->seq)) // seq should be ignored
+ ) {
+ /* cannot continue current packet: free it */
+ // FIXME better check if packet was already allocated
+ av_log(s, AV_LOG_INFO, "ff asf parser skips: %d - %d o:%d - %d %d %d fl:%d\n",
+ asf_st->pkt.size,
+ asf->packet_obj_size,
+ asf->packet_frag_offset, asf_st->frag_offset,
+ asf->packet_seq, asf_st->seq, asf->packet_frag_size);
+ if (asf_st->pkt.size)
+ av_free_packet(&asf_st->pkt);
+ asf_st->frag_offset = 0;
+ if (asf->packet_frag_offset != 0) {
+ url_fskip(pb, asf->packet_frag_size);
+ av_log(s, AV_LOG_INFO, "ff asf parser skipping %db\n", asf->packet_frag_size);
+ asf->packet_size_left -= asf->packet_frag_size;
+ continue;
+ }
+ }
+ if (asf->packet_replic_size == 1) {
+            // frag_offset is used here as the beginning timestamp
+ asf->packet_frag_timestamp = asf->packet_time_start;
+ asf->packet_time_start += asf->packet_time_delta;
+ asf->packet_obj_size = asf->packet_frag_size = get_byte(pb);
+ asf->packet_size_left--;
+ asf->packet_multi_size--;
+ if (asf->packet_multi_size < asf->packet_obj_size)
+ {
+ asf->packet_time_start = 0;
+ url_fskip(pb, asf->packet_multi_size);
+ asf->packet_size_left -= asf->packet_multi_size;
+ continue;
+ }
+ asf->packet_multi_size -= asf->packet_obj_size;
+ //printf("COMPRESS size %d %d %d ms:%d\n", asf->packet_obj_size, asf->packet_frag_timestamp, asf->packet_size_left, asf->packet_multi_size);
+ }
+ if (asf_st->frag_offset == 0) {
+ /* new packet */
+ av_new_packet(&asf_st->pkt, asf->packet_obj_size);
+ asf_st->seq = asf->packet_seq;
+ asf_st->pkt.pts = asf->packet_frag_timestamp;
+ asf_st->pkt.stream_index = asf->stream_index;
+ asf_st->pkt.pos =
+ asf_st->packet_pos= asf->packet_pos;
+//printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
+//asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & PKT_FLAG_KEY,
+//s->streams[asf->stream_index]->codec->codec_type == CODEC_TYPE_AUDIO, asf->packet_obj_size);
+ if (s->streams[asf->stream_index]->codec->codec_type == CODEC_TYPE_AUDIO)
+ asf->packet_key_frame = 1;
+ if (asf->packet_key_frame)
+ asf_st->pkt.flags |= PKT_FLAG_KEY;
+ }
+
+ /* read data */
+ //printf("READ PACKET s:%d os:%d o:%d,%d l:%d DATA:%p\n",
+ // asf->packet_size, asf_st->pkt.size, asf->packet_frag_offset,
+ // asf_st->frag_offset, asf->packet_frag_size, asf_st->pkt.data);
+ asf->packet_size_left -= asf->packet_frag_size;
+ if (asf->packet_size_left < 0)
+ continue;
+ get_buffer(pb, asf_st->pkt.data + asf->packet_frag_offset,
+ asf->packet_frag_size);
+ asf_st->frag_offset += asf->packet_frag_size;
+ /* test if whole packet is read */
+ if (asf_st->frag_offset == asf_st->pkt.size) {
+ /* return packet */
+ if (asf_st->ds_span > 1) {
+ /* packet descrambling */
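+                /* The payload was interleaved as ds_span rows of ds_chunk_size-byte
+                   chunks; copy the chunks back into linear order. */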
+ uint8_t *newdata = av_malloc(asf_st->pkt.size);
+ if (newdata) {
+ int offset = 0;
+ while (offset < asf_st->pkt.size) {
+ int off = offset / asf_st->ds_chunk_size;
+ int row = off / asf_st->ds_span;
+ int col = off % asf_st->ds_span;
+ int idx = row + col * asf_st->ds_packet_size / asf_st->ds_chunk_size;
+ //printf("off:%d row:%d col:%d idx:%d\n", off, row, col, idx);
+ memcpy(newdata + offset,
+ asf_st->pkt.data + idx * asf_st->ds_chunk_size,
+ asf_st->ds_chunk_size);
+ offset += asf_st->ds_chunk_size;
+ }
+ av_free(asf_st->pkt.data);
+ asf_st->pkt.data = newdata;
+ }
+ }
+ asf_st->frag_offset = 0;
+ memcpy(pkt, &asf_st->pkt, sizeof(AVPacket));
+ //printf("packet %d %d\n", asf_st->pkt.size, asf->packet_frag_size);
+ asf_st->pkt.size = 0;
+ asf_st->pkt.data = 0;
+ break; // packet completed
+ }
+ }
+ return 0;
+}
+
+static int asf_read_close(AVFormatContext *s)
+{
+ int i;
+
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st = s->streams[i];
+ av_free(st->priv_data);
+ av_free(st->codec->palctrl);
+ }
+ return 0;
+}
+
+// Added to support seeking after packets have been read
+// If information is not reset, read_packet fails due to
+// leftover information from previous reads
+static void asf_reset_header(AVFormatContext *s)
+{
+ ASFContext *asf = s->priv_data;
+ ASFStream *asf_st;
+ int i;
+
+ asf->packet_nb_frames = 0;
+ asf->packet_timestamp_start = -1;
+ asf->packet_timestamp_end = -1;
+ asf->packet_size_left = 0;
+ asf->packet_segments = 0;
+ asf->packet_flags = 0;
+ asf->packet_property = 0;
+ asf->packet_timestamp = 0;
+ asf->packet_segsizetype = 0;
+ asf->packet_segments = 0;
+ asf->packet_seq = 0;
+ asf->packet_replic_size = 0;
+ asf->packet_key_frame = 0;
+ asf->packet_padsize = 0;
+ asf->packet_frag_offset = 0;
+ asf->packet_frag_size = 0;
+ asf->packet_frag_timestamp = 0;
+ asf->packet_multi_size = 0;
+ asf->packet_obj_size = 0;
+ asf->packet_time_delta = 0;
+ asf->packet_time_start = 0;
+
+ for(i=0; i<s->nb_streams; i++){
+ asf_st= s->streams[i]->priv_data;
+ av_free_packet(&asf_st->pkt);
+ asf_st->frag_offset=0;
+ asf_st->seq=0;
+ }
+ asf->asf_st= NULL;
+}
+
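+/* Read frames starting at *ppos (rounded up to a packet boundary) until a key
+   frame of stream_index is found, adding index entries along the way; returns
+   that key frame's pts and stores its packet position back into *ppos. */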
+static int64_t asf_read_pts(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit)
+{
+ ASFContext *asf = s->priv_data;
+ AVPacket pkt1, *pkt = &pkt1;
+ ASFStream *asf_st;
+ int64_t pts;
+ int64_t pos= *ppos;
+ int i;
+ int64_t start_pos[s->nb_streams];
+
+ for(i=0; i<s->nb_streams; i++){
+ start_pos[i]= pos;
+ }
+
+ pos= (pos+asf->packet_size-1-s->data_offset)/asf->packet_size*asf->packet_size+ s->data_offset;
+ *ppos= pos;
+ url_fseek(&s->pb, pos, SEEK_SET);
+
+//printf("asf_read_pts\n");
+ asf_reset_header(s);
+ for(;;){
+ if (av_read_frame(s, pkt) < 0){
+ av_log(s, AV_LOG_INFO, "seek failed\n");
+ return AV_NOPTS_VALUE;
+ }
+
+ pts= pkt->pts;
+
+ av_free_packet(pkt);
+ if(pkt->flags&PKT_FLAG_KEY){
+ i= pkt->stream_index;
+
+ asf_st= s->streams[i]->priv_data;
+
+ assert((asf_st->packet_pos - s->data_offset) % asf->packet_size == 0);
+ pos= asf_st->packet_pos;
+
+ av_add_index_entry(s->streams[i], pos, pts, pkt->size, pos - start_pos[i] + 1, AVINDEX_KEYFRAME);
+ start_pos[i]= asf_st->packet_pos + 1;
+
+ if(pkt->stream_index == stream_index)
+ break;
+ }
+ }
+
+ *ppos= pos;
+//printf("found keyframe at %"PRId64" stream %d stamp:%"PRId64"\n", *ppos, stream_index, pts);
+
+ return pts;
+}
+
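+/* Look for a simple index object right after the data object and convert its
+   per-entry packet numbers into index entries for the given stream. */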
+static void asf_build_simple_index(AVFormatContext *s, int stream_index)
+{
+ GUID g;
+ ASFContext *asf = s->priv_data;
+ int64_t gsize, itime;
+ int64_t pos, current_pos, index_pts;
+ int i;
+ int pct,ict;
+
+ current_pos = url_ftell(&s->pb);
+
+ url_fseek(&s->pb, asf->data_object_offset + asf->data_object_size, SEEK_SET);
+ get_guid(&s->pb, &g);
+ if (!memcmp(&g, &index_guid, sizeof(GUID))) {
+ gsize = get_le64(&s->pb);
+ get_guid(&s->pb, &g);
+ itime=get_le64(&s->pb);
+ pct=get_le32(&s->pb);
+ ict=get_le32(&s->pb);
+ av_log(NULL, AV_LOG_DEBUG, "itime:0x%"PRIx64", pct:%d, ict:%d\n",itime,pct,ict);
+
+ for (i=0;i<ict;i++){
+ int pktnum=get_le32(&s->pb);
+ int pktct =get_le16(&s->pb);
+ av_log(NULL, AV_LOG_DEBUG, "pktnum:%d, pktct:%d\n", pktnum, pktct);
+
+ pos=s->data_offset + asf->packet_size*(int64_t)pktnum;
+ index_pts=av_rescale(itime, i, 10000);
+
+ av_add_index_entry(s->streams[stream_index], pos, index_pts, asf->packet_size, 0, AVINDEX_KEYFRAME);
+ }
+ asf->index_read= 1;
+ }
+ url_fseek(&s->pb, current_pos, SEEK_SET);
+}
+
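+/* Seek using the simple index if one could be built, otherwise fall back to a
+   binary search over packet timestamps via av_seek_frame_binary(). */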
+static int asf_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags)
+{
+ ASFContext *asf = s->priv_data;
+ AVStream *st = s->streams[stream_index];
+ int64_t pos;
+ int index;
+
+ if (asf->packet_size <= 0)
+ return -1;
+
+ if (!asf->index_read)
+ asf_build_simple_index(s, stream_index);
+
+ if(!(asf->index_read && st->index_entries)){
+ if(av_seek_frame_binary(s, stream_index, pts, flags)<0)
+ return -1;
+ }else{
+ index= av_index_search_timestamp(st, pts, flags);
+ if(index<0)
+ return -1;
+
+ /* find the position */
+ pos = st->index_entries[index].pos;
+ pts = st->index_entries[index].timestamp;
+
+ // various attempts to find key frame have failed so far
+ // asf_reset_header(s);
+ // url_fseek(&s->pb, pos, SEEK_SET);
+ // key_pos = pos;
+ // for(i=0;i<16;i++){
+ // pos = url_ftell(&s->pb);
+ // if (av_read_frame(s, &pkt) < 0){
+ // av_log(s, AV_LOG_INFO, "seek failed\n");
+ // return -1;
+ // }
+ // asf_st = s->streams[stream_index]->priv_data;
+ // pos += st->parser->frame_offset;
+ //
+ // if (pkt.size > b) {
+ // b = pkt.size;
+ // key_pos = pos;
+ // }
+ //
+ // av_free_packet(&pkt);
+ // }
+
+ /* do the seek */
+ av_log(NULL, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos);
+ url_fseek(&s->pb, pos, SEEK_SET);
+ }
+ asf_reset_header(s);
+ return 0;
+}
+
+AVInputFormat asf_demuxer = {
+ "asf",
+ "asf format",
+ sizeof(ASFContext),
+ asf_probe,
+ asf_read_header,
+ asf_read_packet,
+ asf_read_close,
+ asf_read_seek,
+ asf_read_pts,
+};
diff --git a/contrib/ffmpeg/libavformat/asf.h b/contrib/ffmpeg/libavformat/asf.h
new file mode 100644
index 000000000..bbe88801a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/asf.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#define PACKET_SIZE 3200
+
+typedef struct {
+ int num;
+ unsigned char seq;
+ /* use for reading */
+ AVPacket pkt;
+ int frag_offset;
+ int timestamp;
+ int64_t duration;
+
+ int ds_span; /* descrambling */
+ int ds_packet_size;
+ int ds_chunk_size;
+ int ds_data_size;
+ int ds_silence_data;
+
+ int64_t packet_pos;
+
+} ASFStream;
+
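+/* 16-byte GUID stored as one 32-bit, two 16-bit and eight 8-bit fields, matching
+   the layout used by the ASF headers. */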
+typedef struct {
+ uint32_t v1;
+ uint16_t v2;
+ uint16_t v3;
+ uint8_t v4[8];
+} GUID;
+
+typedef struct {
+ GUID guid; // generated by client computer
+ uint64_t file_size; // in bytes
+ // invalid if broadcasting
+ uint64_t create_time; // time of creation, in 100-nanosecond units since 1.1.1601
+ // invalid if broadcasting
+ uint64_t packets_count; // how many packets are there in the file
+ // invalid if broadcasting
+ uint64_t play_time; // play time, in 100-nanosecond units
+ // invalid if broadcasting
+ uint64_t send_time; // time to send file, in 100-nanosecond units
+ // invalid if broadcasting (could be ignored)
+ uint32_t preroll; // timestamp of the first packet, in milliseconds
+                                 // if nonzero - subtract from time
+ uint32_t ignore; // preroll is 64bit - but let's just ignore it
+ uint32_t flags; // 0x01 - broadcast
+ // 0x02 - seekable
+                                 // the rest is reserved and should be 0
+ uint32_t min_pktsize; // size of a data packet
+ // invalid if broadcasting
+ uint32_t max_pktsize; // shall be the same as for min_pktsize
+ // invalid if broadcasting
+    uint32_t max_bitrate;        // bandwidth of stream in bps
+ // should be the sum of bitrates of the
+ // individual media streams
+} ASFMainHeader;
+
+
+typedef struct {
+ uint32_t packet_number;
+ uint16_t packet_count;
+} ASFIndex;
+
+
+typedef struct {
+ uint32_t seqno;
+ unsigned int packet_size;
+ int is_streamed;
+    int asfid2avid[128];                 /* conversion table from ASF stream ID to AVStream ID */
+    ASFStream streams[128];              /* 128 is the maximum stream count and it's not that big */
+    /* non-streamed additional info */
+ int64_t nb_packets;
+ int64_t duration; /* in 100ns units */
+ /* packet filling */
+ unsigned char multi_payloads_present;
+ int packet_size_left;
+ int prev_packet_sent_time;
+ int packet_timestamp_start;
+ int packet_timestamp_end;
+ unsigned int packet_nb_payloads;
+ int packet_nb_frames;
+ uint8_t packet_buf[PACKET_SIZE];
+ ByteIOContext pb;
+ /* only for reading */
+    uint64_t data_offset; /* beginning of the first data packet */
+ uint64_t data_object_offset; /* data object offset (excl. GUID & size)*/
+ uint64_t data_object_size; /* size of the data object */
+ int index_read;
+
+ ASFMainHeader hdr;
+
+ int packet_flags;
+ int packet_property;
+ int packet_timestamp;
+ int packet_segsizetype;
+ int packet_segments;
+ int packet_seq;
+ int packet_replic_size;
+ int packet_key_frame;
+ int packet_padsize;
+ int packet_frag_offset;
+ int packet_frag_size;
+ int packet_frag_timestamp;
+ int packet_multi_size;
+ int packet_obj_size;
+ int packet_time_delta;
+ int packet_time_start;
+ int64_t packet_pos;
+
+ int stream_index;
+
+
+ int64_t last_indexed_pts;
+ ASFIndex* index_ptr;
+ uint32_t nb_index_count;
+ uint32_t nb_index_memory_alloc;
+ uint16_t maximum_packet;
+
+ ASFStream* asf_st; /* currently decoded stream */
+} ASFContext;
+
+static const GUID asf_header = {
+ 0x75B22630, 0x668E, 0x11CF, { 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C },
+};
+
+static const GUID file_header = {
+ 0x8CABDCA1, 0xA947, 0x11CF, { 0x8E, 0xE4, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 },
+};
+
+static const GUID stream_header = {
+ 0xB7DC0791, 0xA9B7, 0x11CF, { 0x8E, 0xE6, 0x00, 0xC0, 0x0C, 0x20, 0x53, 0x65 },
+};
+
+static const GUID ext_stream_header = {
+ 0x14E6A5CB, 0xC672, 0x4332, { 0x83, 0x99, 0xA9, 0x69, 0x52, 0x06, 0x5B, 0x5A },
+};
+
+static const GUID audio_stream = {
+ 0xF8699E40, 0x5B4D, 0x11CF, { 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B },
+};
+
+static const GUID audio_conceal_none = {
+ // 0x49f1a440, 0x4ece, 0x11d0, { 0xa3, 0xac, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6 },
+ // New value lifted from avifile
+ 0x20fb5700, 0x5b55, 0x11cf, { 0xa8, 0xfd, 0x00, 0x80, 0x5f, 0x5c, 0x44, 0x2b },
+};
+
+static const GUID audio_conceal_spread = {
+ 0xBFC3CD50, 0x618F, 0x11CF, { 0x8B, 0xB2, 0x00, 0xAA, 0x00, 0xB4, 0xE2, 0x20 },
+};
+
+static const GUID video_stream = {
+ 0xBC19EFC0, 0x5B4D, 0x11CF, { 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B },
+};
+
+static const GUID video_conceal_none = {
+ 0x20FB5700, 0x5B55, 0x11CF, { 0xA8, 0xFD, 0x00, 0x80, 0x5F, 0x5C, 0x44, 0x2B },
+};
+
+static const GUID command_stream = {
+ 0x59DACFC0, 0x59E6, 0x11D0, { 0xA3, 0xAC, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6 },
+};
+
+static const GUID comment_header = {
+ 0x75b22633, 0x668e, 0x11cf, { 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c },
+};
+
+static const GUID codec_comment_header = {
+ 0x86D15240, 0x311D, 0x11D0, { 0xA3, 0xA4, 0x00, 0xA0, 0xC9, 0x03, 0x48, 0xF6 },
+};
+static const GUID codec_comment1_header = {
+ 0x86d15241, 0x311d, 0x11d0, { 0xa3, 0xa4, 0x00, 0xa0, 0xc9, 0x03, 0x48, 0xf6 },
+};
+
+static const GUID data_header = {
+ 0x75b22636, 0x668e, 0x11cf, { 0xa6, 0xd9, 0x00, 0xaa, 0x00, 0x62, 0xce, 0x6c },
+};
+
+static const GUID head1_guid = {
+ 0x5fbf03b5, 0xa92e, 0x11cf, { 0x8e, 0xe3, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65 },
+};
+
+static const GUID head2_guid = {
+ 0xabd3d211, 0xa9ba, 0x11cf, { 0x8e, 0xe6, 0x00, 0xc0, 0x0c, 0x20, 0x53, 0x65 },
+};
+
+static const GUID extended_content_header = {
+ 0xD2D0A440, 0xE307, 0x11D2, { 0x97, 0xF0, 0x00, 0xA0, 0xC9, 0x5E, 0xA8, 0x50 },
+};
+
+static const GUID simple_index_header = {
+ 0x33000890, 0xE5B1, 0x11CF, { 0x89, 0xF4, 0x00, 0xA0, 0xC9, 0x03, 0x49, 0xCB },
+};
+
+static const GUID ext_stream_embed_stream_header = {
+ 0x3afb65e2, 0x47ef, 0x40f2, { 0xac, 0x2c, 0x70, 0xa9, 0x0d, 0x71, 0xd3, 0x43}
+};
+
+static const GUID ext_stream_audio_stream = {
+ 0x31178c9d, 0x03e1, 0x4528, { 0xb5, 0x82, 0x3d, 0xf9, 0xdb, 0x22, 0xf5, 0x03}
+};
+
+/* I am not a number !!! This GUID is the one found on the PC used to
+ generate the stream */
+static const GUID my_guid = {
+ 0, 0, 0, { 0, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+#define ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT 0x80 //1000 0000
+
+
+// ASF data packet structure
+// =========================
+//
+//
+// -----------------------------------
+// | Error Correction Data | Optional
+// -----------------------------------
+// | Payload Parsing Information (PPI) |
+// -----------------------------------
+// | Payload Data |
+// -----------------------------------
+// | Padding Data |
+// -----------------------------------
+
+
+// PPI_FLAG - Payload parsing information flags
+#define ASF_PPI_FLAG_MULTIPLE_PAYLOADS_PRESENT 1
+
+#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_BYTE 0x02 //0000 0010
+#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_WORD 0x04 //0000 0100
+#define ASF_PPI_FLAG_SEQUENCE_FIELD_IS_DWORD 0x06 //0000 0110
+#define ASF_PPI_MASK_SEQUENCE_FIELD_SIZE 0x06 //0000 0110
+
+#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_BYTE 0x08 //0000 1000
+#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_WORD 0x10 //0001 0000
+#define ASF_PPI_FLAG_PADDING_LENGTH_FIELD_IS_DWORD 0x18 //0001 1000
+#define ASF_PPI_MASK_PADDING_LENGTH_FIELD_SIZE 0x18 //0001 1000
+
+#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_BYTE 0x20 //0010 0000
+#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_WORD 0x40 //0100 0000
+#define ASF_PPI_FLAG_PACKET_LENGTH_FIELD_IS_DWORD 0x60 //0110 0000
+#define ASF_PPI_MASK_PACKET_LENGTH_FIELD_SIZE 0x60 //0110 0000
+
+// PL_FLAG - Payload flags
+#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_BYTE 0x01 //0000 0001
+#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_WORD 0x02 //0000 0010
+#define ASF_PL_FLAG_REPLICATED_DATA_LENGTH_FIELD_IS_DWORD 0x03 //0000 0011
+#define ASF_PL_MASK_REPLICATED_DATA_LENGTH_FIELD_SIZE 0x03 //0000 0011
+
+#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_BYTE 0x04 //0000 0100
+#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_WORD 0x08 //0000 1000
+#define ASF_PL_FLAG_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_IS_DWORD 0x0c //0000 1100
+#define ASF_PL_MASK_OFFSET_INTO_MEDIA_OBJECT_LENGTH_FIELD_SIZE 0x0c //0000 1100
+
+#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_BYTE 0x10 //0001 0000
+#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_WORD 0x20 //0010 0000
+#define ASF_PL_FLAG_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_IS_DWORD 0x30 //0011 0000
+#define ASF_PL_MASK_MEDIA_OBJECT_NUMBER_LENGTH_FIELD_SIZE 0x30 //0011 0000
+
+#define ASF_PL_FLAG_STREAM_NUMBER_LENGTH_FIELD_IS_BYTE 0x40 //0100 0000
+#define ASF_PL_MASK_STREAM_NUMBER_LENGTH_FIELD_SIZE 0xc0 //1100 0000
+
+#define ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_BYTE 0x40 //0100 0000
+#define ASF_PL_FLAG_PAYLOAD_LENGTH_FIELD_IS_WORD 0x80 //1000 0000
+#define ASF_PL_MASK_PAYLOAD_LENGTH_FIELD_SIZE 0xc0 //1100 0000
+
+#define ASF_PL_FLAG_KEY_FRAME 0x80 //1000 0000
diff --git a/contrib/ffmpeg/libavformat/au.c b/contrib/ffmpeg/libavformat/au.c
new file mode 100644
index 000000000..27c7cdc85
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/au.c
@@ -0,0 +1,209 @@
+/*
+ * AU muxer and demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * First version by Francois Revol revol@free.fr
+ *
+ * Reference documents:
+ * http://www.opengroup.org/public/pubs/external/auformat.html
+ * http://www.goice.co.jp/member/mo/formats/au.html
+ */
+
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+
+/* if we don't know the size in advance */
+#define AU_UNKOWN_SIZE ((uint32_t)(~0))
+
+/* The ffmpeg codecs we support, and the IDs they have in the file */
+static const CodecTag codec_au_tags[] = {
+ { CODEC_ID_PCM_MULAW, 1 },
+ { CODEC_ID_PCM_S16BE, 3 },
+ { CODEC_ID_PCM_ALAW, 27 },
+ { 0, 0 },
+};
+
+#ifdef CONFIG_MUXERS
+/* AUDIO_FILE header */
+static int put_au_header(ByteIOContext *pb, AVCodecContext *enc)
+{
+ if(!enc->codec_tag)
+ enc->codec_tag = codec_get_tag(codec_au_tags, enc->codec_id);
+ if(!enc->codec_tag)
+ return -1;
+ put_tag(pb, ".snd"); /* magic number */
+ put_be32(pb, 24); /* header size */
+ put_be32(pb, AU_UNKOWN_SIZE); /* data size */
+ put_be32(pb, (uint32_t)enc->codec_tag); /* codec ID */
+ put_be32(pb, enc->sample_rate);
+ put_be32(pb, (uint32_t)enc->channels);
+ return 0;
+}
+
+static int au_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+
+ s->priv_data = NULL;
+
+ /* format header */
+ if (put_au_header(pb, s->streams[0]->codec) < 0) {
+ return -1;
+ }
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int au_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+static int au_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ offset_t file_size;
+
+ if (!url_is_streamed(&s->pb)) {
+
+ /* update file size */
+ file_size = url_ftell(pb);
+ url_fseek(pb, 8, SEEK_SET);
+ put_be32(pb, (uint32_t)(file_size - 24));
+ url_fseek(pb, file_size, SEEK_SET);
+
+ put_flush_packet(pb);
+ }
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+static int au_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 24)
+ return 0;
+ if (p->buf[0] == '.' && p->buf[1] == 's' &&
+ p->buf[2] == 'n' && p->buf[3] == 'd')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* au input */
+static int au_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int size;
+ unsigned int tag;
+ ByteIOContext *pb = &s->pb;
+ unsigned int id, codec, channels, rate;
+ AVStream *st;
+
+ /* check ".snd" header */
+ tag = get_le32(pb);
+ if (tag != MKTAG('.', 's', 'n', 'd'))
+ return -1;
+ size = get_be32(pb); /* header size */
+ get_be32(pb); /* data size */
+
+ id = get_be32(pb);
+ rate = get_be32(pb);
+ channels = get_be32(pb);
+
+ codec = codec_get_id(codec_au_tags, id);
+
+ if (size >= 24) {
+ /* skip unused data */
+ url_fseek(pb, size - 24, SEEK_CUR);
+ }
+
+ /* now we are ready: build format streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = id;
+ st->codec->codec_id = codec;
+ st->codec->channels = channels;
+ st->codec->sample_rate = rate;
+ av_set_pts_info(st, 64, 1, rate);
+ return 0;
+}
+
+#define MAX_SIZE 4096
+
+static int au_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ int ret;
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+ ret= av_get_packet(&s->pb, pkt, MAX_SIZE);
+ if (ret < 0)
+ return AVERROR_IO;
+ pkt->stream_index = 0;
+
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return 0;
+}
+
+static int au_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+#ifdef CONFIG_AU_DEMUXER
+AVInputFormat au_demuxer = {
+ "au",
+ "SUN AU Format",
+ 0,
+ au_probe,
+ au_read_header,
+ au_read_packet,
+ au_read_close,
+ pcm_read_seek,
+};
+#endif
+
+#ifdef CONFIG_AU_MUXER
+AVOutputFormat au_muxer = {
+ "au",
+ "SUN AU Format",
+ "audio/basic",
+ "au",
+ 0,
+ CODEC_ID_PCM_S16BE,
+ CODEC_ID_NONE,
+ au_write_header,
+ au_write_packet,
+ au_write_trailer,
+};
+#endif //CONFIG_AU_MUXER
diff --git a/contrib/ffmpeg/libavformat/audio.c b/contrib/ffmpeg/libavformat/audio.c
new file mode 100644
index 000000000..1dfccccb8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/audio.c
@@ -0,0 +1,352 @@
+/*
+ * Linux audio play and grab interface
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifdef __OpenBSD__
+#include <soundcard.h>
+#else
+#include <sys/soundcard.h>
+#endif
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+
+#define AUDIO_BLOCK_SIZE 4096
+
+typedef struct {
+ int fd;
+ int sample_rate;
+ int channels;
+ int frame_size; /* in bytes ! */
+ int codec_id;
+ int flip_left : 1;
+ uint8_t buffer[AUDIO_BLOCK_SIZE];
+ int buffer_ptr;
+} AudioData;
+
+static int audio_open(AudioData *s, int is_output, const char *audio_device)
+{
+ int audio_fd;
+ int tmp, err;
+ char *flip = getenv("AUDIO_FLIP_LEFT");
+
+ /* open linux audio device */
+ if (!audio_device)
+#ifdef __OpenBSD__
+ audio_device = "/dev/sound";
+#else
+ audio_device = "/dev/dsp";
+#endif
+
+ if (is_output)
+ audio_fd = open(audio_device, O_WRONLY);
+ else
+ audio_fd = open(audio_device, O_RDONLY);
+ if (audio_fd < 0) {
+ perror(audio_device);
+ return AVERROR_IO;
+ }
+
+ if (flip && *flip == '1') {
+ s->flip_left = 1;
+ }
+
+ /* non blocking mode */
+ if (!is_output)
+ fcntl(audio_fd, F_SETFL, O_NONBLOCK);
+
+ s->frame_size = AUDIO_BLOCK_SIZE;
+#if 0
+ tmp = (NB_FRAGMENTS << 16) | FRAGMENT_BITS;
+ err = ioctl(audio_fd, SNDCTL_DSP_SETFRAGMENT, &tmp);
+ if (err < 0) {
+ perror("SNDCTL_DSP_SETFRAGMENT");
+ }
+#endif
+
+ /* select format : favour native format */
+ err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
+
+#ifdef WORDS_BIGENDIAN
+ if (tmp & AFMT_S16_BE) {
+ tmp = AFMT_S16_BE;
+ } else if (tmp & AFMT_S16_LE) {
+ tmp = AFMT_S16_LE;
+ } else {
+ tmp = 0;
+ }
+#else
+ if (tmp & AFMT_S16_LE) {
+ tmp = AFMT_S16_LE;
+ } else if (tmp & AFMT_S16_BE) {
+ tmp = AFMT_S16_BE;
+ } else {
+ tmp = 0;
+ }
+#endif
+
+ switch(tmp) {
+ case AFMT_S16_LE:
+ s->codec_id = CODEC_ID_PCM_S16LE;
+ break;
+ case AFMT_S16_BE:
+ s->codec_id = CODEC_ID_PCM_S16BE;
+ break;
+ default:
+ av_log(NULL, AV_LOG_ERROR, "Soundcard does not support 16 bit sample format\n");
+ close(audio_fd);
+ return AVERROR_IO;
+ }
+ err=ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp);
+ if (err < 0) {
+ perror("SNDCTL_DSP_SETFMT");
+ goto fail;
+ }
+
+ tmp = (s->channels == 2);
+ err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
+ if (err < 0) {
+ perror("SNDCTL_DSP_STEREO");
+ goto fail;
+ }
+ if (tmp)
+ s->channels = 2;
+
+ tmp = s->sample_rate;
+ err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
+ if (err < 0) {
+ perror("SNDCTL_DSP_SPEED");
+ goto fail;
+ }
+ s->sample_rate = tmp; /* store real sample rate */
+ s->fd = audio_fd;
+
+ return 0;
+ fail:
+ close(audio_fd);
+ return AVERROR_IO;
+}
+
+static int audio_close(AudioData *s)
+{
+ close(s->fd);
+ return 0;
+}
+
+/* sound output support */
+static int audio_write_header(AVFormatContext *s1)
+{
+ AudioData *s = s1->priv_data;
+ AVStream *st;
+ int ret;
+
+ st = s1->streams[0];
+ s->sample_rate = st->codec->sample_rate;
+ s->channels = st->codec->channels;
+ ret = audio_open(s, 1, NULL);
+ if (ret < 0) {
+ return AVERROR_IO;
+ } else {
+ return 0;
+ }
+}
+
+static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ AudioData *s = s1->priv_data;
+ int len, ret;
+ int size= pkt->size;
+ uint8_t *buf= pkt->data;
+
+ while (size > 0) {
+ len = AUDIO_BLOCK_SIZE - s->buffer_ptr;
+ if (len > size)
+ len = size;
+ memcpy(s->buffer + s->buffer_ptr, buf, len);
+ s->buffer_ptr += len;
+ if (s->buffer_ptr >= AUDIO_BLOCK_SIZE) {
+ for(;;) {
+ ret = write(s->fd, s->buffer, AUDIO_BLOCK_SIZE);
+ if (ret > 0)
+ break;
+ if (ret < 0 && (errno != EAGAIN && errno != EINTR))
+ return AVERROR_IO;
+ }
+ s->buffer_ptr = 0;
+ }
+ buf += len;
+ size -= len;
+ }
+ return 0;
+}
+
+static int audio_write_trailer(AVFormatContext *s1)
+{
+ AudioData *s = s1->priv_data;
+
+ audio_close(s);
+ return 0;
+}
+
+/* grab support */
+
+static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ AudioData *s = s1->priv_data;
+ AVStream *st;
+ int ret;
+
+ if (ap->sample_rate <= 0 || ap->channels <= 0)
+ return -1;
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+ s->sample_rate = ap->sample_rate;
+ s->channels = ap->channels;
+
+ ret = audio_open(s, 0, ap->device);
+ if (ret < 0) {
+ av_free(st);
+ return AVERROR_IO;
+ }
+
+ /* take real parameters */
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = s->codec_id;
+ st->codec->sample_rate = s->sample_rate;
+ st->codec->channels = s->channels;
+
+ av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+ return 0;
+}
+
+static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ AudioData *s = s1->priv_data;
+ int ret, bdelay;
+ int64_t cur_time;
+ struct audio_buf_info abufi;
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return AVERROR_IO;
+ for(;;) {
+ struct timeval tv;
+ fd_set fds;
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 30 * 1000; /* 30 msecs -- a bit shorter than 1 frame at 30fps */
+
+ FD_ZERO(&fds);
+ FD_SET(s->fd, &fds);
+
+ /* This will block until data is available or we get a timeout */
+ (void) select(s->fd + 1, &fds, 0, 0, &tv);
+
+ ret = read(s->fd, pkt->data, pkt->size);
+ if (ret > 0)
+ break;
+ if (ret == -1 && (errno == EAGAIN || errno == EINTR)) {
+ av_free_packet(pkt);
+ pkt->size = 0;
+ pkt->pts = av_gettime();
+ return 0;
+ }
+ if (!(ret == 0 || (ret == -1 && (errno == EAGAIN || errno == EINTR)))) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ }
+ pkt->size = ret;
+
+ /* compute pts of the start of the packet */
+ cur_time = av_gettime();
+ bdelay = ret;
+ if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) {
+ bdelay += abufi.bytes;
+ }
+    /* subtract time represented by the number of bytes in the audio fifo */
+ cur_time -= (bdelay * 1000000LL) / (s->sample_rate * s->channels);
+
+ /* convert to wanted units */
+ pkt->pts = cur_time;
+
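+    /* if requested via AUDIO_FLIP_LEFT=1, bitwise-invert the left-channel samples in place */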
+ if (s->flip_left && s->channels == 2) {
+ int i;
+ short *p = (short *) pkt->data;
+
+ for (i = 0; i < ret; i += 4) {
+ *p = ~*p;
+ p += 2;
+ }
+ }
+ return 0;
+}
+
+static int audio_read_close(AVFormatContext *s1)
+{
+ AudioData *s = s1->priv_data;
+
+ audio_close(s);
+ return 0;
+}
+
+#ifdef CONFIG_AUDIO_DEMUXER
+AVInputFormat audio_demuxer = {
+ "audio_device",
+ "audio grab and output",
+ sizeof(AudioData),
+ NULL,
+ audio_read_header,
+ audio_read_packet,
+ audio_read_close,
+ .flags = AVFMT_NOFILE,
+};
+#endif
+
+#ifdef CONFIG_AUDIO_MUXER
+AVOutputFormat audio_muxer = {
+ "audio_device",
+ "audio grab and output",
+ "",
+ "",
+ sizeof(AudioData),
+ /* XXX: we make the assumption that the soundcard accepts this format */
+ /* XXX: find better solution with "preinit" method, needed also in
+ other formats */
+#ifdef WORDS_BIGENDIAN
+ CODEC_ID_PCM_S16BE,
+#else
+ CODEC_ID_PCM_S16LE,
+#endif
+ CODEC_ID_NONE,
+ audio_write_header,
+ audio_write_packet,
+ audio_write_trailer,
+ .flags = AVFMT_NOFILE,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/avformat.h b/contrib/ffmpeg/libavformat/avformat.h
new file mode 100644
index 000000000..5dc41d273
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avformat.h
@@ -0,0 +1,539 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFORMAT_H
+#define AVFORMAT_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LIBAVFORMAT_VERSION_INT ((51<<16)+(6<<8)+0)
+#define LIBAVFORMAT_VERSION 51.6.0
+#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT
+
+#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION)
+
+#include <time.h>
+#include <stdio.h> /* FILE */
+#include "avcodec.h"
+
+#include "avio.h"
+
+/* packet functions */
+
+#ifndef MAXINT64
+#define MAXINT64 int64_t_C(0x7fffffffffffffff)
+#endif
+
+#ifndef MININT64
+#define MININT64 int64_t_C(0x8000000000000000)
+#endif
+
+typedef struct AVPacket {
+ int64_t pts; ///< presentation time stamp in time_base units
+ int64_t dts; ///< decompression time stamp in time_base units
+ uint8_t *data;
+ int size;
+ int stream_index;
+ int flags;
+ int duration; ///< presentation duration in time_base units (0 if not available)
+ void (*destruct)(struct AVPacket *);
+ void *priv;
+ int64_t pos; ///< byte position in stream, -1 if unknown
+} AVPacket;
+#define PKT_FLAG_KEY 0x0001
+
+void av_destruct_packet_nofree(AVPacket *pkt);
+void av_destruct_packet(AVPacket *pkt);
+
+/* initialize optional fields of a packet */
+static inline void av_init_packet(AVPacket *pkt)
+{
+ pkt->pts = AV_NOPTS_VALUE;
+ pkt->dts = AV_NOPTS_VALUE;
+ pkt->pos = -1;
+ pkt->duration = 0;
+ pkt->flags = 0;
+ pkt->stream_index = 0;
+ pkt->destruct= av_destruct_packet_nofree;
+}
+
+int av_new_packet(AVPacket *pkt, int size);
+int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size);
+int av_dup_packet(AVPacket *pkt);
+
+/**
+ * Free a packet
+ *
+ * @param pkt packet to free
+ */
+static inline void av_free_packet(AVPacket *pkt)
+{
+ if (pkt && pkt->destruct) {
+ pkt->destruct(pkt);
+ }
+}
+
+/*************************************************/
+/* fractional numbers for exact pts handling */
+
+/* the exact value of the fractional number is: 'val + num / den'. num
+   is assumed to be such that 0 <= num < den */
+typedef struct AVFrac {
+ int64_t val, num, den;
+} AVFrac attribute_deprecated;
+
+/*************************************************/
+/* input/output formats */
+
+struct AVFormatContext;
+
+/* this structure contains the data a format has to probe a file */
+typedef struct AVProbeData {
+ const char *filename;
+ unsigned char *buf;
+ int buf_size;
+} AVProbeData;
+
+#define AVPROBE_SCORE_MAX 100 ///< max score, half of that is used for file extension based detection
+
+typedef struct AVFormatParameters {
+ AVRational time_base;
+ int sample_rate;
+ int channels;
+ int width;
+ int height;
+ enum PixelFormat pix_fmt;
+ int channel; /* used to select dv channel */
+ const char *device; /* video, audio or DV device */
+ const char *standard; /* tv standard, NTSC, PAL, SECAM */
+ int mpeg2ts_raw:1; /* force raw MPEG2 transport stream output, if possible */
+ int mpeg2ts_compute_pcr:1; /* compute exact PCR for each transport
+ stream packet (only meaningful if
+                                  mpeg2ts_raw is TRUE) */
+ int initial_pause:1; /* do not begin to play the stream
+ immediately (RTSP only) */
+ int prealloced_context:1;
+ enum CodecID video_codec_id;
+ enum CodecID audio_codec_id;
+} AVFormatParameters;
+
+#define AVFMT_NOFILE 0x0001 /* no file should be opened */
+#define AVFMT_NEEDNUMBER 0x0002 /* needs '%d' in filename */
+#define AVFMT_SHOW_IDS 0x0008 /* show format stream IDs numbers */
+#define AVFMT_RAWPICTURE 0x0020 /* format wants AVPicture structure for
+ raw picture data */
+#define AVFMT_GLOBALHEADER 0x0040 /* format wants global header */
+#define AVFMT_NOTIMESTAMPS  0x0080 /* format does not need / have any timestamps */
+
+typedef struct AVOutputFormat {
+ const char *name;
+ const char *long_name;
+ const char *mime_type;
+ const char *extensions; /* comma separated extensions */
+ /* size of private data so that it can be allocated in the wrapper */
+ int priv_data_size;
+ /* output support */
+ enum CodecID audio_codec; /* default audio codec */
+ enum CodecID video_codec; /* default video codec */
+ int (*write_header)(struct AVFormatContext *);
+ int (*write_packet)(struct AVFormatContext *, AVPacket *pkt);
+ int (*write_trailer)(struct AVFormatContext *);
+ /* can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER */
+ int flags;
+ /* currently only used to set pixel format if not YUV420P */
+ int (*set_parameters)(struct AVFormatContext *, AVFormatParameters *);
+ int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, AVPacket *in, int flush);
+ /* private fields */
+ struct AVOutputFormat *next;
+} AVOutputFormat;
+
+typedef struct AVInputFormat {
+ const char *name;
+ const char *long_name;
+ /* size of private data so that it can be allocated in the wrapper */
+ int priv_data_size;
+    /* tell if a given file has a chance of being parsed by this format */
+ int (*read_probe)(AVProbeData *);
+ /* read the format header and initialize the AVFormatContext
+       structure. Return 0 if OK. 'ap', if non-NULL, contains
+       additional parameters. Only used in raw formats right
+       now. 'av_new_stream' should be called to create new streams. */
+ int (*read_header)(struct AVFormatContext *,
+ AVFormatParameters *ap);
+ /* read one packet and put it in 'pkt'. pts and flags are also
+ set. 'av_new_stream' can be called only if the flag
+ AVFMTCTX_NOHEADER is used. */
+ int (*read_packet)(struct AVFormatContext *, AVPacket *pkt);
+ /* close the stream. The AVFormatContext and AVStreams are not
+ freed by this function */
+ int (*read_close)(struct AVFormatContext *);
+ /**
+ * seek to a given timestamp relative to the frames in
+ * stream component stream_index
+ * @param stream_index must not be -1
+ * @param flags selects which direction should be preferred if no exact
+ * match is available
+ */
+ int (*read_seek)(struct AVFormatContext *,
+ int stream_index, int64_t timestamp, int flags);
+ /**
+ * gets the next timestamp in AV_TIME_BASE units.
+ */
+ int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index,
+ int64_t *pos, int64_t pos_limit);
+ /* can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER */
+ int flags;
+ /* if extensions are defined, then no probe is done. You should
+ usually not use extension format guessing because it is not
+ reliable enough */
+ const char *extensions;
+ /* general purpose read only value that the format can use */
+ int value;
+
+ /* start/resume playing - only meaningful if using a network based format
+ (RTSP) */
+ int (*read_play)(struct AVFormatContext *);
+
+ /* pause playing - only meaningful if using a network based format
+ (RTSP) */
+ int (*read_pause)(struct AVFormatContext *);
+
+ /* private fields */
+ struct AVInputFormat *next;
+} AVInputFormat;
+
+typedef struct AVIndexEntry {
+ int64_t pos;
+ int64_t timestamp;
+#define AVINDEX_KEYFRAME 0x0001
+ int flags:2;
+    int size:30; // keep the size of this entry small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment)
+ int min_distance; /* min distance between this and the previous keyframe, used to avoid unneeded searching */
+} AVIndexEntry;
+
+typedef struct AVStream {
+ int index; /* stream index in AVFormatContext */
+ int id; /* format specific stream id */
+ AVCodecContext *codec; /* codec context */
+ /**
+     * Real base frame rate of the stream.
+     * This is the lowest framerate with which all timestamps can be
+     * represented accurately (it is the least common multiple of all
+     * framerates in the stream). Note that this value is just a guess!
+     * For example, if the timebase is 1/90000 and all frames have either
+     * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1.
+ */
+ AVRational r_frame_rate;
+ void *priv_data;
+ /* internal data used in av_find_stream_info() */
+ int64_t codec_info_duration;
+ int codec_info_nb_frames;
+ /* encoding: PTS generation when outputing stream */
+ AVFrac pts;
+
+ /**
+ * this is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. for fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ */
+ AVRational time_base;
+ int pts_wrap_bits; /* number of bits in pts (used for wrapping control) */
+ /* ffmpeg.c private use */
+ int stream_copy; /* if TRUE, just copy stream */
+    enum AVDiscard discard; ///< selects which packets can be discarded at will and don't need to be demuxed
+ //FIXME move stuff to a flags field?
+ /* quality, as it has been removed from AVCodecContext and put in AVVideoFrame
+     * MN: not sure if that's the right place for it */
+ float quality;
+ /* decoding: position of the first frame of the component, in
+ AV_TIME_BASE fractional seconds. */
+ int64_t start_time;
+ /* decoding: duration of the stream, in AV_TIME_BASE fractional
+ seconds. */
+ int64_t duration;
+
+ char language[4]; /* ISO 639 3-letter language code (empty string if undefined) */
+
+ /* av_read_frame() support */
+    int need_parsing; ///< 1->full parsing needed, 2->only parse headers, don't repack
+ struct AVCodecParserContext *parser;
+
+ int64_t cur_dts;
+ int last_IP_duration;
+ int64_t last_IP_pts;
+ /* av_seek_frame() support */
+ AVIndexEntry *index_entries; /* only used if the format does not
+ support seeking natively */
+ int nb_index_entries;
+ unsigned int index_entries_allocated_size;
+
+ int64_t nb_frames; ///< number of frames in this stream if known or 0
+
+#define MAX_REORDER_DELAY 4
+ int64_t pts_buffer[MAX_REORDER_DELAY+1];
+} AVStream;
+
+#define AVFMTCTX_NOHEADER 0x0001 /* signal that no header is present
+ (streams are added dynamically) */
+
+#define MAX_STREAMS 20
+
+/* format I/O context */
+typedef struct AVFormatContext {
+ const AVClass *av_class; /* set by av_alloc_format_context */
+ /* can only be iformat or oformat, not both at the same time */
+ struct AVInputFormat *iformat;
+ struct AVOutputFormat *oformat;
+ void *priv_data;
+ ByteIOContext pb;
+ int nb_streams;
+ AVStream *streams[MAX_STREAMS];
+ char filename[1024]; /* input or output filename */
+ /* stream info */
+ int64_t timestamp;
+ char title[512];
+ char author[512];
+ char copyright[512];
+ char comment[512];
+ char album[512];
+ int year; /* ID3 year, 0 if none */
+ int track; /* track number, 0 if none */
+ char genre[32]; /* ID3 genre */
+
+ int ctx_flags; /* format specific flags, see AVFMTCTX_xx */
+ /* private data for pts handling (do not modify directly) */
+ /* This buffer is only needed when packets were already buffered but
+ not decoded, for example to get the codec parameters in mpeg
+ streams */
+ struct AVPacketList *packet_buffer;
+
+ /* decoding: position of the first frame of the component, in
+ AV_TIME_BASE fractional seconds. NEVER set this value directly:
+ it is deduced from the AVStream values. */
+ int64_t start_time;
+ /* decoding: duration of the stream, in AV_TIME_BASE fractional
+ seconds. NEVER set this value directly: it is deduced from the
+ AVStream values. */
+ int64_t duration;
+ /* decoding: total file size. 0 if unknown */
+ int64_t file_size;
+ /* decoding: total stream bitrate in bit/s, 0 if not
+ available. Never set it directly if the file_size and the
+ duration are known as ffmpeg can compute it automatically. */
+ int bit_rate;
+
+ /* av_read_frame() support */
+ AVStream *cur_st;
+ const uint8_t *cur_ptr;
+ int cur_len;
+ AVPacket cur_pkt;
+
+ /* av_seek_frame() support */
+ int64_t data_offset; /* offset of the first packet */
+ int index_built;
+
+ int mux_rate;
+ int packet_size;
+ int preload;
+ int max_delay;
+
+#define AVFMT_NOOUTPUTLOOP -1
+#define AVFMT_INFINITEOUTPUTLOOP 0
+ /* number of times to loop output in formats that support it */
+ int loop_output;
+
+ int flags;
+#define AVFMT_FLAG_GENPTS 0x0001 ///< generate pts if missing even if it requires parsing future frames
+#define AVFMT_FLAG_IGNIDX 0x0002 ///< ignore index
+
+ int loop_input;
+ /* decoding: size of data to probe; encoding unused */
+ unsigned int probesize;
+} AVFormatContext;
+
+typedef struct AVPacketList {
+ AVPacket pkt;
+ struct AVPacketList *next;
+} AVPacketList;
+
+extern AVInputFormat *first_iformat;
+extern AVOutputFormat *first_oformat;
+
+enum CodecID av_guess_image2_codec(const char *filename);
+
+/* XXX: use automatic init with either ELF sections or C file parser */
+/* modules */
+
+#include "rtp.h"
+
+#include "rtsp.h"
+
+/* utils.c */
+void av_register_input_format(AVInputFormat *format);
+void av_register_output_format(AVOutputFormat *format);
+AVOutputFormat *guess_stream_format(const char *short_name,
+ const char *filename, const char *mime_type);
+AVOutputFormat *guess_format(const char *short_name,
+ const char *filename, const char *mime_type);
+enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
+ const char *filename, const char *mime_type, enum CodecType type);
+
+void av_hex_dump(FILE *f, uint8_t *buf, int size);
+void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload);
+
+void av_register_all(void);
+
+/* media file input */
+AVInputFormat *av_find_input_format(const char *short_name);
+AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);
+int av_open_input_stream(AVFormatContext **ic_ptr,
+ ByteIOContext *pb, const char *filename,
+ AVInputFormat *fmt, AVFormatParameters *ap);
+int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
+ AVInputFormat *fmt,
+ int buf_size,
+ AVFormatParameters *ap);
+/* no av_open for output, so applications will need this: */
+AVFormatContext *av_alloc_format_context(void);
+
+#define AVERROR_UNKNOWN (-1) /* unknown error */
+#define AVERROR_IO (-2) /* i/o error */
+#define AVERROR_NUMEXPECTED (-3) /* number syntax expected in filename */
+#define AVERROR_INVALIDDATA (-4) /* invalid data found */
+#define AVERROR_NOMEM (-5) /* not enough memory */
+#define AVERROR_NOFMT (-6) /* unknown format */
+#define AVERROR_NOTSUPP (-7) /* operation not supported */
+
+int av_find_stream_info(AVFormatContext *ic);
+int av_read_packet(AVFormatContext *s, AVPacket *pkt);
+int av_read_frame(AVFormatContext *s, AVPacket *pkt);
+int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);
+int av_read_play(AVFormatContext *s);
+int av_read_pause(AVFormatContext *s);
+void av_close_input_file(AVFormatContext *s);
+AVStream *av_new_stream(AVFormatContext *s, int id);
+void av_set_pts_info(AVStream *s, int pts_wrap_bits,
+ int pts_num, int pts_den);
+
+#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward
+#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes
+#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non keyframes
+
+int av_find_default_stream_index(AVFormatContext *s);
+int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
+int av_add_index_entry(AVStream *st,
+ int64_t pos, int64_t timestamp, int size, int distance, int flags);
+int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags);
+void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp);
+int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ));
+
+/* media file output */
+int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
+int av_write_header(AVFormatContext *s);
+int av_write_frame(AVFormatContext *s, AVPacket *pkt);
+int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);
+int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush);
+
+int av_write_trailer(AVFormatContext *s);
+
+void dump_format(AVFormatContext *ic,
+ int index,
+ const char *url,
+ int is_output);
+int parse_image_size(int *width_ptr, int *height_ptr, const char *str);
+int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg);
+int64_t parse_date(const char *datestr, int duration);
+
+int64_t av_gettime(void);
+
+/* ffm specific for ffserver */
+#define FFM_PACKET_SIZE 4096
+offset_t ffm_read_write_index(int fd);
+void ffm_write_write_index(int fd, offset_t pos);
+void ffm_set_write_index(AVFormatContext *s, offset_t pos, offset_t file_size);
+
+int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
+
+int av_get_frame_filename(char *buf, int buf_size,
+ const char *path, int number);
+int av_filename_number_test(const char *filename);
+
+/* grab specific */
+int video_grab_init(void);
+int audio_init(void);
+
+/* DV1394 */
+int dv1394_init(void);
+int dc1394_init(void);
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "os_support.h"
+
+int strstart(const char *str, const char *val, const char **ptr);
+int stristart(const char *str, const char *val, const char **ptr);
+void pstrcpy(char *buf, int buf_size, const char *str);
+char *pstrcat(char *buf, int buf_size, const char *s);
+
+void __dynarray_add(unsigned long **tab_ptr, int *nb_ptr, unsigned long elem);
+
+#ifdef __GNUC__
+#define dynarray_add(tab, nb_ptr, elem)\
+do {\
+ typeof(tab) _tab = (tab);\
+ typeof(elem) _elem = (elem);\
+ (void)sizeof(**_tab == _elem); /* check that types are compatible */\
+ __dynarray_add((unsigned long **)_tab, nb_ptr, (unsigned long)_elem);\
+} while(0)
+#else
+#define dynarray_add(tab, nb_ptr, elem)\
+do {\
+ __dynarray_add((unsigned long **)(tab), nb_ptr, (unsigned long)(elem));\
+} while(0)
+#endif
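+
+/* Usage sketch for dynarray_add (illustrative only; the names below are made
+ * up for the example): grow a table of pointers while keeping the element
+ * count in sync.
+ *
+ *   AVStream **streams = NULL;
+ *   int nb_streams = 0;
+ *   dynarray_add(&streams, &nb_streams, new_stream_ptr);
+ */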
+
+time_t mktimegm(struct tm *tm);
+struct tm *brktimegm(time_t secs, struct tm *tm);
+const char *small_strptime(const char *p, const char *fmt,
+ struct tm *dt);
+
+struct in_addr;
+int resolve_host(struct in_addr *sin_addr, const char *hostname);
+
+void url_split(char *proto, int proto_size,
+ char *authorization, int authorization_size,
+ char *hostname, int hostname_size,
+ int *port_ptr,
+ char *path, int path_size,
+ const char *url);
+
+int match_ext(const char *filename, const char *extensions);
+
+#endif /* HAVE_AV_CONFIG_H */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AVFORMAT_H */
+
diff --git a/contrib/ffmpeg/libavformat/avi.h b/contrib/ffmpeg/libavformat/avi.h
new file mode 100644
index 000000000..2c360689b
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avi.h
@@ -0,0 +1,39 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FFMPEG_AVI_H
+#define FFMPEG_AVI_H
+
+#include "avcodec.h"
+
+#define AVIF_HASINDEX 0x00000010 // Index at end of file?
+#define AVIF_MUSTUSEINDEX 0x00000020
+#define AVIF_ISINTERLEAVED 0x00000100
+#define AVIF_TRUSTCKTYPE 0x00000800 // Use CKType to find key frames?
+#define AVIF_WASCAPTUREFILE 0x00010000
+#define AVIF_COPYRIGHTED 0x00020000
+
+#define AVI_MAX_RIFF_SIZE 0x40000000LL
+#define AVI_MASTER_INDEX_SIZE 256
+
+/* index flags */
+#define AVIIF_INDEX 0x10
+
+#endif /* FFMPEG_AVI_H */
diff --git a/contrib/ffmpeg/libavformat/avidec.c b/contrib/ffmpeg/libavformat/avidec.c
new file mode 100644
index 000000000..d1af79fa3
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avidec.c
@@ -0,0 +1,989 @@
+/*
+ * AVI demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "avi.h"
+#include "dv.h"
+#include "riff.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+//#define DEBUG
+//#define DEBUG_SEEK
+
+typedef struct AVIStream {
+ int64_t frame_offset; /* current frame (video) or byte (audio) counter
+ (used to compute the pts) */
+ int remaining;
+ int packet_size;
+
+ int scale;
+ int rate;
+ int sample_size; /* size of one sample (or packet) (in the rate/scale sense) in bytes */
+
+ int64_t cum_len; /* temporary storage (used during seek) */
+
+ int prefix; ///< normally 'd'<<8 + 'c' or 'w'<<8 + 'b'
+ int prefix_count;
+} AVIStream;
+
+typedef struct {
+ int64_t riff_end;
+ int64_t movi_end;
+ offset_t movi_list;
+ int index_loaded;
+ int is_odml;
+ int non_interleaved;
+ int stream_index;
+ DVDemuxContext* dv_demux;
+} AVIContext;
+
+static int avi_load_index(AVFormatContext *s);
+static int guess_ni_flag(AVFormatContext *s);
+
+#ifdef DEBUG
+static void print_tag(const char *str, unsigned int tag, int size)
+{
+ printf("%s: tag=%c%c%c%c size=0x%x\n",
+ str, tag & 0xff,
+ (tag >> 8) & 0xff,
+ (tag >> 16) & 0xff,
+ (tag >> 24) & 0xff,
+ size);
+}
+#endif
+
+static int get_riff(AVIContext *avi, ByteIOContext *pb)
+{
+ uint32_t tag;
+ /* check RIFF header */
+ tag = get_le32(pb);
+
+ if (tag != MKTAG('R', 'I', 'F', 'F'))
+ return -1;
+ avi->riff_end = get_le32(pb); /* RIFF chunk size */
+ avi->riff_end += url_ftell(pb); /* RIFF chunk end */
+ tag = get_le32(pb);
+ if (tag != MKTAG('A', 'V', 'I', ' ') && tag != MKTAG('A', 'V', 'I', 'X'))
+ return -1;
+
+ return 0;
+}
+
+static int read_braindead_odml_indx(AVFormatContext *s, int frame_num){
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int longs_pre_entry= get_le16(pb);
+ int index_sub_type = get_byte(pb);
+ int index_type = get_byte(pb);
+ int entries_in_use = get_le32(pb);
+ int chunk_id = get_le32(pb);
+ int64_t base = get_le64(pb);
+ int stream_id= 10*((chunk_id&0xFF) - '0') + (((chunk_id>>8)&0xFF) - '0');
+ AVStream *st;
+ AVIStream *ast;
+ int i;
+ int64_t last_pos= -1;
+ int64_t filesize= url_fsize(&s->pb);
+
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_ERROR, "longs_pre_entry:%d index_type:%d entries_in_use:%d chunk_id:%X base:%16"PRIX64"\n",
+ longs_pre_entry,index_type, entries_in_use, chunk_id, base);
+#endif
+
+ if(stream_id >= s->nb_streams || stream_id < 0)
+ return -1;
+ st= s->streams[stream_id];
+ ast = st->priv_data;
+
+ if(index_sub_type)
+ return -1;
+
+ get_le32(pb);
+
+ if(index_type && longs_pre_entry != 2)
+ return -1;
+ if(index_type>1)
+ return -1;
+
+ if(filesize > 0 && base >= filesize){
+ av_log(s, AV_LOG_ERROR, "ODML index invalid\n");
+ if(base>>32 == (base & 0xFFFFFFFF) && (base & 0xFFFFFFFF) < filesize && filesize <= 0xFFFFFFFF)
+ base &= 0xFFFFFFFF;
+ else
+ return -1;
+ }
+
+ for(i=0; i<entries_in_use; i++){
+ if(index_type){
+ int64_t pos= get_le32(pb) + base - 8;
+ int len = get_le32(pb);
+ int key= len >= 0;
+ len &= 0x7FFFFFFF;
+
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_ERROR, "pos:%"PRId64", len:%X\n", pos, len);
+#endif
+ if(last_pos == pos || pos == base - 8)
+ avi->non_interleaved= 1;
+ else
+ av_add_index_entry(st, pos, ast->cum_len / FFMAX(1, ast->sample_size), len, 0, key ? AVINDEX_KEYFRAME : 0);
+
+ if(ast->sample_size)
+ ast->cum_len += len;
+ else
+ ast->cum_len ++;
+ last_pos= pos;
+ }else{
+ int64_t offset, pos;
+ int duration;
+ offset = get_le64(pb);
+ get_le32(pb); /* size */
+ duration = get_le32(pb);
+ pos = url_ftell(pb);
+
+ url_fseek(pb, offset+8, SEEK_SET);
+ read_braindead_odml_indx(s, frame_num);
+ frame_num += duration;
+
+ url_fseek(pb, pos, SEEK_SET);
+ }
+ }
+ avi->index_loaded=1;
+ return 0;
+}
+
+static void clean_index(AVFormatContext *s){
+ int i;
+ int64_t j;
+
+ for(i=0; i<s->nb_streams; i++){
+ AVStream *st = s->streams[i];
+ AVIStream *ast = st->priv_data;
+ int n= st->nb_index_entries;
+ int max= ast->sample_size;
+ int64_t pos, size, ts;
+
+ if(n != 1 || ast->sample_size==0)
+ continue;
+
+ while(max < 1024) max+=max;
+
+ pos= st->index_entries[0].pos;
+ size= st->index_entries[0].size;
+ ts= st->index_entries[0].timestamp;
+
+ for(j=0; j<size; j+=max){
+ av_add_index_entry(st, pos+j, ts + j/ast->sample_size, FFMIN(max, size-j), 0, AVINDEX_KEYFRAME);
+ }
+ }
+}
+
+static int avi_read_tag(ByteIOContext *pb, char *buf, int maxlen, unsigned int size)
+{
+ offset_t i = url_ftell(pb);
+ size += (size & 1);
+ get_strz(pb, buf, maxlen);
+ url_fseek(pb, i+size, SEEK_SET);
+ return 0;
+}
+
+static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t tag, tag1, handler;
+ int codec_type, stream_index, frame_period, bit_rate;
+ unsigned int size, nb_frames;
+ int i, n;
+ AVStream *st;
+ AVIStream *ast = NULL;
+ char str_track[4];
+
+ avi->stream_index= -1;
+
+ if (get_riff(avi, pb) < 0)
+ return -1;
+
+ /* first list tag */
+ stream_index = -1;
+ codec_type = -1;
+ frame_period = 0;
+ for(;;) {
+ if (url_feof(pb))
+ goto fail;
+ tag = get_le32(pb);
+ size = get_le32(pb);
+#ifdef DEBUG
+ print_tag("tag", tag, size);
+#endif
+
+ switch(tag) {
+ case MKTAG('L', 'I', 'S', 'T'):
+ /* ignored, except when start of video packets */
+ tag1 = get_le32(pb);
+#ifdef DEBUG
+ print_tag("list", tag1, 0);
+#endif
+ if (tag1 == MKTAG('m', 'o', 'v', 'i')) {
+ avi->movi_list = url_ftell(pb) - 4;
+ if(size) avi->movi_end = avi->movi_list + size + (size & 1);
+ else avi->movi_end = url_fsize(pb);
+#ifdef DEBUG
+ printf("movi end=%"PRIx64"\n", avi->movi_end);
+#endif
+ goto end_of_header;
+ }
+ break;
+ case MKTAG('d', 'm', 'l', 'h'):
+ avi->is_odml = 1;
+ url_fskip(pb, size + (size & 1));
+ break;
+ case MKTAG('a', 'v', 'i', 'h'):
+ /* avi header */
+ /* using frame_period is a bad idea */
+ frame_period = get_le32(pb);
+ bit_rate = get_le32(pb) * 8;
+ get_le32(pb);
+ avi->non_interleaved |= get_le32(pb) & AVIF_MUSTUSEINDEX;
+
+ url_fskip(pb, 2 * 4);
+ n = get_le32(pb);
+ for(i=0;i<n;i++) {
+ AVIStream *ast;
+ st = av_new_stream(s, i);
+ if (!st)
+ goto fail;
+
+ ast = av_mallocz(sizeof(AVIStream));
+ if (!ast)
+ goto fail;
+ st->priv_data = ast;
+ }
+ url_fskip(pb, size - 7 * 4);
+ break;
+ case MKTAG('s', 't', 'r', 'h'):
+ /* stream header */
+ stream_index++;
+ tag1 = get_le32(pb);
+ handler = get_le32(pb); /* codec tag */
+#ifdef DEBUG
+ print_tag("strh", tag1, -1);
+#endif
+ if(tag1 == MKTAG('i', 'a', 'v', 's') || tag1 == MKTAG('i', 'v', 'a', 's')){
+ /*
+ * After some consideration -- I don't think we
+ * have to support anything but DV in type-1 AVIs.
+ */
+ if (s->nb_streams != 1)
+ goto fail;
+
+ if (handler != MKTAG('d', 'v', 's', 'd') &&
+ handler != MKTAG('d', 'v', 'h', 'd') &&
+ handler != MKTAG('d', 'v', 's', 'l'))
+ goto fail;
+
+ ast = s->streams[0]->priv_data;
+ av_freep(&s->streams[0]->codec->extradata);
+ av_freep(&s->streams[0]);
+ s->nb_streams = 0;
+ if (ENABLE_DV_DEMUXER) {
+ avi->dv_demux = dv_init_demux(s);
+ if (!avi->dv_demux)
+ goto fail;
+ }
+ s->streams[0]->priv_data = ast;
+ url_fskip(pb, 3 * 4);
+ ast->scale = get_le32(pb);
+ ast->rate = get_le32(pb);
+ stream_index = s->nb_streams - 1;
+ url_fskip(pb, size - 7*4);
+ break;
+ }
+
+ if (stream_index >= s->nb_streams) {
+ url_fskip(pb, size - 8);
+ /* ignore padding stream */
+ if (tag1 == MKTAG('p', 'a', 'd', 's'))
+ stream_index--;
+ break;
+ }
+ st = s->streams[stream_index];
+ ast = st->priv_data;
+ st->codec->stream_codec_tag= handler;
+
+ get_le32(pb); /* flags */
+ get_le16(pb); /* priority */
+ get_le16(pb); /* language */
+ get_le32(pb); /* initial frame */
+ ast->scale = get_le32(pb);
+ ast->rate = get_le32(pb);
+ if(ast->scale && ast->rate){
+ }else if(frame_period){
+ ast->rate = 1000000;
+ ast->scale = frame_period;
+ }else{
+ ast->rate = 25;
+ ast->scale = 1;
+ }
+ av_set_pts_info(st, 64, ast->scale, ast->rate);
+
+ ast->cum_len=get_le32(pb); /* start */
+ nb_frames = get_le32(pb);
+
+ st->start_time = 0;
+ st->duration = nb_frames;
+ get_le32(pb); /* buffer size */
+ get_le32(pb); /* quality */
+ ast->sample_size = get_le32(pb); /* sample size */
+ ast->cum_len *= FFMAX(1, ast->sample_size);
+// av_log(NULL, AV_LOG_DEBUG, "%d %d %d %d\n", ast->rate, ast->scale, ast->start, ast->sample_size);
+
+ switch(tag1) {
+ case MKTAG('v', 'i', 'd', 's'):
+ codec_type = CODEC_TYPE_VIDEO;
+
+ ast->sample_size = 0;
+ break;
+ case MKTAG('a', 'u', 'd', 's'):
+ codec_type = CODEC_TYPE_AUDIO;
+ break;
+ case MKTAG('t', 'x', 't', 's'):
+ codec_type = CODEC_TYPE_DATA; //FIXME: should probably be a dedicated subtitle type (CODEC_TYPE_SUB?)
+ break;
+ case MKTAG('p', 'a', 'd', 's'):
+ codec_type = CODEC_TYPE_UNKNOWN;
+ stream_index--;
+ break;
+ default:
+ av_log(s, AV_LOG_ERROR, "unknown stream type %X\n", tag1);
+ goto fail;
+ }
+ ast->frame_offset= ast->cum_len;
+ url_fskip(pb, size - 12 * 4);
+ break;
+ case MKTAG('s', 't', 'r', 'f'):
+ /* stream header */
+ if (stream_index >= s->nb_streams || avi->dv_demux) {
+ url_fskip(pb, size);
+ } else {
+ st = s->streams[stream_index];
+ switch(codec_type) {
+ case CODEC_TYPE_VIDEO:
+ get_le32(pb); /* size */
+ st->codec->width = get_le32(pb);
+ st->codec->height = get_le32(pb);
+ get_le16(pb); /* planes */
+ st->codec->bits_per_sample= get_le16(pb); /* depth */
+ tag1 = get_le32(pb);
+ get_le32(pb); /* ImageSize */
+ get_le32(pb); /* XPelsPerMeter */
+ get_le32(pb); /* YPelsPerMeter */
+ get_le32(pb); /* ClrUsed */
+ get_le32(pb); /* ClrImportant */
+
+ if(size > 10*4 && size<(1<<30)){
+ st->codec->extradata_size= size - 10*4;
+ st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
+ }
+
+ if(st->codec->extradata_size & 1) //FIXME check if the encoder really did this correctly
+ get_byte(pb);
+
+ /* Extract palette from extradata if bpp <= 8 */
+ /* This code assumes that extradata contains only palette */
+ /* This is true for all paletted codecs implemented in ffmpeg */
+ if (st->codec->extradata_size && (st->codec->bits_per_sample <= 8)) {
+ st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
+#ifdef WORDS_BIGENDIAN
+ for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
+ st->codec->palctrl->palette[i] = bswap_32(((uint32_t*)st->codec->extradata)[i]);
+#else
+ memcpy(st->codec->palctrl->palette, st->codec->extradata,
+ FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
+#endif
+ st->codec->palctrl->palette_changed = 1;
+ }
+
+#ifdef DEBUG
+ print_tag("video", tag1, 0);
+#endif
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_tag = tag1;
+ st->codec->codec_id = codec_get_id(codec_bmp_tags, tag1);
+ st->need_parsing = 2; //only parse headers, don't do slower repacketization; this is needed to get the picture type, which in turn is needed to generate correct pts
+// url_fskip(pb, size - 5 * 4);
+ break;
+ case CODEC_TYPE_AUDIO:
+ get_wav_header(pb, st->codec, size);
+ if(ast->sample_size && st->codec->block_align && ast->sample_size % st->codec->block_align)
+ av_log(s, AV_LOG_DEBUG, "invalid sample size or block align detected\n");
+ if (size%2) /* 2-aligned (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */
+ url_fskip(pb, 1);
+ /* Force parsing as several audio frames can be in
+ * one packet. */
+ st->need_parsing = 1;
+ /* The ADTS header is in extradata; AAC without a header must be stored as exact frames, so the parser is not needed and would fail. */
+ if (st->codec->codec_id == CODEC_ID_AAC && st->codec->extradata_size)
+ st->need_parsing = 0;
+ /* AVI files with Xan DPCM audio (wrongly) declare PCM
+ * audio in the header but have Axan as stream_codec_tag. */
+ if (st->codec->stream_codec_tag == ff_get_fourcc("Axan")){
+ st->codec->codec_id = CODEC_ID_XAN_DPCM;
+ st->codec->codec_tag = 0;
+ }
+ break;
+ default:
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ st->codec->codec_id= CODEC_ID_NONE;
+ st->codec->codec_tag= 0;
+ url_fskip(pb, size);
+ break;
+ }
+ }
+ break;
+ case MKTAG('i', 'n', 'd', 'x'):
+ i= url_ftell(pb);
+ if(!url_is_streamed(pb) && !(s->flags & AVFMT_FLAG_IGNIDX)){
+ read_braindead_odml_indx(s, 0);
+ }
+ url_fseek(pb, i+size, SEEK_SET);
+ break;
+ case MKTAG('I', 'N', 'A', 'M'):
+ avi_read_tag(pb, s->title, sizeof(s->title), size);
+ break;
+ case MKTAG('I', 'A', 'R', 'T'):
+ avi_read_tag(pb, s->author, sizeof(s->author), size);
+ break;
+ case MKTAG('I', 'C', 'O', 'P'):
+ avi_read_tag(pb, s->copyright, sizeof(s->copyright), size);
+ break;
+ case MKTAG('I', 'C', 'M', 'T'):
+ avi_read_tag(pb, s->comment, sizeof(s->comment), size);
+ break;
+ case MKTAG('I', 'G', 'N', 'R'):
+ avi_read_tag(pb, s->genre, sizeof(s->genre), size);
+ break;
+ case MKTAG('I', 'P', 'R', 'D'):
+ avi_read_tag(pb, s->album, sizeof(s->album), size);
+ break;
+ case MKTAG('I', 'P', 'R', 'T'):
+ avi_read_tag(pb, str_track, sizeof(str_track), size);
+ sscanf(str_track, "%d", &s->track);
+ break;
+ default:
+ /* skip tag */
+ size += (size & 1);
+ url_fskip(pb, size);
+ break;
+ }
+ }
+ end_of_header:
+ /* check stream number */
+ if (stream_index != s->nb_streams - 1) {
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ av_freep(&s->streams[i]->codec->extradata);
+ av_freep(&s->streams[i]);
+ }
+ return -1;
+ }
+
+ if(!avi->index_loaded && !url_is_streamed(pb))
+ avi_load_index(s);
+ avi->index_loaded = 1;
+ avi->non_interleaved |= guess_ni_flag(s);
+ if(avi->non_interleaved)
+ clean_index(s);
+
+ return 0;
+}
+
+static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int n, d[8], size;
+ offset_t i, sync;
+ void* dstr;
+
+ if (ENABLE_DV_DEMUXER && avi->dv_demux) {
+ size = dv_get_packet(avi->dv_demux, pkt);
+ if (size >= 0)
+ return size;
+ }
+
+ if(avi->non_interleaved){
+ int best_stream_index = 0;
+ AVStream *best_st= NULL;
+ AVIStream *best_ast;
+ int64_t best_ts= INT64_MAX;
+ int i;
+
+ for(i=0; i<s->nb_streams; i++){
+ AVStream *st = s->streams[i];
+ AVIStream *ast = st->priv_data;
+ int64_t ts= ast->frame_offset;
+
+ if(ast->sample_size)
+ ts /= ast->sample_size;
+ ts= av_rescale(ts, AV_TIME_BASE * (int64_t)st->time_base.num, st->time_base.den);
+
+// av_log(NULL, AV_LOG_DEBUG, "%"PRId64" %d/%d %"PRId64"\n", ts, st->time_base.num, st->time_base.den, ast->frame_offset);
+ if(ts < best_ts){
+ best_ts= ts;
+ best_st= st;
+ best_stream_index= i;
+ }
+ }
+ best_ast = best_st->priv_data;
+ best_ts= av_rescale(best_ts, best_st->time_base.den, AV_TIME_BASE * (int64_t)best_st->time_base.num); //FIXME a little ugly
+ if(best_ast->remaining)
+ i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
+ else
+ i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY);
+
+// av_log(NULL, AV_LOG_DEBUG, "%d\n", i);
+ if(i>=0){
+ int64_t pos= best_st->index_entries[i].pos;
+ pos += best_ast->packet_size - best_ast->remaining;
+ url_fseek(&s->pb, pos + 8, SEEK_SET);
+// av_log(NULL, AV_LOG_DEBUG, "pos=%"PRId64"\n", pos);
+
+ assert(best_ast->remaining <= best_ast->packet_size);
+
+ avi->stream_index= best_stream_index;
+ if(!best_ast->remaining)
+ best_ast->packet_size=
+ best_ast->remaining= best_st->index_entries[i].size;
+ }
+ }
+
+resync:
+ if(avi->stream_index >= 0){
+ AVStream *st= s->streams[ avi->stream_index ];
+ AVIStream *ast= st->priv_data;
+ int size;
+
+ if(ast->sample_size <= 1) // minorityreport.AVI block_align=1024 sample_size=1 IMA-ADPCM
+ size= INT_MAX;
+ else if(ast->sample_size < 32)
+ size= 64*ast->sample_size;
+ else
+ size= ast->sample_size;
+
+ if(size > ast->remaining)
+ size= ast->remaining;
+ av_get_packet(pb, pkt, size);
+
+ if (ENABLE_DV_DEMUXER && avi->dv_demux) {
+ dstr = pkt->destruct;
+ size = dv_produce_packet(avi->dv_demux, pkt,
+ pkt->data, pkt->size);
+ pkt->destruct = dstr;
+ pkt->flags |= PKT_FLAG_KEY;
+ } else {
+ /* XXX: how to handle B frames in avi ? */
+ pkt->dts = ast->frame_offset;
+// pkt->dts += ast->start;
+ if(ast->sample_size)
+ pkt->dts /= ast->sample_size;
+//av_log(NULL, AV_LOG_DEBUG, "dts:%"PRId64" offset:%"PRId64" %d/%d smpl_siz:%d base:%d st:%d size:%d\n", pkt->dts, ast->frame_offset, ast->scale, ast->rate, ast->sample_size, AV_TIME_BASE, avi->stream_index, size);
+ pkt->stream_index = avi->stream_index;
+
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ if(st->index_entries){
+ AVIndexEntry *e;
+ int index;
+
+ index= av_index_search_timestamp(st, pkt->dts, 0);
+ if(index >= 0){
+ /* only dereference the entry once the index is known to be valid */
+ e= &st->index_entries[index];
+ if(e->timestamp == ast->frame_offset && (e->flags & AVINDEX_KEYFRAME))
+ pkt->flags |= PKT_FLAG_KEY;
+ }
+ } else {
+ /* if no index, better to say that all frames
+ are key frames */
+ pkt->flags |= PKT_FLAG_KEY;
+ }
+ } else {
+ pkt->flags |= PKT_FLAG_KEY;
+ }
+ if(ast->sample_size)
+ ast->frame_offset += pkt->size;
+ else
+ ast->frame_offset++;
+ }
+ ast->remaining -= size;
+ if(!ast->remaining){
+ avi->stream_index= -1;
+ ast->packet_size= 0;
+ if (size & 1) {
+ get_byte(pb);
+ size++;
+ }
+ }
+
+ return size;
+ }
+
+ memset(d, -1, sizeof(int)*8);
+ for(i=sync=url_ftell(pb); !url_feof(pb); i++) {
+ int j;
+
+ if (i >= avi->movi_end) {
+ if (avi->is_odml) {
+ url_fskip(pb, avi->riff_end - i);
+ avi->riff_end = avi->movi_end = url_fsize(pb);
+ } else
+ break;
+ }
+
+ for(j=0; j<7; j++)
+ d[j]= d[j+1];
+ d[7]= get_byte(pb);
+
+ size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24);
+
+ if( d[2] >= '0' && d[2] <= '9'
+ && d[3] >= '0' && d[3] <= '9'){
+ n= (d[2] - '0') * 10 + (d[3] - '0');
+ }else{
+ n= 100; //invalid stream id
+ }
+//av_log(NULL, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %"PRId64" %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
+ if(i + size > avi->movi_end || d[0]<0)
+ continue;
+
+ //parse ix##
+ if( (d[0] == 'i' && d[1] == 'x' && n < s->nb_streams)
+ //parse JUNK
+ ||(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K')){
+ url_fskip(pb, size);
+//av_log(NULL, AV_LOG_DEBUG, "SKIP\n");
+ goto resync;
+ }
+
+ if( d[0] >= '0' && d[0] <= '9'
+ && d[1] >= '0' && d[1] <= '9'){
+ n= (d[0] - '0') * 10 + (d[1] - '0');
+ }else{
+ n= 100; //invalid stream id
+ }
+
+ //parse ##dc/##wb
+ if(n < s->nb_streams){
+ AVStream *st;
+ AVIStream *ast;
+ st = s->streams[n];
+ ast = st->priv_data;
+
+ if( (st->discard >= AVDISCARD_DEFAULT && size==0)
+ /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & PKT_FLAG_KEY))*/ //FIXME needs a little reordering
+ || st->discard >= AVDISCARD_ALL){
+ if(ast->sample_size) ast->frame_offset += pkt->size;
+ else ast->frame_offset++;
+ url_fskip(pb, size);
+ goto resync;
+ }
+
+ if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) ||
+ d[2]*256+d[3] == ast->prefix /*||
+ (d[2] == 'd' && d[3] == 'c') ||
+ (d[2] == 'w' && d[3] == 'b')*/) {
+
+//av_log(NULL, AV_LOG_DEBUG, "OK\n");
+ if(d[2]*256+d[3] == ast->prefix)
+ ast->prefix_count++;
+ else{
+ ast->prefix= d[2]*256+d[3];
+ ast->prefix_count= 0;
+ }
+
+ avi->stream_index= n;
+ ast->packet_size= size + 8;
+ ast->remaining= size;
+ goto resync;
+ }
+ }
+ /* palette changed chunk */
+ if ( d[0] >= '0' && d[0] <= '9'
+ && d[1] >= '0' && d[1] <= '9'
+ && ((d[2] == 'p' && d[3] == 'c'))
+ && n < s->nb_streams && i + size <= avi->movi_end) {
+
+ AVStream *st;
+ int first, clr, flags, k, p;
+
+ st = s->streams[n];
+
+ first = get_byte(pb);
+ clr = get_byte(pb);
+ if(!clr) /* all 256 colors used */
+ clr = 256;
+ flags = get_le16(pb);
+ p = 4;
+ for (k = first; k < clr + first; k++) {
+ int r, g, b;
+ r = get_byte(pb);
+ g = get_byte(pb);
+ b = get_byte(pb);
+ get_byte(pb);
+ st->codec->palctrl->palette[k] = b + (g << 8) + (r << 16);
+ }
+ st->codec->palctrl->palette_changed = 1;
+ goto resync;
+ }
+
+ }
+
+ return -1;
+}
+
+/* XXX: we make the implicit assumption that the positions are sorted
+ for each stream */
+static int avi_read_idx1(AVFormatContext *s, int size)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int nb_index_entries, i;
+ AVStream *st;
+ AVIStream *ast;
+ unsigned int index, tag, flags, pos, len;
+ unsigned last_pos= -1;
+
+ nb_index_entries = size / 16;
+ if (nb_index_entries <= 0)
+ return -1;
+
+ /* read the entries and sort them in each stream component */
+ for(i = 0; i < nb_index_entries; i++) {
+ tag = get_le32(pb);
+ flags = get_le32(pb);
+ pos = get_le32(pb);
+ len = get_le32(pb);
+#if defined(DEBUG_SEEK)
+ av_log(NULL, AV_LOG_DEBUG, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/",
+ i, tag, flags, pos, len);
+#endif
+ if(i==0 && pos > avi->movi_list)
+ avi->movi_list= 0; //FIXME better check
+ pos += avi->movi_list;
+
+ index = ((tag & 0xff) - '0') * 10;
+ index += ((tag >> 8) & 0xff) - '0';
+ if (index >= s->nb_streams)
+ continue;
+ st = s->streams[index];
+ ast = st->priv_data;
+
+#if defined(DEBUG_SEEK)
+ av_log(NULL, AV_LOG_DEBUG, "%d cum_len=%"PRId64"\n", len, ast->cum_len);
+#endif
+ if(last_pos == pos)
+ avi->non_interleaved= 1;
+ else
+ av_add_index_entry(st, pos, ast->cum_len / FFMAX(1, ast->sample_size), len, 0, (flags&AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
+ if(ast->sample_size)
+ ast->cum_len += len;
+ else
+ ast->cum_len ++;
+ last_pos= pos;
+ }
+ return 0;
+}
+
+static int guess_ni_flag(AVFormatContext *s){
+ int i;
+ int64_t last_start=0;
+ int64_t first_end= INT64_MAX;
+
+ for(i=0; i<s->nb_streams; i++){
+ AVStream *st = s->streams[i];
+ int n= st->nb_index_entries;
+
+ if(n <= 0)
+ continue;
+
+ if(st->index_entries[0].pos > last_start)
+ last_start= st->index_entries[0].pos;
+ if(st->index_entries[n-1].pos < first_end)
+ first_end= st->index_entries[n-1].pos;
+ }
+ return last_start > first_end;
+}
+
+static int avi_load_index(AVFormatContext *s)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t tag, size;
+ offset_t pos= url_ftell(pb);
+
+ url_fseek(pb, avi->movi_end, SEEK_SET);
+#ifdef DEBUG_SEEK
+ printf("movi_end=0x%"PRIx64"\n", avi->movi_end);
+#endif
+ for(;;) {
+ if (url_feof(pb))
+ break;
+ tag = get_le32(pb);
+ size = get_le32(pb);
+#ifdef DEBUG_SEEK
+ printf("tag=%c%c%c%c size=0x%x\n",
+ tag & 0xff,
+ (tag >> 8) & 0xff,
+ (tag >> 16) & 0xff,
+ (tag >> 24) & 0xff,
+ size);
+#endif
+ switch(tag) {
+ case MKTAG('i', 'd', 'x', '1'):
+ if (avi_read_idx1(s, size) < 0)
+ goto skip;
+ else
+ goto the_end;
+ break;
+ default:
+ skip:
+ size += (size & 1);
+ url_fskip(pb, size);
+ break;
+ }
+ }
+ the_end:
+ url_fseek(pb, pos, SEEK_SET);
+ return 0;
+}
+
+static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+ AVIContext *avi = s->priv_data;
+ AVStream *st;
+ int i, index;
+ int64_t pos;
+
+ if (!avi->index_loaded) {
+ /* we only load the index on demand */
+ avi_load_index(s);
+ avi->index_loaded = 1;
+ }
+ assert(stream_index>= 0);
+
+ st = s->streams[stream_index];
+ index= av_index_search_timestamp(st, timestamp, flags);
+ if(index<0)
+ return -1;
+
+ /* find the position */
+ pos = st->index_entries[index].pos;
+ timestamp = st->index_entries[index].timestamp;
+
+// av_log(NULL, AV_LOG_DEBUG, "XX %"PRId64" %d %"PRId64"\n", timestamp, index, st->index_entries[index].timestamp);
+
+ for(i = 0; i < s->nb_streams; i++) {
+ AVStream *st2 = s->streams[i];
+ AVIStream *ast2 = st2->priv_data;
+
+ ast2->packet_size=
+ ast2->remaining= 0;
+
+ if (st2->nb_index_entries <= 0)
+ continue;
+
+// assert(st2->codec->block_align);
+ assert(st2->time_base.den == ast2->rate);
+ assert(st2->time_base.num == ast2->scale);
+ index = av_index_search_timestamp(
+ st2,
+ av_rescale(timestamp, st2->time_base.den*(int64_t)st->time_base.num, st->time_base.den * (int64_t)st2->time_base.num),
+ flags | AVSEEK_FLAG_BACKWARD);
+ if(index<0)
+ index=0;
+
+ if(!avi->non_interleaved){
+ while(index>0 && st2->index_entries[index].pos > pos)
+ index--;
+ while(index+1 < st2->nb_index_entries && st2->index_entries[index].pos < pos)
+ index++;
+ }
+
+// av_log(NULL, AV_LOG_DEBUG, "%"PRId64" %d %"PRId64"\n", timestamp, index, st2->index_entries[index].timestamp);
+ /* extract the current frame number */
+ ast2->frame_offset = st2->index_entries[index].timestamp;
+ if(ast2->sample_size)
+ ast2->frame_offset *=ast2->sample_size;
+ }
+
+ if (ENABLE_DV_DEMUXER && avi->dv_demux)
+ dv_flush_audio_packets(avi->dv_demux);
+ /* do the seek */
+ url_fseek(&s->pb, pos, SEEK_SET);
+ avi->stream_index= -1;
+ return 0;
+}
+
+static int avi_read_close(AVFormatContext *s)
+{
+ int i;
+ AVIContext *avi = s->priv_data;
+
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st = s->streams[i];
+ AVIStream *ast = st->priv_data;
+ av_free(ast);
+ av_free(st->codec->palctrl);
+ }
+
+ if (avi->dv_demux)
+ av_free(avi->dv_demux);
+
+ return 0;
+}
+
+static int avi_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'R' && p->buf[1] == 'I' &&
+ p->buf[2] == 'F' && p->buf[3] == 'F' &&
+ p->buf[8] == 'A' && p->buf[9] == 'V' &&
+ p->buf[10] == 'I' && p->buf[11] == ' ')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+AVInputFormat avi_demuxer = {
+ "avi",
+ "avi format",
+ sizeof(AVIContext),
+ avi_probe,
+ avi_read_header,
+ avi_read_packet,
+ avi_read_close,
+ avi_read_seek,
+};
diff --git a/contrib/ffmpeg/libavformat/avienc.c b/contrib/ffmpeg/libavformat/avienc.c
new file mode 100644
index 000000000..296608704
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avienc.c
@@ -0,0 +1,580 @@
+/*
+ * AVI muxer
+ * Copyright (c) 2000 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "avi.h"
+#include "riff.h"
+
+/*
+ * TODO:
+ * - fill all fields if non streamed (nb_frames for example)
+ */
+
+#ifdef CONFIG_AVI_MUXER
+typedef struct AVIIentry {
+ unsigned int flags, pos, len;
+} AVIIentry;
+
+#define AVI_INDEX_CLUSTER_SIZE 16384
+
+typedef struct AVIIndex {
+ offset_t indx_start;
+ int entry;
+ int ents_allocated;
+ AVIIentry** cluster;
+} AVIIndex;
+
+typedef struct {
+ offset_t riff_start, movi_list, odml_list;
+ offset_t frames_hdr_all, frames_hdr_strm[MAX_STREAMS];
+ int audio_strm_length[MAX_STREAMS];
+ int riff_id;
+ int packet_count[MAX_STREAMS];
+
+ AVIIndex indexes[MAX_STREAMS];
+} AVIContext;
+
+static inline AVIIentry* avi_get_ientry(AVIIndex* idx, int ent_id)
+{
+ int cl = ent_id / AVI_INDEX_CLUSTER_SIZE;
+ int id = ent_id % AVI_INDEX_CLUSTER_SIZE;
+ return &idx->cluster[cl][id];
+}
+
+static offset_t avi_start_new_riff(AVIContext *avi, ByteIOContext *pb,
+ const char* riff_tag, const char* list_tag)
+{
+ offset_t loff;
+ int i;
+
+ avi->riff_id++;
+ for (i=0; i<MAX_STREAMS; i++)
+ avi->indexes[i].entry = 0;
+
+ avi->riff_start = start_tag(pb, "RIFF");
+ put_tag(pb, riff_tag);
+ loff = start_tag(pb, "LIST");
+ put_tag(pb, list_tag);
+ return loff;
+}
+
+static char* avi_stream2fourcc(char* tag, int index, enum CodecType type)
+{
+ tag[0] = '0';
+ tag[1] = '0' + index;
+ if (type == CODEC_TYPE_VIDEO) {
+ tag[2] = 'd';
+ tag[3] = 'c';
+ } else {
+ tag[2] = 'w';
+ tag[3] = 'b';
+ }
+ tag[4] = '\0';
+ return tag;
+}
+
+static void avi_write_info_tag(ByteIOContext *pb, const char *tag, const char *str)
+{
+ int len = strlen(str);
+ if (len > 0) {
+ len++;
+ put_tag(pb, tag);
+ put_le32(pb, len);
+ put_strz(pb, str);
+ if (len & 1)
+ put_byte(pb, 0);
+ }
+}
+
+static int avi_write_counters(AVFormatContext* s, int riff_id)
+{
+ ByteIOContext *pb = &s->pb;
+ AVIContext *avi = s->priv_data;
+ int n, au_byterate, au_ssize, au_scale, nb_frames = 0;
+ offset_t file_size;
+ AVCodecContext* stream;
+
+ file_size = url_ftell(pb);
+ for(n = 0; n < s->nb_streams; n++) {
+ assert(avi->frames_hdr_strm[n]);
+ stream = s->streams[n]->codec;
+ url_fseek(pb, avi->frames_hdr_strm[n], SEEK_SET);
+ ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);
+ if(au_ssize == 0) {
+ put_le32(pb, avi->packet_count[n]);
+ } else {
+ put_le32(pb, avi->audio_strm_length[n] / au_ssize);
+ }
+ if(stream->codec_type == CODEC_TYPE_VIDEO)
+ nb_frames = FFMAX(nb_frames, avi->packet_count[n]);
+ }
+ if(riff_id == 1) {
+ assert(avi->frames_hdr_all);
+ url_fseek(pb, avi->frames_hdr_all, SEEK_SET);
+ put_le32(pb, nb_frames);
+ }
+ url_fseek(pb, file_size, SEEK_SET);
+
+ return 0;
+}
+
+static int avi_write_header(AVFormatContext *s)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
+ AVCodecContext *stream, *video_enc;
+ offset_t list1, list2, strh, strf;
+
+ /* header list */
+ avi->riff_id = 0;
+ list1 = avi_start_new_riff(avi, pb, "AVI ", "hdrl");
+
+ /* avi header */
+ put_tag(pb, "avih");
+ put_le32(pb, 14 * 4);
+ bitrate = 0;
+
+ video_enc = NULL;
+ for(n=0;n<s->nb_streams;n++) {
+ stream = s->streams[n]->codec;
+ bitrate += stream->bit_rate;
+ if (stream->codec_type == CODEC_TYPE_VIDEO)
+ video_enc = stream;
+ }
+
+ nb_frames = 0;
+
+ if(video_enc){
+ put_le32(pb, (uint32_t)(int64_t_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
+ } else {
+ put_le32(pb, 0);
+ }
+ put_le32(pb, bitrate / 8); /* XXX: not quite exact */
+ put_le32(pb, 0); /* padding */
+ if (url_is_streamed(pb))
+ put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
+ else
+ put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
+ avi->frames_hdr_all = url_ftell(pb); /* remember this offset to fill later */
+ put_le32(pb, nb_frames); /* nb frames, filled later */
+ put_le32(pb, 0); /* initial frame */
+ put_le32(pb, s->nb_streams); /* nb streams */
+ put_le32(pb, 1024 * 1024); /* suggested buffer size */
+ if(video_enc){
+ put_le32(pb, video_enc->width);
+ put_le32(pb, video_enc->height);
+ } else {
+ put_le32(pb, 0);
+ put_le32(pb, 0);
+ }
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+
+ /* stream list */
+ for(i=0;i<n;i++) {
+ list2 = start_tag(pb, "LIST");
+ put_tag(pb, "strl");
+
+ stream = s->streams[i]->codec;
+
+ /* FourCC should really be set by the codec itself */
+ if (! stream->codec_tag) {
+ stream->codec_tag = codec_get_bmp_tag(stream->codec_id);
+ }
+
+ /* stream generic header */
+ strh = start_tag(pb, "strh");
+ switch(stream->codec_type) {
+ case CODEC_TYPE_VIDEO: put_tag(pb, "vids"); break;
+ case CODEC_TYPE_AUDIO: put_tag(pb, "auds"); break;
+// case CODEC_TYPE_TEXT : put_tag(pb, "txts"); break;
+ case CODEC_TYPE_DATA : put_tag(pb, "dats"); break;
+ }
+ if(stream->codec_type == CODEC_TYPE_VIDEO)
+ put_le32(pb, stream->codec_tag);
+ else
+ put_le32(pb, 1);
+ put_le32(pb, 0); /* flags */
+ put_le16(pb, 0); /* priority */
+ put_le16(pb, 0); /* language */
+ put_le32(pb, 0); /* initial frame */
+
+ ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);
+
+ put_le32(pb, au_scale); /* scale */
+ put_le32(pb, au_byterate); /* rate */
+ av_set_pts_info(s->streams[i], 64, au_scale, au_byterate);
+
+ put_le32(pb, 0); /* start */
+ avi->frames_hdr_strm[i] = url_ftell(pb); /* remember this offset to fill later */
+ if (url_is_streamed(pb))
+ put_le32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
+ else
+ put_le32(pb, 0); /* length, XXX: filled later */
+
+ /* suggested buffer size */ //FIXME set at the end to largest chunk
+ if(stream->codec_type == CODEC_TYPE_VIDEO)
+ put_le32(pb, 1024 * 1024);
+ else if(stream->codec_type == CODEC_TYPE_AUDIO)
+ put_le32(pb, 12 * 1024);
+ else
+ put_le32(pb, 0);
+ put_le32(pb, -1); /* quality */
+ put_le32(pb, au_ssize); /* sample size */
+ put_le32(pb, 0);
+ put_le16(pb, stream->width);
+ put_le16(pb, stream->height);
+ end_tag(pb, strh);
+
+ if(stream->codec_type != CODEC_TYPE_DATA){
+ strf = start_tag(pb, "strf");
+ switch(stream->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ put_bmp_header(pb, stream, codec_bmp_tags, 0);
+ break;
+ case CODEC_TYPE_AUDIO:
+ if (put_wav_header(pb, stream) < 0) {
+ av_free(avi);
+ return -1;
+ }
+ break;
+ default:
+ return -1;
+ }
+ end_tag(pb, strf);
+ }
+
+ if (!url_is_streamed(pb)) {
+ unsigned char tag[5];
+ int j;
+
+ /* Starting to lay out AVI OpenDML master index.
+ * We want to make it a JUNK entry for now, since we'd
+ * like to get away without making AVI an OpenDML one
+ * for compatibility reasons.
+ */
+ avi->indexes[i].entry = avi->indexes[i].ents_allocated = 0;
+ avi->indexes[i].indx_start = start_tag(pb, "JUNK");
+ put_le16(pb, 4); /* wLongsPerEntry */
+ put_byte(pb, 0); /* bIndexSubType (0 == frame index) */
+ put_byte(pb, 0); /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
+ put_le32(pb, 0); /* nEntriesInUse (will fill out later on) */
+ put_tag(pb, avi_stream2fourcc(&tag[0], i, stream->codec_type));
+ /* dwChunkId */
+ put_le64(pb, 0); /* dwReserved[3]: must be 0
+ (the put_le32(pb, 0); that would follow stays commented out here) */
+ for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
+ put_le64(pb, 0);
+ end_tag(pb, avi->indexes[i].indx_start);
+ }
+
+ end_tag(pb, list2);
+ }
+
+ if (!url_is_streamed(pb)) {
+ /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
+ avi->odml_list = start_tag(pb, "JUNK");
+ put_tag(pb, "odml");
+ put_tag(pb, "dmlh");
+ put_le32(pb, 248);
+ for (i = 0; i < 248; i+= 4)
+ put_le32(pb, 0);
+ end_tag(pb, avi->odml_list);
+ }
+
+ end_tag(pb, list1);
+
+ list2 = start_tag(pb, "LIST");
+ put_tag(pb, "INFO");
+ avi_write_info_tag(pb, "INAM", s->title);
+ avi_write_info_tag(pb, "IART", s->author);
+ avi_write_info_tag(pb, "ICOP", s->copyright);
+ avi_write_info_tag(pb, "ICMT", s->comment);
+ avi_write_info_tag(pb, "IPRD", s->album);
+ avi_write_info_tag(pb, "IGNR", s->genre);
+ if (s->track) {
+ char str_track[4];
+ snprintf(str_track, 4, "%d", s->track);
+ avi_write_info_tag(pb, "IPRT", str_track);
+ }
+ if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
+ avi_write_info_tag(pb, "ISFT", LIBAVFORMAT_IDENT);
+ end_tag(pb, list2);
+
+ /* some padding for easier tag editing */
+ list2 = start_tag(pb, "JUNK");
+ for (i = 0; i < 1016; i += 4)
+ put_le32(pb, 0);
+ end_tag(pb, list2);
+
+ avi->movi_list = start_tag(pb, "LIST");
+ put_tag(pb, "movi");
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int avi_write_ix(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ AVIContext *avi = s->priv_data;
+ char tag[5];
+ char ix_tag[] = "ix00";
+ int i, j;
+
+ assert(!url_is_streamed(pb));
+
+ if (avi->riff_id > AVI_MASTER_INDEX_SIZE)
+ return -1;
+
+ for (i=0;i<s->nb_streams;i++) {
+ offset_t ix, pos;
+
+ avi_stream2fourcc(&tag[0], i, s->streams[i]->codec->codec_type);
+ ix_tag[3] = '0' + i;
+
+ /* Writing AVI OpenDML leaf index chunk */
+ ix = url_ftell(pb);
+ put_tag(pb, &ix_tag[0]); /* ix?? */
+ put_le32(pb, avi->indexes[i].entry * 8 + 24);
+ /* chunk size */
+ put_le16(pb, 2); /* wLongsPerEntry */
+ put_byte(pb, 0); /* bIndexSubType (0 == frame index) */
+ put_byte(pb, 1); /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */
+ put_le32(pb, avi->indexes[i].entry);
+ /* nEntriesInUse */
+ put_tag(pb, &tag[0]); /* dwChunkId */
+ put_le64(pb, avi->movi_list);/* qwBaseOffset */
+ put_le32(pb, 0); /* dwReserved_3 (must be 0) */
+
+ for (j=0; j<avi->indexes[i].entry; j++) {
+ AVIIentry* ie = avi_get_ientry(&avi->indexes[i], j);
+ put_le32(pb, ie->pos + 8);
+ put_le32(pb, ((uint32_t)ie->len & ~0x80000000) |
+ (ie->flags & 0x10 ? 0 : 0x80000000));
+ }
+ put_flush_packet(pb);
+ pos = url_ftell(pb);
+
+ /* Updating one entry in the AVI OpenDML master index */
+ url_fseek(pb, avi->indexes[i].indx_start - 8, SEEK_SET);
+ put_tag(pb, "indx"); /* enabling this entry */
+ url_fskip(pb, 8);
+ put_le32(pb, avi->riff_id); /* nEntriesInUse */
+ url_fskip(pb, 16*avi->riff_id);
+ put_le64(pb, ix); /* qwOffset */
+ put_le32(pb, pos - ix); /* dwSize */
+ put_le32(pb, avi->indexes[i].entry); /* dwDuration */
+
+ url_fseek(pb, pos, SEEK_SET);
+ }
+ return 0;
+}
+
+static int avi_write_idx1(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ AVIContext *avi = s->priv_data;
+ offset_t idx_chunk;
+ int i;
+ char tag[5];
+
+ if (!url_is_streamed(pb)) {
+ AVIIentry* ie = 0, *tie;
+ int entry[MAX_STREAMS];
+ int empty, stream_id = -1;
+
+ idx_chunk = start_tag(pb, "idx1");
+ memset(&entry[0], 0, sizeof(entry));
+ do {
+ empty = 1;
+ for (i=0; i<s->nb_streams; i++) {
+ if (avi->indexes[i].entry <= entry[i])
+ continue;
+
+ tie = avi_get_ientry(&avi->indexes[i], entry[i]);
+ if (empty || tie->pos < ie->pos) {
+ ie = tie;
+ stream_id = i;
+ }
+ empty = 0;
+ }
+ if (!empty) {
+ avi_stream2fourcc(&tag[0], stream_id,
+ s->streams[stream_id]->codec->codec_type);
+ put_tag(pb, &tag[0]);
+ put_le32(pb, ie->flags);
+ put_le32(pb, ie->pos);
+ put_le32(pb, ie->len);
+ entry[stream_id]++;
+ }
+ } while (!empty);
+ end_tag(pb, idx_chunk);
+
+ avi_write_counters(s, avi->riff_id);
+ }
+ return 0;
+}
+
+static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned char tag[5];
+ unsigned int flags=0;
+ const int stream_index= pkt->stream_index;
+ AVCodecContext *enc= s->streams[stream_index]->codec;
+ int size= pkt->size;
+
+// av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %d\n", pkt->dts, avi->packet_count[stream_index], stream_index);
+ while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avi->packet_count[stream_index]){
+ AVPacket empty_packet;
+
+ av_init_packet(&empty_packet);
+ empty_packet.size= 0;
+ empty_packet.data= NULL;
+ empty_packet.stream_index= stream_index;
+ avi_write_packet(s, &empty_packet);
+// av_log(s, AV_LOG_DEBUG, "dup %"PRId64" %d\n", pkt->dts, avi->packet_count[stream_index]);
+ }
+ avi->packet_count[stream_index]++;
+
+ // Make sure to put an OpenDML chunk when the file size exceeds the limits
+ if (!url_is_streamed(pb) &&
+ (url_ftell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {
+
+ avi_write_ix(s);
+ end_tag(pb, avi->movi_list);
+
+ if (avi->riff_id == 1)
+ avi_write_idx1(s);
+
+ end_tag(pb, avi->riff_start);
+ avi->movi_list = avi_start_new_riff(avi, pb, "AVIX", "movi");
+ }
+
+ avi_stream2fourcc(&tag[0], stream_index, enc->codec_type);
+ if(pkt->flags&PKT_FLAG_KEY)
+ flags = 0x10;
+ if (enc->codec_type == CODEC_TYPE_AUDIO) {
+ avi->audio_strm_length[stream_index] += size;
+ }
+
+ if (!url_is_streamed(&s->pb)) {
+ AVIIndex* idx = &avi->indexes[stream_index];
+ int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
+ int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
+ if (idx->ents_allocated <= idx->entry) {
+ idx->cluster = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
+ if (!idx->cluster)
+ return -1;
+ idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
+ if (!idx->cluster[cl])
+ return -1;
+ idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
+ }
+
+ idx->cluster[cl][id].flags = flags;
+ idx->cluster[cl][id].pos = url_ftell(pb) - avi->movi_list;
+ idx->cluster[cl][id].len = size;
+ idx->entry++;
+ }
+
+ put_buffer(pb, tag, 4);
+ put_le32(pb, size);
+ put_buffer(pb, pkt->data, size);
+ if (size & 1)
+ put_byte(pb, 0);
+
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int avi_write_trailer(AVFormatContext *s)
+{
+ AVIContext *avi = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int res = 0;
+ int i, j, n, nb_frames;
+ offset_t file_size;
+
+ if (!url_is_streamed(pb))
+ {
+ if (avi->riff_id == 1) {
+ end_tag(pb, avi->movi_list);
+ res = avi_write_idx1(s);
+ end_tag(pb, avi->riff_start);
+ } else {
+ avi_write_ix(s);
+ end_tag(pb, avi->movi_list);
+ end_tag(pb, avi->riff_start);
+
+ file_size = url_ftell(pb);
+ url_fseek(pb, avi->odml_list - 8, SEEK_SET);
+ put_tag(pb, "LIST"); /* Making this AVI OpenDML one */
+ url_fskip(pb, 16);
+
+ for (n=nb_frames=0;n<s->nb_streams;n++) {
+ AVCodecContext *stream = s->streams[n]->codec;
+ if (stream->codec_type == CODEC_TYPE_VIDEO) {
+ if (nb_frames < avi->packet_count[n])
+ nb_frames = avi->packet_count[n];
+ } else {
+ if (stream->codec_id == CODEC_ID_MP2 || stream->codec_id == CODEC_ID_MP3) {
+ nb_frames += avi->packet_count[n];
+ }
+ }
+ }
+ put_le32(pb, nb_frames);
+ url_fseek(pb, file_size, SEEK_SET);
+
+ avi_write_counters(s, avi->riff_id);
+ }
+ }
+ put_flush_packet(pb);
+
+ for (i=0; i<MAX_STREAMS; i++) {
+ for (j=0; j<avi->indexes[i].ents_allocated/AVI_INDEX_CLUSTER_SIZE; j++)
+ av_free(avi->indexes[i].cluster[j]);
+ av_free(avi->indexes[i].cluster);
+ avi->indexes[i].cluster = NULL;
+ avi->indexes[i].ents_allocated = avi->indexes[i].entry = 0;
+ }
+
+ return res;
+}
+
+AVOutputFormat avi_muxer = {
+ "avi",
+ "avi format",
+ "video/x-msvideo",
+ "avi",
+ sizeof(AVIContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG4,
+ avi_write_header,
+ avi_write_packet,
+ avi_write_trailer,
+};
+#endif //CONFIG_AVI_MUXER
diff --git a/contrib/ffmpeg/libavformat/avio.c b/contrib/ffmpeg/libavformat/avio.c
new file mode 100644
index 000000000..a2b8a8325
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avio.c
@@ -0,0 +1,192 @@
+/*
+ * Unbuffered io for ffmpeg system
+ * Copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+static int default_interrupt_cb(void);
+
+URLProtocol *first_protocol = NULL;
+URLInterruptCB *url_interrupt_cb = default_interrupt_cb;
+
+int register_protocol(URLProtocol *protocol)
+{
+ URLProtocol **p;
+ p = &first_protocol;
+ while (*p != NULL) p = &(*p)->next;
+ *p = protocol;
+ protocol->next = NULL;
+ return 0;
+}
+
+int url_open(URLContext **puc, const char *filename, int flags)
+{
+ URLContext *uc;
+ URLProtocol *up;
+ const char *p;
+ char proto_str[128], *q;
+ int err;
+
+ p = filename;
+ q = proto_str;
+ while (*p != '\0' && *p != ':') {
+ /* protocols can only contain alphabetic chars */
+ if (!isalpha(*p))
+ goto file_proto;
+ if ((q - proto_str) < sizeof(proto_str) - 1)
+ *q++ = *p;
+ p++;
+ }
+ /* if the protocol has length 1, we consider it to be a DOS drive letter */
+ if (*p == '\0' || (q - proto_str) <= 1) {
+ file_proto:
+ strcpy(proto_str, "file");
+ } else {
+ *q = '\0';
+ }
+
+ up = first_protocol;
+ while (up != NULL) {
+ if (!strcmp(proto_str, up->name))
+ goto found;
+ up = up->next;
+ }
+ err = -ENOENT;
+ goto fail;
+ found:
+ uc = av_malloc(sizeof(URLContext) + strlen(filename));
+ if (!uc) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ strcpy(uc->filename, filename);
+ uc->prot = up;
+ uc->flags = flags;
+ uc->is_streamed = 0; /* default = not streamed */
+ uc->max_packet_size = 0; /* default: stream file */
+ err = up->url_open(uc, filename, flags);
+ if (err < 0) {
+ av_free(uc);
+ *puc = NULL;
+ return err;
+ }
+ *puc = uc;
+ return 0;
+ fail:
+ *puc = NULL;
+ return err;
+}
+
+int url_read(URLContext *h, unsigned char *buf, int size)
+{
+ int ret;
+ if (h->flags & URL_WRONLY)
+ return AVERROR_IO;
+ ret = h->prot->url_read(h, buf, size);
+ return ret;
+}
+
+#if defined(CONFIG_MUXERS) || defined(CONFIG_PROTOCOLS)
+int url_write(URLContext *h, unsigned char *buf, int size)
+{
+ int ret;
+ if (!(h->flags & (URL_WRONLY | URL_RDWR)))
+ return AVERROR_IO;
+ /* avoid sending too big packets */
+ if (h->max_packet_size && size > h->max_packet_size)
+ return AVERROR_IO;
+ ret = h->prot->url_write(h, buf, size);
+ return ret;
+}
+#endif //CONFIG_MUXERS || CONFIG_PROTOCOLS
+
+offset_t url_seek(URLContext *h, offset_t pos, int whence)
+{
+ offset_t ret;
+
+ if (!h->prot->url_seek)
+ return -EPIPE;
+ ret = h->prot->url_seek(h, pos, whence);
+ return ret;
+}
+
+int url_close(URLContext *h)
+{
+ int ret;
+
+ ret = h->prot->url_close(h);
+ av_free(h);
+ return ret;
+}
+
+int url_exist(const char *filename)
+{
+ URLContext *h;
+ if (url_open(&h, filename, URL_RDONLY) < 0)
+ return 0;
+ url_close(h);
+ return 1;
+}
+
+offset_t url_filesize(URLContext *h)
+{
+ offset_t pos, size;
+
+ pos = url_seek(h, 0, SEEK_CUR);
+ size = url_seek(h, -1, SEEK_END)+1;
+ url_seek(h, pos, SEEK_SET);
+ return size;
+}
+
+/*
+ * Return the maximum packet size associated with a packetized file
+ * handle. If the file is not packetized (a stream such as http, or a
+ * file on disk), 0 is returned.
+ *
+ * @param h file handle
+ * @return maximum packet size in bytes
+ */
+int url_get_max_packet_size(URLContext *h)
+{
+ return h->max_packet_size;
+}
+
+void url_get_filename(URLContext *h, char *buf, int buf_size)
+{
+ pstrcpy(buf, buf_size, h->filename);
+}
+
+
+static int default_interrupt_cb(void)
+{
+ return 0;
+}
+
+/**
+ * The callback is called in blocking functions to test regularly whether
+ * asynchronous interruption is needed. -EINTR is returned in this
+ * case by the interrupted function. 'NULL' means no interrupt
+ * callback is given.
+ */
+void url_set_interrupt_cb(URLInterruptCB *interrupt_cb)
+{
+ if (!interrupt_cb)
+ interrupt_cb = default_interrupt_cb;
+ url_interrupt_cb = interrupt_cb;
+}
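+
+/* Sketch of what a caller might register (hypothetical names; a real
+ * application would set its own quit flag from a signal handler or UI
+ * thread):
+ *
+ *   static volatile int stop_requested;
+ *   static int my_interrupt_cb(void) { return stop_requested; }
+ *   ...
+ *   url_set_interrupt_cb(my_interrupt_cb);
+ */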
diff --git a/contrib/ffmpeg/libavformat/avio.h b/contrib/ffmpeg/libavformat/avio.h
new file mode 100644
index 000000000..f0fd1a85c
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avio.h
@@ -0,0 +1,201 @@
+/*
+ * unbuffered io for ffmpeg system
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef AVIO_H
+#define AVIO_H
+
+/* output byte stream handling */
+
+typedef int64_t offset_t;
+
+/* unbuffered I/O */
+
+struct URLContext {
+ struct URLProtocol *prot;
+ int flags;
+ int is_streamed; /* true if streamed (no seek possible), default = false */
+ int max_packet_size; /* if non zero, the stream is packetized with this max packet size */
+ void *priv_data;
+ char filename[1]; /* specified filename */
+};
+
+typedef struct URLContext URLContext;
+
+typedef struct URLPollEntry {
+ URLContext *handle;
+ int events;
+ int revents;
+} URLPollEntry;
+
+#define URL_RDONLY 0
+#define URL_WRONLY 1
+#define URL_RDWR 2
+
+typedef int URLInterruptCB(void);
+
+int url_open(URLContext **h, const char *filename, int flags);
+int url_read(URLContext *h, unsigned char *buf, int size);
+int url_write(URLContext *h, unsigned char *buf, int size);
+offset_t url_seek(URLContext *h, offset_t pos, int whence);
+int url_close(URLContext *h);
+int url_exist(const char *filename);
+offset_t url_filesize(URLContext *h);
+int url_get_max_packet_size(URLContext *h);
+void url_get_filename(URLContext *h, char *buf, int buf_size);
+
+/* the callback is called in blocking functions to test regularly whether
+ asynchronous interruption is needed. -EINTR is returned in this
+ case by the interrupted function. 'NULL' means no interrupt
+ callback is given. */
+void url_set_interrupt_cb(URLInterruptCB *interrupt_cb);
+
+/* not implemented */
+int url_poll(URLPollEntry *poll_table, int n, int timeout);
+
+typedef struct URLProtocol {
+ const char *name;
+ int (*url_open)(URLContext *h, const char *filename, int flags);
+ int (*url_read)(URLContext *h, unsigned char *buf, int size);
+ int (*url_write)(URLContext *h, unsigned char *buf, int size);
+ offset_t (*url_seek)(URLContext *h, offset_t pos, int whence);
+ int (*url_close)(URLContext *h);
+ struct URLProtocol *next;
+} URLProtocol;
+
+extern URLProtocol *first_protocol;
+extern URLInterruptCB *url_interrupt_cb;
+
+int register_protocol(URLProtocol *protocol);
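+
+/* Registering a custom protocol is a matter of filling in a URLProtocol and
+ * chaining it in (sketch only; "null" and the callback names are invented
+ * for the example):
+ *
+ *   static int null_open (URLContext *h, const char *filename, int flags) { return 0; }
+ *   static int null_read (URLContext *h, unsigned char *buf, int size)    { return 0; }
+ *   static int null_close(URLContext *h)                                  { return 0; }
+ *   URLProtocol null_protocol = { "null", null_open, null_read, NULL, NULL, null_close };
+ *   ...
+ *   register_protocol(&null_protocol);
+ */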
+
+typedef struct {
+ unsigned char *buffer;
+ int buffer_size;
+ unsigned char *buf_ptr, *buf_end;
+ void *opaque;
+ int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);
+ int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);
+ offset_t (*seek)(void *opaque, offset_t offset, int whence);
+ offset_t pos; /* position in the file of the current buffer */
+ int must_flush; /* true if the next seek should flush */
+ int eof_reached; /* true if eof reached */
+ int write_flag; /* true if open for writing */
+ int is_streamed;
+ int max_packet_size;
+ unsigned long checksum;
+ unsigned char *checksum_ptr;
+ unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size);
+ int error; ///< contains the error code or 0 if no error happened
+} ByteIOContext;
+
+int init_put_byte(ByteIOContext *s,
+ unsigned char *buffer,
+ int buffer_size,
+ int write_flag,
+ void *opaque,
+ int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
+ int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
+ offset_t (*seek)(void *opaque, offset_t offset, int whence));
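+
+/* Sketch: a ByteIOContext reading straight from a memory buffer (names are
+ * illustrative; with read_packet == NULL and write_flag == 0 the context
+ * simply consumes the buffer that was passed in):
+ *
+ *   ByteIOContext pb;
+ *   init_put_byte(&pb, buf, buf_size, 0, NULL, NULL, NULL, NULL);
+ *   tag = get_le32(&pb);
+ */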
+
+void put_byte(ByteIOContext *s, int b);
+void put_buffer(ByteIOContext *s, const unsigned char *buf, int size);
+void put_le64(ByteIOContext *s, uint64_t val);
+void put_be64(ByteIOContext *s, uint64_t val);
+void put_le32(ByteIOContext *s, unsigned int val);
+void put_be32(ByteIOContext *s, unsigned int val);
+void put_le24(ByteIOContext *s, unsigned int val);
+void put_be24(ByteIOContext *s, unsigned int val);
+void put_le16(ByteIOContext *s, unsigned int val);
+void put_be16(ByteIOContext *s, unsigned int val);
+void put_tag(ByteIOContext *s, const char *tag);
+
+void put_strz(ByteIOContext *s, const char *buf);
+
+offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence);
+void url_fskip(ByteIOContext *s, offset_t offset);
+offset_t url_ftell(ByteIOContext *s);
+offset_t url_fsize(ByteIOContext *s);
+int url_feof(ByteIOContext *s);
+int url_ferror(ByteIOContext *s);
+
+#define URL_EOF (-1)
+int url_fgetc(ByteIOContext *s);
+#ifdef __GNUC__
+int url_fprintf(ByteIOContext *s, const char *fmt, ...) __attribute__ ((__format__ (__printf__, 2, 3)));
+#else
+int url_fprintf(ByteIOContext *s, const char *fmt, ...);
+#endif
+char *url_fgets(ByteIOContext *s, char *buf, int buf_size);
+
+void put_flush_packet(ByteIOContext *s);
+
+int get_buffer(ByteIOContext *s, unsigned char *buf, int size);
+int get_partial_buffer(ByteIOContext *s, unsigned char *buf, int size);
+int get_byte(ByteIOContext *s);
+unsigned int get_le24(ByteIOContext *s);
+unsigned int get_le32(ByteIOContext *s);
+uint64_t get_le64(ByteIOContext *s);
+unsigned int get_le16(ByteIOContext *s);
+
+char *get_strz(ByteIOContext *s, char *buf, int maxlen);
+unsigned int get_be16(ByteIOContext *s);
+unsigned int get_be24(ByteIOContext *s);
+unsigned int get_be32(ByteIOContext *s);
+uint64_t get_be64(ByteIOContext *s);
+
+static inline int url_is_streamed(ByteIOContext *s)
+{
+ return s->is_streamed;
+}
+
+int url_fdopen(ByteIOContext *s, URLContext *h);
+int url_setbufsize(ByteIOContext *s, int buf_size);
+int url_fopen(ByteIOContext *s, const char *filename, int flags);
+int url_fclose(ByteIOContext *s);
+URLContext *url_fileno(ByteIOContext *s);
+int url_fget_max_packet_size(ByteIOContext *s);
+
+int url_open_buf(ByteIOContext *s, uint8_t *buf, int buf_size, int flags);
+int url_close_buf(ByteIOContext *s);
+
+int url_open_dyn_buf(ByteIOContext *s);
+int url_open_dyn_packet_buf(ByteIOContext *s, int max_packet_size);
+int url_close_dyn_buf(ByteIOContext *s, uint8_t **pbuffer);
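+
+/* Dynamic buffer sketch (illustration only; freeing the returned buffer with
+ * av_free() is the caller's responsibility):
+ *
+ *   ByteIOContext pb;
+ *   uint8_t *data;
+ *   int size;
+ *   url_open_dyn_buf(&pb);
+ *   put_tag(&pb, "RIFF");
+ *   put_le32(&pb, 0);
+ *   size = url_close_dyn_buf(&pb, &data);
+ *   // data now holds the 8 bytes written above
+ *   av_free(data);
+ */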
+
+unsigned long get_checksum(ByteIOContext *s);
+void init_checksum(ByteIOContext *s, unsigned long (*update_checksum)(unsigned long c, const uint8_t *p, unsigned int len), unsigned long checksum);
+
+/* file.c */
+extern URLProtocol file_protocol;
+extern URLProtocol pipe_protocol;
+
+/* udp.c */
+extern URLProtocol udp_protocol;
+int udp_set_remote_url(URLContext *h, const char *uri);
+int udp_get_local_port(URLContext *h);
+int udp_get_file_handle(URLContext *h);
+
+/* tcp.c */
+extern URLProtocol tcp_protocol;
+
+/* http.c */
+extern URLProtocol http_protocol;
+
+#endif
+
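For orientation, a minimal read-side sketch of the ByteIOContext API declared above; it assumes avformat.h/avio.h are included and that URL_RDONLY is defined in the earlier part of this header (not shown in this hunk):

    static int dump_first_dword(const char *filename)
    {
        ByteIOContext pb;
        unsigned int v;

        if (url_fopen(&pb, filename, URL_RDONLY) < 0)
            return -1;                      /* protocol lookup or open failed */
        v = get_le32(&pb);                  /* buffered little-endian read */
        if (url_ferror(&pb) || url_feof(&pb)) {
            url_fclose(&pb);
            return -1;
        }
        av_log(NULL, AV_LOG_INFO, "first dword: 0x%08x\n", v);
        return url_fclose(&pb);             /* frees the internal buffer, closes the URLContext */
    }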
diff --git a/contrib/ffmpeg/libavformat/aviobuf.c b/contrib/ffmpeg/libavformat/aviobuf.c
new file mode 100644
index 000000000..866641ad0
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/aviobuf.c
@@ -0,0 +1,790 @@
+/*
+ * Buffered I/O for ffmpeg system
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "avio.h"
+#include <stdarg.h>
+
+#define IO_BUFFER_SIZE 32768
+
+static void fill_buffer(ByteIOContext *s);
+
+int init_put_byte(ByteIOContext *s,
+ unsigned char *buffer,
+ int buffer_size,
+ int write_flag,
+ void *opaque,
+ int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
+ int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
+ offset_t (*seek)(void *opaque, offset_t offset, int whence))
+{
+ s->buffer = buffer;
+ s->buffer_size = buffer_size;
+ s->buf_ptr = buffer;
+ s->write_flag = write_flag;
+ if (!s->write_flag)
+ s->buf_end = buffer;
+ else
+ s->buf_end = buffer + buffer_size;
+ s->opaque = opaque;
+ s->write_packet = write_packet;
+ s->read_packet = read_packet;
+ s->seek = seek;
+ s->pos = 0;
+ s->must_flush = 0;
+ s->eof_reached = 0;
+ s->error = 0;
+ s->is_streamed = 0;
+ s->max_packet_size = 0;
+ s->update_checksum= NULL;
+ if(!read_packet && !write_flag){
+ s->pos = buffer_size;
+ s->buf_end = s->buffer + buffer_size;
+ }
+ return 0;
+}
+
+static void flush_buffer(ByteIOContext *s)
+{
+ if (s->buf_ptr > s->buffer) {
+ if (s->write_packet && !s->error){
+ int ret= s->write_packet(s->opaque, s->buffer, s->buf_ptr - s->buffer);
+ if(ret < 0){
+ s->error = ret;
+ }
+ }
+ if(s->update_checksum){
+ s->checksum= s->update_checksum(s->checksum, s->checksum_ptr, s->buf_ptr - s->checksum_ptr);
+ s->checksum_ptr= s->buffer;
+ }
+ s->pos += s->buf_ptr - s->buffer;
+ }
+ s->buf_ptr = s->buffer;
+}
+
+void put_byte(ByteIOContext *s, int b)
+{
+ *(s->buf_ptr)++ = b;
+ if (s->buf_ptr >= s->buf_end)
+ flush_buffer(s);
+}
+
+void put_buffer(ByteIOContext *s, const unsigned char *buf, int size)
+{
+ int len;
+
+ while (size > 0) {
+ len = (s->buf_end - s->buf_ptr);
+ if (len > size)
+ len = size;
+ memcpy(s->buf_ptr, buf, len);
+ s->buf_ptr += len;
+
+ if (s->buf_ptr >= s->buf_end)
+ flush_buffer(s);
+
+ buf += len;
+ size -= len;
+ }
+}
+
+void put_flush_packet(ByteIOContext *s)
+{
+ flush_buffer(s);
+ s->must_flush = 0;
+}
+
+offset_t url_fseek(ByteIOContext *s, offset_t offset, int whence)
+{
+ offset_t offset1;
+ offset_t pos= s->pos - (s->write_flag ? 0 : (s->buf_end - s->buffer));
+
+ if (whence != SEEK_CUR && whence != SEEK_SET)
+ return -EINVAL;
+
+ if (whence == SEEK_CUR) {
+ offset1 = pos + (s->buf_ptr - s->buffer);
+ if (offset == 0)
+ return offset1;
+ offset += offset1;
+ }
+ offset1 = offset - pos;
+ if (!s->must_flush &&
+ offset1 >= 0 && offset1 < (s->buf_end - s->buffer)) {
+ /* can do the seek inside the buffer */
+ s->buf_ptr = s->buffer + offset1;
+ } else if(s->is_streamed && !s->write_flag &&
+ offset1 >= 0 && offset1 < (s->buf_end - s->buffer) + (1<<16)){
+ while(s->pos < offset && !s->eof_reached)
+ fill_buffer(s);
+ s->buf_ptr = s->buf_end + offset - s->pos;
+ } else {
+#if defined(CONFIG_MUXERS) || defined(CONFIG_NETWORK)
+ if (s->write_flag) {
+ flush_buffer(s);
+ s->must_flush = 1;
+ } else
+#endif /* defined(CONFIG_MUXERS) || defined(CONFIG_NETWORK) */
+ {
+ s->buf_end = s->buffer;
+ }
+ s->buf_ptr = s->buffer;
+ if (!s->seek || s->seek(s->opaque, offset, SEEK_SET) == (offset_t)-EPIPE)
+ return -EPIPE;
+ s->pos = offset;
+ }
+ s->eof_reached = 0;
+ return offset;
+}
+
+void url_fskip(ByteIOContext *s, offset_t offset)
+{
+ url_fseek(s, offset, SEEK_CUR);
+}
+
+offset_t url_ftell(ByteIOContext *s)
+{
+ return url_fseek(s, 0, SEEK_CUR);
+}
+
+offset_t url_fsize(ByteIOContext *s)
+{
+ offset_t size;
+
+ if (!s->seek)
+ return -EPIPE;
+ size = s->seek(s->opaque, -1, SEEK_END) + 1;
+ s->seek(s->opaque, s->pos, SEEK_SET);
+ return size;
+}
+
+int url_feof(ByteIOContext *s)
+{
+ return s->eof_reached;
+}
+
+int url_ferror(ByteIOContext *s)
+{
+ return s->error;
+}
+
+#if defined(CONFIG_MUXERS) || defined(CONFIG_PROTOCOLS)
+void put_le32(ByteIOContext *s, unsigned int val)
+{
+ put_byte(s, val);
+ put_byte(s, val >> 8);
+ put_byte(s, val >> 16);
+ put_byte(s, val >> 24);
+}
+
+void put_be32(ByteIOContext *s, unsigned int val)
+{
+ put_byte(s, val >> 24);
+ put_byte(s, val >> 16);
+ put_byte(s, val >> 8);
+ put_byte(s, val);
+}
+
+void put_strz(ByteIOContext *s, const char *str)
+{
+ if (str)
+ put_buffer(s, (const unsigned char *) str, strlen(str) + 1);
+ else
+ put_byte(s, 0);
+}
+
+void put_le64(ByteIOContext *s, uint64_t val)
+{
+ put_le32(s, (uint32_t)(val & 0xffffffff));
+ put_le32(s, (uint32_t)(val >> 32));
+}
+
+void put_be64(ByteIOContext *s, uint64_t val)
+{
+ put_be32(s, (uint32_t)(val >> 32));
+ put_be32(s, (uint32_t)(val & 0xffffffff));
+}
+
+void put_le16(ByteIOContext *s, unsigned int val)
+{
+ put_byte(s, val);
+ put_byte(s, val >> 8);
+}
+
+void put_be16(ByteIOContext *s, unsigned int val)
+{
+ put_byte(s, val >> 8);
+ put_byte(s, val);
+}
+
+void put_le24(ByteIOContext *s, unsigned int val)
+{
+ put_le16(s, val & 0xffff);
+ put_byte(s, val >> 16);
+}
+
+void put_be24(ByteIOContext *s, unsigned int val)
+{
+ put_be16(s, val >> 8);
+ put_byte(s, val);
+}
+
+void put_tag(ByteIOContext *s, const char *tag)
+{
+ while (*tag) {
+ put_byte(s, *tag++);
+ }
+}
+#endif //CONFIG_MUXERS || CONFIG_PROTOCOLS
+
+/* Input stream */
+
+static void fill_buffer(ByteIOContext *s)
+{
+ int len;
+
+ /* no need to do anything if EOF already reached */
+ if (s->eof_reached)
+ return;
+
+ if(s->update_checksum){
+ if(s->buf_end > s->checksum_ptr)
+ s->checksum= s->update_checksum(s->checksum, s->checksum_ptr, s->buf_end - s->checksum_ptr);
+ s->checksum_ptr= s->buffer;
+ }
+
+ len = s->read_packet(s->opaque, s->buffer, s->buffer_size);
+ if (len <= 0) {
+ /* do not modify buffer if EOF reached so that a seek back can
+ be done without rereading data */
+ s->eof_reached = 1;
+ if(len<0)
+ s->error= len;
+ } else {
+ s->pos += len;
+ s->buf_ptr = s->buffer;
+ s->buf_end = s->buffer + len;
+ }
+}
+
+unsigned long get_checksum(ByteIOContext *s){
+ s->checksum= s->update_checksum(s->checksum, s->checksum_ptr, s->buf_ptr - s->checksum_ptr);
+ s->update_checksum= NULL;
+ return s->checksum;
+}
+
+void init_checksum(ByteIOContext *s, unsigned long (*update_checksum)(unsigned long c, const uint8_t *p, unsigned int len), unsigned long checksum){
+ s->update_checksum= update_checksum;
+ if(s->update_checksum){
+ s->checksum= checksum;
+ s->checksum_ptr= s->buf_ptr;
+ }
+}
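The init_checksum()/get_checksum() pair lets a caller transparently checksum everything read between the two calls. A hedged sketch of one possible use, assuming av_adler32_update() from libavutil (the same callback crc.c uses later in this patch) and assuming the container stores the value big-endian right after the block:

    static int read_checked_block(ByteIOContext *pb, uint8_t *buf, int size)
    {
        unsigned long computed, stored;

        init_checksum(pb, av_adler32_update, 1);    /* 1 is the Adler-32 seed */
        get_buffer(pb, buf, size);                  /* bytes are folded into the checksum */
        computed = get_checksum(pb);                /* also stops further checksumming */
        stored   = get_be32(pb);                    /* assumed layout: checksum follows the block */
        return computed == stored ? 0 : -1;
    }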
+
+/* NOTE: return 0 if EOF, so you cannot use it if EOF handling is
+ necessary */
+/* XXX: put an inline version */
+int get_byte(ByteIOContext *s)
+{
+ if (s->buf_ptr < s->buf_end) {
+ return *s->buf_ptr++;
+ } else {
+ fill_buffer(s);
+ if (s->buf_ptr < s->buf_end)
+ return *s->buf_ptr++;
+ else
+ return 0;
+ }
+}
+
+/* NOTE: return URL_EOF (-1) if EOF */
+int url_fgetc(ByteIOContext *s)
+{
+ if (s->buf_ptr < s->buf_end) {
+ return *s->buf_ptr++;
+ } else {
+ fill_buffer(s);
+ if (s->buf_ptr < s->buf_end)
+ return *s->buf_ptr++;
+ else
+ return URL_EOF;
+ }
+}
+
+int get_buffer(ByteIOContext *s, unsigned char *buf, int size)
+{
+ int len, size1;
+
+ size1 = size;
+ while (size > 0) {
+ len = s->buf_end - s->buf_ptr;
+ if (len > size)
+ len = size;
+ if (len == 0) {
+ if(size > s->buffer_size && !s->update_checksum){
+ len = s->read_packet(s->opaque, buf, size);
+ if (len <= 0) {
+ /* do not modify buffer if EOF reached so that a seek back can
+ be done without rereading data */
+ s->eof_reached = 1;
+ if(len<0)
+ s->error= len;
+ break;
+ } else {
+ s->pos += len;
+ size -= len;
+ buf += len;
+ s->buf_ptr = s->buffer;
+ s->buf_end = s->buffer/* + len*/;
+ }
+ }else{
+ fill_buffer(s);
+ len = s->buf_end - s->buf_ptr;
+ if (len == 0)
+ break;
+ }
+ } else {
+ memcpy(buf, s->buf_ptr, len);
+ buf += len;
+ s->buf_ptr += len;
+ size -= len;
+ }
+ }
+ return size1 - size;
+}
+
+int get_partial_buffer(ByteIOContext *s, unsigned char *buf, int size)
+{
+ int len;
+
+ if(size<0)
+ return -1;
+
+ len = s->buf_end - s->buf_ptr;
+ if (len == 0) {
+ fill_buffer(s);
+ len = s->buf_end - s->buf_ptr;
+ }
+ if (len > size)
+ len = size;
+ memcpy(buf, s->buf_ptr, len);
+ s->buf_ptr += len;
+ return len;
+}
+
+unsigned int get_le16(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_byte(s);
+ val |= get_byte(s) << 8;
+ return val;
+}
+
+unsigned int get_le24(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_le16(s);
+ val |= get_byte(s) << 16;
+ return val;
+}
+
+unsigned int get_le32(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_le16(s);
+ val |= get_le16(s) << 16;
+ return val;
+}
+
+uint64_t get_le64(ByteIOContext *s)
+{
+ uint64_t val;
+ val = (uint64_t)get_le32(s);
+ val |= (uint64_t)get_le32(s) << 32;
+ return val;
+}
+
+unsigned int get_be16(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_byte(s) << 8;
+ val |= get_byte(s);
+ return val;
+}
+
+unsigned int get_be24(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_be16(s) << 8;
+ val |= get_byte(s);
+ return val;
+}
+unsigned int get_be32(ByteIOContext *s)
+{
+ unsigned int val;
+ val = get_be16(s) << 16;
+ val |= get_be16(s);
+ return val;
+}
+
+char *get_strz(ByteIOContext *s, char *buf, int maxlen)
+{
+ int i = 0;
+ char c;
+
+ while ((c = get_byte(s))) {
+ if (i < maxlen-1)
+ buf[i++] = c;
+ }
+
+ buf[i] = 0; /* Ensure null terminated, but may be truncated */
+
+ return buf;
+}
+
+uint64_t get_be64(ByteIOContext *s)
+{
+ uint64_t val;
+ val = (uint64_t)get_be32(s) << 32;
+ val |= (uint64_t)get_be32(s);
+ return val;
+}
+
+/* link with avio functions */
+
+#ifdef CONFIG_MUXERS
+static int url_write_packet(void *opaque, uint8_t *buf, int buf_size)
+{
+ URLContext *h = opaque;
+ return url_write(h, buf, buf_size);
+}
+#else
+#define url_write_packet NULL
+#endif //CONFIG_MUXERS
+
+static int url_read_packet(void *opaque, uint8_t *buf, int buf_size)
+{
+ URLContext *h = opaque;
+ return url_read(h, buf, buf_size);
+}
+
+static offset_t url_seek_packet(void *opaque, offset_t offset, int whence)
+{
+ URLContext *h = opaque;
+ return url_seek(h, offset, whence);
+ //return 0;
+}
+
+int url_fdopen(ByteIOContext *s, URLContext *h)
+{
+ uint8_t *buffer;
+ int buffer_size, max_packet_size;
+
+
+ max_packet_size = url_get_max_packet_size(h);
+ if (max_packet_size) {
+ buffer_size = max_packet_size; /* no need to buffer more than one packet */
+ } else {
+ buffer_size = IO_BUFFER_SIZE;
+ }
+ buffer = av_malloc(buffer_size);
+ if (!buffer)
+ return -ENOMEM;
+
+ if (init_put_byte(s, buffer, buffer_size,
+ (h->flags & URL_WRONLY || h->flags & URL_RDWR), h,
+ url_read_packet, url_write_packet, url_seek_packet) < 0) {
+ av_free(buffer);
+ return AVERROR_IO;
+ }
+ s->is_streamed = h->is_streamed;
+ s->max_packet_size = max_packet_size;
+ return 0;
+}
+
+/* XXX: must be called before any I/O */
+int url_setbufsize(ByteIOContext *s, int buf_size)
+{
+ uint8_t *buffer;
+ buffer = av_malloc(buf_size);
+ if (!buffer)
+ return -ENOMEM;
+
+ av_free(s->buffer);
+ s->buffer = buffer;
+ s->buffer_size = buf_size;
+ s->buf_ptr = buffer;
+ if (!s->write_flag)
+ s->buf_end = buffer;
+ else
+ s->buf_end = buffer + buf_size;
+ return 0;
+}
+
+/* NOTE: when opened as read/write, the buffers are only used for
+ reading */
+int url_fopen(ByteIOContext *s, const char *filename, int flags)
+{
+ URLContext *h;
+ int err;
+
+ err = url_open(&h, filename, flags);
+ if (err < 0)
+ return err;
+ err = url_fdopen(s, h);
+ if (err < 0) {
+ url_close(h);
+ return err;
+ }
+ return 0;
+}
+
+int url_fclose(ByteIOContext *s)
+{
+ URLContext *h = s->opaque;
+
+ av_free(s->buffer);
+ memset(s, 0, sizeof(ByteIOContext));
+ return url_close(h);
+}
+
+URLContext *url_fileno(ByteIOContext *s)
+{
+ return s->opaque;
+}
+
+#ifdef CONFIG_MUXERS
+/* XXX: currently size is limited */
+int url_fprintf(ByteIOContext *s, const char *fmt, ...)
+{
+ va_list ap;
+ char buf[4096];
+ int ret;
+
+ va_start(ap, fmt);
+ ret = vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ put_buffer(s, buf, strlen(buf));
+ return ret;
+}
+#endif //CONFIG_MUXERS
+
+/* note: unlike fgets, the EOL character is not returned and the whole
+ line is parsed. Returns NULL if the first character read was EOF. */
+char *url_fgets(ByteIOContext *s, char *buf, int buf_size)
+{
+ int c;
+ char *q;
+
+ c = url_fgetc(s);
+ if (c == EOF)
+ return NULL;
+ q = buf;
+ for(;;) {
+ if (c == EOF || c == '\n')
+ break;
+ if ((q - buf) < buf_size - 1)
+ *q++ = c;
+ c = url_fgetc(s);
+ }
+ if (buf_size > 0)
+ *q = '\0';
+ return buf;
+}
+
+/*
+ * Return the maximum packet size associated with a packetized buffered file
+ * handle. If the file is not packetized (a stream such as http, or a file on
+ * disk), then 0 is returned.
+ *
+ * @param s buffered file handle
+ * @return maximum packet size in bytes
+ */
+int url_fget_max_packet_size(ByteIOContext *s)
+{
+ return s->max_packet_size;
+}
+
+/* url_open_dyn_buf and url_close_dyn_buf are used in rtp.c to send a response
+ * back to the server even if CONFIG_MUXERS is not set. */
+#if defined(CONFIG_MUXERS) || defined(CONFIG_NETWORK)
+/* buffer handling */
+int url_open_buf(ByteIOContext *s, uint8_t *buf, int buf_size, int flags)
+{
+ return init_put_byte(s, buf, buf_size,
+ (flags & URL_WRONLY || flags & URL_RDWR),
+ NULL, NULL, NULL, NULL);
+}
+
+/* return the written or read size */
+int url_close_buf(ByteIOContext *s)
+{
+ put_flush_packet(s);
+ return s->buf_ptr - s->buffer;
+}
+
+/* output in a dynamic buffer */
+
+typedef struct DynBuffer {
+ int pos, size, allocated_size;
+ uint8_t *buffer;
+ int io_buffer_size;
+ uint8_t io_buffer[1];
+} DynBuffer;
+
+static int dyn_buf_write(void *opaque, uint8_t *buf, int buf_size)
+{
+ DynBuffer *d = opaque;
+ int new_size, new_allocated_size;
+
+ /* reallocate buffer if needed */
+ new_size = d->pos + buf_size;
+ new_allocated_size = d->allocated_size;
+ if(new_size < d->pos || new_size > INT_MAX/2)
+ return -1;
+ while (new_size > new_allocated_size) {
+ if (!new_allocated_size)
+ new_allocated_size = new_size;
+ else
+ new_allocated_size += new_allocated_size / 2 + 1;
+ }
+
+ if (new_allocated_size > d->allocated_size) {
+ d->buffer = av_realloc(d->buffer, new_allocated_size);
+ if(d->buffer == NULL)
+ return -1234;
+ d->allocated_size = new_allocated_size;
+ }
+ memcpy(d->buffer + d->pos, buf, buf_size);
+ d->pos = new_size;
+ if (d->pos > d->size)
+ d->size = d->pos;
+ return buf_size;
+}
+
+static int dyn_packet_buf_write(void *opaque, uint8_t *buf, int buf_size)
+{
+ unsigned char buf1[4];
+ int ret;
+
+ /* packetized write: output the header */
+ buf1[0] = (buf_size >> 24);
+ buf1[1] = (buf_size >> 16);
+ buf1[2] = (buf_size >> 8);
+ buf1[3] = (buf_size);
+ ret= dyn_buf_write(opaque, buf1, 4);
+ if(ret < 0)
+ return ret;
+
+ /* then the data */
+ return dyn_buf_write(opaque, buf, buf_size);
+}
+
+static offset_t dyn_buf_seek(void *opaque, offset_t offset, int whence)
+{
+ DynBuffer *d = opaque;
+
+ if (whence == SEEK_CUR)
+ offset += d->pos;
+ else if (whence == SEEK_END)
+ offset += d->size;
+ if (offset < 0 || offset > 0x7fffffffLL)
+ return -1;
+ d->pos = offset;
+ return 0;
+}
+
+static int url_open_dyn_buf_internal(ByteIOContext *s, int max_packet_size)
+{
+ DynBuffer *d;
+ int io_buffer_size, ret;
+
+ if (max_packet_size)
+ io_buffer_size = max_packet_size;
+ else
+ io_buffer_size = 1024;
+
+ if(sizeof(DynBuffer) + io_buffer_size < io_buffer_size)
+ return -1;
+ d = av_malloc(sizeof(DynBuffer) + io_buffer_size);
+ if (!d)
+ return -1;
+ d->io_buffer_size = io_buffer_size;
+ d->buffer = NULL;
+ d->pos = 0;
+ d->size = 0;
+ d->allocated_size = 0;
+ ret = init_put_byte(s, d->io_buffer, io_buffer_size,
+ 1, d, NULL,
+ max_packet_size ? dyn_packet_buf_write : dyn_buf_write,
+ max_packet_size ? NULL : dyn_buf_seek);
+ if (ret == 0) {
+ s->max_packet_size = max_packet_size;
+ }
+ return ret;
+}
+
+/*
+ * Open a write-only memory stream.
+ *
+ * @param s new IO context
+ * @return zero if no error.
+ */
+int url_open_dyn_buf(ByteIOContext *s)
+{
+ return url_open_dyn_buf_internal(s, 0);
+}
+
+/*
+ * Open a write-only packetized memory stream with a maximum packet
+ * size of 'max_packet_size'. The stream is stored in a memory buffer
+ * with a big endian 4 byte header giving the packet size in bytes.
+ *
+ * @param s new IO context
+ * @param max_packet_size maximum packet size (must be > 0)
+ * @return zero if no error.
+ */
+int url_open_dyn_packet_buf(ByteIOContext *s, int max_packet_size)
+{
+ if (max_packet_size <= 0)
+ return -1;
+ return url_open_dyn_buf_internal(s, max_packet_size);
+}
+
+/*
+ * Return the written size and a pointer to the buffer. The buffer
+ * must be freed with av_free().
+ * @param s IO context
+ * @param pbuffer pointer to the byte buffer
+ * @return the length of the byte buffer
+ */
+int url_close_dyn_buf(ByteIOContext *s, uint8_t **pbuffer)
+{
+ DynBuffer *d = s->opaque;
+ int size;
+
+ put_flush_packet(s);
+
+ *pbuffer = d->buffer;
+ size = d->size;
+ av_free(d);
+ return size;
+}
+#endif /* CONFIG_MUXERS || CONFIG_NETWORK */
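A minimal sketch of how the dynamic buffer above is meant to be driven; the helper name is illustrative, and per the comment on url_close_dyn_buf() the returned buffer must be released by the caller with av_free():

    static int build_blob(uint8_t **out)
    {
        ByteIOContext pb;
        int size;

        if (url_open_dyn_buf(&pb) < 0)
            return -1;
        put_buffer(&pb, (const unsigned char *)"hello", 5);   /* any put_* call works here */
        size = url_close_dyn_buf(&pb, out);                   /* flushes and returns bytes written */
        /* ... use (*out, size) ..., then av_free(*out); */
        return size;
    }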
diff --git a/contrib/ffmpeg/libavformat/avisynth.c b/contrib/ffmpeg/libavformat/avisynth.c
new file mode 100644
index 000000000..1afcdea5e
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avisynth.c
@@ -0,0 +1,222 @@
+/*
+ * AVISynth support for ffmpeg system
+ * Copyright (c) 2006 DivX, Inc.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "riff.h"
+
+#include <windows.h>
+#include <vfw.h>
+
+typedef struct {
+ PAVISTREAM handle;
+ AVISTREAMINFO info;
+ DWORD read;
+ LONG chunck_size;
+ LONG chunck_samples;
+} AVISynthStream;
+
+typedef struct {
+ PAVIFILE file;
+ AVISynthStream *streams;
+ int nb_streams;
+ int next_stream;
+} AVISynthContext;
+
+static int avisynth_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ AVISynthContext *avs = s->priv_data;
+ HRESULT res;
+ AVIFILEINFO info;
+ DWORD id;
+ AVStream *st;
+ AVISynthStream *stream;
+
+ AVIFileInit();
+
+ res = AVIFileOpen(&avs->file, s->filename, OF_READ|OF_SHARE_DENY_WRITE, NULL);
+ if (res != S_OK)
+ {
+ av_log(s, AV_LOG_ERROR, "AVIFileOpen failed with error %ld", res);
+ AVIFileExit();
+ return -1;
+ }
+
+ res = AVIFileInfo(avs->file, &info, sizeof(info));
+ if (res != S_OK)
+ {
+ av_log(s, AV_LOG_ERROR, "AVIFileInfo failed with error %ld", res);
+ AVIFileExit();
+ return -1;
+ }
+
+ avs->streams = av_mallocz(info.dwStreams * sizeof(AVISynthStream));
+
+ for (id=0; id<info.dwStreams; id++)
+ {
+ stream = &avs->streams[id];
+ stream->read = 0;
+ if (AVIFileGetStream(avs->file, &stream->handle, 0, id) == S_OK)
+ {
+ if (AVIStreamInfo(stream->handle, &stream->info, sizeof(stream->info)) == S_OK)
+ {
+ if (stream->info.fccType == streamtypeAUDIO)
+ {
+ WAVEFORMATEX wvfmt;
+ LONG struct_size = sizeof(WAVEFORMATEX);
+ if (AVIStreamReadFormat(stream->handle, 0, &wvfmt, &struct_size) != S_OK)
+ continue;
+
+ st = av_new_stream(s, id);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+
+ st->codec->block_align = wvfmt.nBlockAlign;
+ st->codec->channels = wvfmt.nChannels;
+ st->codec->sample_rate = wvfmt.nSamplesPerSec;
+ st->codec->bit_rate = wvfmt.nAvgBytesPerSec * 8;
+ st->codec->bits_per_sample = wvfmt.wBitsPerSample;
+
+ stream->chunck_samples = wvfmt.nSamplesPerSec * (uint64_t)info.dwScale / (uint64_t)info.dwRate;
+ stream->chunck_size = stream->chunck_samples * wvfmt.nChannels * wvfmt.wBitsPerSample / 8;
+
+ st->codec->codec_tag = wvfmt.wFormatTag;
+ st->codec->codec_id = wav_codec_get_id(wvfmt.wFormatTag, st->codec->bits_per_sample);
+ }
+ else if (stream->info.fccType == streamtypeVIDEO)
+ {
+ BITMAPINFO imgfmt;
+ LONG struct_size = sizeof(BITMAPINFO);
+
+ stream->chunck_size = stream->info.dwSampleSize;
+ stream->chunck_samples = 1;
+
+ if (AVIStreamReadFormat(stream->handle, 0, &imgfmt, &struct_size) != S_OK)
+ continue;
+
+ st = av_new_stream(s, id);
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->r_frame_rate.num = stream->info.dwRate;
+ st->r_frame_rate.den = stream->info.dwScale;
+
+ st->codec->width = imgfmt.bmiHeader.biWidth;
+ st->codec->height = imgfmt.bmiHeader.biHeight;
+
+ st->codec->bits_per_sample = imgfmt.bmiHeader.biBitCount;
+ st->codec->bit_rate = (uint64_t)stream->info.dwSampleSize * (uint64_t)stream->info.dwRate * 8 / (uint64_t)stream->info.dwScale;
+ st->codec->codec_tag = imgfmt.bmiHeader.biCompression;
+ st->codec->codec_id = codec_get_id(codec_bmp_tags, imgfmt.bmiHeader.biCompression);
+
+ st->duration = stream->info.dwLength;
+ }
+ else
+ {
+ AVIStreamRelease(stream->handle);
+ continue;
+ }
+
+ avs->nb_streams++;
+
+ st->codec->stream_codec_tag = stream->info.fccHandler;
+
+ av_set_pts_info(st, 64, info.dwScale, info.dwRate);
+ st->start_time = stream->info.dwStart;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVISynthContext *avs = s->priv_data;
+ HRESULT res;
+ AVISynthStream *stream;
+ int stream_id = avs->next_stream;
+ LONG read_size;
+
+ // handle interleaving manually...
+ stream = &avs->streams[stream_id];
+
+ if (stream->read >= stream->info.dwLength)
+ return AVERROR_IO;
+
+ if (av_new_packet(pkt, stream->chunck_size))
+ return AVERROR_IO;
+ pkt->stream_index = stream_id;
+ pkt->pts = avs->streams[stream_id].read / avs->streams[stream_id].chunck_samples;
+
+ res = AVIStreamRead(stream->handle, stream->read, stream->chunck_samples, pkt->data, stream->chunck_size, &read_size, NULL);
+
+ pkt->pts = stream->read;
+ pkt->size = read_size;
+
+ stream->read += stream->chunck_samples;
+
+ // prepare for the next stream to read
+ do {
+ avs->next_stream = (avs->next_stream+1) % avs->nb_streams;
+ } while (avs->next_stream != stream_id && s->streams[avs->next_stream]->discard >= AVDISCARD_ALL);
+
+ return (res == S_OK) ? pkt->size : -1;
+}
+
+static int avisynth_read_close(AVFormatContext *s)
+{
+ AVISynthContext *avs = s->priv_data;
+ int i;
+
+ for (i=0;i<avs->nb_streams;i++)
+ {
+ AVIStreamRelease(avs->streams[i].handle);
+ }
+
+ av_free(avs->streams);
+ AVIFileRelease(avs->file);
+ AVIFileExit();
+ return 0;
+}
+
+static int avisynth_read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags)
+{
+ AVISynthContext *avs = s->priv_data;
+ int stream_id;
+
+ for (stream_id = 0; stream_id < avs->nb_streams; stream_id++)
+ {
+ avs->streams[stream_id].read = pts * avs->streams[stream_id].chunck_samples;
+ }
+
+ return 0;
+}
+
+AVInputFormat avisynth_demuxer = {
+ "avs",
+ "AVISynth",
+ sizeof(AVISynthContext),
+ NULL,
+ avisynth_read_header,
+ avisynth_read_packet,
+ avisynth_read_close,
+ avisynth_read_seek,
+ NULL,
+ 0,
+ "avs",
+};
diff --git a/contrib/ffmpeg/libavformat/avs.c b/contrib/ffmpeg/libavformat/avs.c
new file mode 100644
index 000000000..0fa77deff
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/avs.c
@@ -0,0 +1,227 @@
+/*
+ * AVS demuxer.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "voc.h"
+
+
+typedef struct avs_format {
+ voc_dec_context_t voc;
+ AVStream *st_video;
+ AVStream *st_audio;
+ int width;
+ int height;
+ int bits_per_sample;
+ int fps;
+ int nb_frames;
+ int remaining_frame_size;
+ int remaining_audio_size;
+} avs_format_t;
+
+typedef enum avs_block_type {
+ AVS_VIDEO = 0x01,
+ AVS_AUDIO = 0x02,
+ AVS_PALETTE = 0x03,
+ AVS_GAME_DATA = 0x04,
+} avs_block_type_t;
+
+static int avs_probe(AVProbeData * p)
+{
+ const uint8_t *d;
+
+ if (p->buf_size < 4)
+ return 0;
+ d = p->buf;
+ if (d[0] == 'w' && d[1] == 'W' && d[2] == 0x10 && d[3] == 0)
+ return 50;
+
+ return 0;
+}
+
+static int avs_read_header(AVFormatContext * s, AVFormatParameters * ap)
+{
+ avs_format_t *avs = s->priv_data;
+
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+
+ url_fskip(&s->pb, 4);
+ avs->width = get_le16(&s->pb);
+ avs->height = get_le16(&s->pb);
+ avs->bits_per_sample = get_le16(&s->pb);
+ avs->fps = get_le16(&s->pb);
+ avs->nb_frames = get_le32(&s->pb);
+ avs->remaining_frame_size = 0;
+ avs->remaining_audio_size = 0;
+
+ avs->st_video = avs->st_audio = NULL;
+
+ if (avs->width != 318 || avs->height != 198)
+ av_log(s, AV_LOG_ERROR, "This AVS file claims to be %dx%d "
+ "when the AVS format only supports 318x198.\n",
+ avs->width, avs->height);
+
+ return 0;
+}
+
+static int
+avs_read_video_packet(AVFormatContext * s, AVPacket * pkt,
+ avs_block_type_t type, int sub_type, int size,
+ uint8_t * palette, int palette_size)
+{
+ avs_format_t *avs = s->priv_data;
+ int ret;
+
+ ret = av_new_packet(pkt, size + palette_size);
+ if (ret < 0)
+ return ret;
+
+ if (palette_size) {
+ pkt->data[0] = 0x00;
+ pkt->data[1] = 0x03;
+ pkt->data[2] = palette_size & 0xFF;
+ pkt->data[3] = (palette_size >> 8) & 0xFF;
+ memcpy(pkt->data + 4, palette, palette_size - 4);
+ }
+
+ pkt->data[palette_size + 0] = sub_type;
+ pkt->data[palette_size + 1] = type;
+ pkt->data[palette_size + 2] = size & 0xFF;
+ pkt->data[palette_size + 3] = (size >> 8) & 0xFF;
+ ret = get_buffer(&s->pb, pkt->data + palette_size + 4, size - 4) + 4;
+ if (ret < size) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+
+ pkt->size = ret + palette_size;
+ pkt->stream_index = avs->st_video->index;
+ if (sub_type == 0)
+ pkt->flags |= PKT_FLAG_KEY;
+
+ return 0;
+}
+
+static int avs_read_audio_packet(AVFormatContext * s, AVPacket * pkt)
+{
+ avs_format_t *avs = s->priv_data;
+ int ret, size;
+
+ size = url_ftell(&s->pb);
+ ret = voc_get_packet(s, pkt, avs->st_audio, avs->remaining_audio_size);
+ size = url_ftell(&s->pb) - size;
+ avs->remaining_audio_size -= size;
+
+ if (ret == AVERROR_IO)
+ return 0; /* this indicates EOS */
+ if (ret < 0)
+ return ret;
+
+ pkt->stream_index = avs->st_audio->index;
+ pkt->flags |= PKT_FLAG_KEY;
+
+ return size;
+}
+
+static int avs_read_packet(AVFormatContext * s, AVPacket * pkt)
+{
+ avs_format_t *avs = s->priv_data;
+ int sub_type = 0, size = 0;
+ avs_block_type_t type = 0;
+ int palette_size = 0;
+ uint8_t palette[4 + 3 * 256];
+ int ret;
+
+ if (avs->remaining_audio_size > 0)
+ if (avs_read_audio_packet(s, pkt) > 0)
+ return 0;
+
+ while (1) {
+ if (avs->remaining_frame_size <= 0) {
+ if (!get_le16(&s->pb)) /* found EOF */
+ return AVERROR_IO;
+ avs->remaining_frame_size = get_le16(&s->pb) - 4;
+ }
+
+ while (avs->remaining_frame_size > 0) {
+ sub_type = get_byte(&s->pb);
+ type = get_byte(&s->pb);
+ size = get_le16(&s->pb);
+ avs->remaining_frame_size -= size;
+
+ switch (type) {
+ case AVS_PALETTE:
+ ret = get_buffer(&s->pb, palette, size - 4);
+ if (ret < size - 4)
+ return AVERROR_IO;
+ palette_size = size;
+ break;
+
+ case AVS_VIDEO:
+ if (!avs->st_video) {
+ avs->st_video = av_new_stream(s, AVS_VIDEO);
+ if (avs->st_video == NULL)
+ return AVERROR_NOMEM;
+ avs->st_video->codec->codec_type = CODEC_TYPE_VIDEO;
+ avs->st_video->codec->codec_id = CODEC_ID_AVS;
+ avs->st_video->codec->width = avs->width;
+ avs->st_video->codec->height = avs->height;
+ avs->st_video->codec->bits_per_sample=avs->bits_per_sample;
+ avs->st_video->nb_frames = avs->nb_frames;
+ avs->st_video->codec->time_base = (AVRational) {
+ 1, avs->fps};
+ }
+ return avs_read_video_packet(s, pkt, type, sub_type, size,
+ palette, palette_size);
+
+ case AVS_AUDIO:
+ if (!avs->st_audio) {
+ avs->st_audio = av_new_stream(s, AVS_AUDIO);
+ if (avs->st_audio == NULL)
+ return AVERROR_NOMEM;
+ avs->st_audio->codec->codec_type = CODEC_TYPE_AUDIO;
+ }
+ avs->remaining_audio_size = size - 4;
+ size = avs_read_audio_packet(s, pkt);
+ if (size != 0)
+ return size;
+ break;
+
+ default:
+ url_fskip(&s->pb, size - 4);
+ }
+ }
+ }
+}
+
+static int avs_read_close(AVFormatContext * s)
+{
+ return 0;
+}
+
+AVInputFormat avs_demuxer = {
+ "avs",
+ "avs format",
+ sizeof(avs_format_t),
+ avs_probe,
+ avs_read_header,
+ avs_read_packet,
+ avs_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/barpainet.h b/contrib/ffmpeg/libavformat/barpainet.h
new file mode 100644
index 000000000..b50bf82b6
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/barpainet.h
@@ -0,0 +1,45 @@
+/*
+ * copyright (c) 2002 Francois Revol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef BARPA_INET_H
+#define BARPA_INET_H
+
+#include "config.h"
+
+#ifdef CONFIG_BEOS_NETSERVER
+
+# include <socket.h>
+int inet_aton (const char * str, struct in_addr * add);
+# define PF_INET AF_INET
+# define SO_SNDBUF 0x40000001
+
+/* fake */
+struct ip_mreq {
+ struct in_addr imr_multiaddr; /* IP multicast address of group */
+ struct in_addr imr_interface; /* local IP address of interface */
+};
+
+#include <netdb.h>
+
+#else
+# include <arpa/inet.h>
+#endif
+
+#endif /* BARPA_INET_H */
diff --git a/contrib/ffmpeg/libavformat/base64.c b/contrib/ffmpeg/libavformat/base64.c
new file mode 100644
index 000000000..6279244d3
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/base64.c
@@ -0,0 +1,231 @@
+/*
+ * Base64.c
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file base64.c
+ * @brief Base64 Encode/Decode
+ * @author Ryan Martell <rdm4@martellventures.com> (with lots of Michael)
+ */
+
+#include "common.h"
+#include "base64.h"
+
+/* ---------------- private code */
+static uint8_t map2[] =
+{
+ 0x3e, 0xff, 0xff, 0xff, 0x3f, 0x34, 0x35, 0x36,
+ 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
+ 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
+ 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1a, 0x1b,
+ 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33
+};
+
+int av_base64_decode(uint8_t * out, const char *in, int out_length)
+{
+ int i, v;
+ uint8_t *dst = out;
+
+ v = 0;
+ for (i = 0; in[i] && in[i] != '='; i++) {
+ unsigned int index= in[i]-43;
+ if (index>=(sizeof(map2)/sizeof(map2[0])) || map2[index] == 0xff)
+ return -1;
+ v = (v << 6) + map2[index];
+ if (i & 3) {
+ if (dst - out < out_length) {
+ *dst++ = v >> (6 - 2 * (i & 3));
+ }
+ }
+ }
+
+ return (dst - out);
+}
+
+/*****************************************************************************
+* b64_encode: stolen from VLC's http.c
+* simplified by michael
+* fixed edge cases and made it work from data (vs. strings) by ryan.
+*****************************************************************************/
+
+char *av_base64_encode(uint8_t * src, int len)
+{
+ static const char b64[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+ char *ret, *dst;
+ unsigned i_bits = 0;
+ int i_shift = 0;
+ int bytes_remaining = len;
+
+ if (len < UINT_MAX / 4) {
+ ret = dst = av_malloc(len * 4 / 3 + 12);
+ } else
+ return NULL;
+
+ if (len) { // special edge case, what should we really do here?
+ while (bytes_remaining) {
+ i_bits = (i_bits << 8) + *src++;
+ bytes_remaining--;
+ i_shift += 8;
+
+ do {
+ *dst++ = b64[(i_bits << 6 >> i_shift) & 0x3f];
+ i_shift -= 6;
+ } while (i_shift > 6 || (bytes_remaining == 0 && i_shift > 0));
+ }
+ while ((dst - ret) & 3)
+ *dst++ = '=';
+ }
+ *dst = '\0';
+
+ return ret;
+}
+
+// #define TEST_BASE64
+
+#ifdef TEST_BASE64
+#include "avutil.h"
+
+int b64test()
+{
+ int numerr = 0;
+ int len;
+ int numtest = 1;
+ uint8_t decode[1000];
+ struct test {
+ void *data;
+ int len;
+ const char *result;
+ } *t, tests[] = {
+ { "", 0, "" },
+ { "1", 1, "MQ==" },
+ { "22", 2, "MjI=" },
+ { "333", 3, "MzMz" },
+ { "4444", 4, "NDQ0NA==" },
+ { "55555", 5, "NTU1NTU=" },
+ { "abc:def", 7, "YWJjOmRlZg==" },
+ { NULL }
+ };
+ for (t = tests; t->data; t++) {
+ char *str;
+
+ av_log(NULL, AV_LOG_ERROR, "Encoding %s...\n", (char *) t->data);
+ str = av_base64_encode(t->data, t->len);
+ if (str) {
+ av_log(NULL, AV_LOG_ERROR, "Encoded to %s...\n", str);
+ if (strcmp(str, t->result) != 0) {
+ av_log(NULL, AV_LOG_ERROR, "failed test %d: %s != %s\n",
+ numtest, str, t->result);
+ numerr++;
+ }
+ av_free(str);
+ }
+
+ av_log(NULL, AV_LOG_ERROR, "Done encoding, about to decode...\n");
+ len = av_base64_decode(decode, t->result, sizeof(decode));
+ if (len != t->len) {
+ av_log(NULL, AV_LOG_ERROR, "failed test %d: len %d != %d\n",
+ numtest, len, t->len);
+ numerr++;
+ } else if (memcmp(decode, t->data, t->len) != 0) {
+ av_log(NULL, AV_LOG_ERROR, "failed test %d: data\n", numtest);
+ numerr++;
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "Decoded to %s\n",
+ (char *) t->data);
+ }
+ numtest++;
+ }
+
+#undef srand
+#undef rand
+
+ {
+ int test_count;
+ srand(123141); // time(NULL));
+ for (test_count = 0; test_count < 100; test_count++) {
+ int size = rand() % 1024;
+ int ii;
+ uint8_t *data;
+ char *encoded_result;
+
+ av_log(NULL, AV_LOG_ERROR, "Test %d: Size %d bytes...",
+ test_count, size);
+ data = (uint8_t *) av_malloc(size);
+ for (ii = 0; ii < size; ii++) {
+ data[ii] = rand() % 255;
+ }
+
+ encoded_result = av_base64_encode(data, size);
+ if (encoded_result) {
+ int decode_buffer_size = size + 10; // try without 10 as well
+ uint8_t *decode_buffer = av_malloc(decode_buffer_size);
+ if (decode_buffer) {
+ int decoded_size =
+ av_base64_decode(decode_buffer, encoded_result,
+ decode_buffer_size);
+
+ if (decoded_size != size) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Decoded/Encoded size mismatch (%d != %d)\n",
+ decoded_size, size);
+ } else {
+ if (memcmp(decode_buffer, data, decoded_size) == 0) {
+ av_log(NULL, AV_LOG_ERROR, "Passed!\n");
+ } else {
+ av_log(NULL, AV_LOG_ERROR,
+ "Failed (Data differs)!\n");
+ }
+ }
+ av_free(decode_buffer);
+ }
+
+ av_free(encoded_result);
+ }
+ }
+ }
+
+ // these are invalid strings that it currently decodes (which it probably shouldn't)
+ {
+ uint8_t str[32];
+ if (av_base64_decode(str, "M=M=", sizeof(str)) != -1) {
+ av_log(NULL, AV_LOG_ERROR,
+ "failed test %d: successful decode of `M=M='\n",
+ numtest++);
+ numerr++;
+ }
+ if (av_base64_decode(str, "MQ===", sizeof(str)) != -1) {
+ av_log(NULL, AV_LOG_ERROR,
+ "failed test %d: successful decode of `MQ==='\n",
+ numtest++);
+ numerr++;
+ }
+ }
+
+ return numerr;
+}
+#endif
+
diff --git a/contrib/ffmpeg/libavformat/base64.h b/contrib/ffmpeg/libavformat/base64.h
new file mode 100644
index 000000000..03d43afe4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/base64.h
@@ -0,0 +1,24 @@
+/*
+ * base64.h
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+int av_base64_decode(uint8_t * out, const char *in, int out_length); // param order as strncpy()
+char *av_base64_encode(uint8_t * src, int len); // src is not a string, it's data.
+
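A short round-trip sketch of the two declarations above, assuming string.h for memcmp() and av_free() from libavutil (av_base64_encode() allocates its result with av_malloc()):

    static int base64_roundtrip(void)
    {
        uint8_t data[] = { 0x00, 0xff, 0x10, 0x20 };
        uint8_t decoded[sizeof(data)];
        char *b64;
        int n;

        b64 = av_base64_encode(data, sizeof(data));           /* caller must free the result */
        if (!b64)
            return -1;
        n = av_base64_decode(decoded, b64, sizeof(decoded));  /* returns decoded byte count */
        av_free(b64);
        return (n == (int)sizeof(data) && !memcmp(decoded, data, n)) ? 0 : -1;
    }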
diff --git a/contrib/ffmpeg/libavformat/beosaudio.cpp b/contrib/ffmpeg/libavformat/beosaudio.cpp
new file mode 100644
index 000000000..6ac45ebb2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/beosaudio.cpp
@@ -0,0 +1,465 @@
+/*
+ * BeOS audio play interface
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#include <Application.h>
+#include <SoundPlayer.h>
+
+extern "C" {
+#include "avformat.h"
+}
+
+#ifdef HAVE_BSOUNDRECORDER
+#include <SoundRecorder.h>
+using namespace BPrivate::Media::Experimental;
+#endif
+
+/* enable performance checks */
+//#define PERF_CHECK
+
+/* enable Media Kit latency checks */
+//#define LATENCY_CHECK
+
+#define AUDIO_BLOCK_SIZE 4096
+#define AUDIO_BLOCK_COUNT 8
+
+#define AUDIO_BUFFER_SIZE (AUDIO_BLOCK_SIZE*AUDIO_BLOCK_COUNT)
+
+typedef struct {
+ int fd; // UNUSED
+ int sample_rate;
+ int channels;
+ int frame_size; /* in bytes ! */
+ CodecID codec_id;
+ uint8_t buffer[AUDIO_BUFFER_SIZE];
+ int buffer_ptr;
+ /* ring buffer */
+ sem_id input_sem;
+ int input_index;
+ sem_id output_sem;
+ int output_index;
+ BSoundPlayer *player;
+#ifdef HAVE_BSOUNDRECORDER
+ BSoundRecorder *recorder;
+#endif
+ int has_quit; /* signal callbacks not to wait */
+ volatile bigtime_t starve_time;
+} AudioData;
+
+static thread_id main_thid;
+static thread_id bapp_thid;
+static int own_BApp_created = 0;
+static int refcount = 0;
+
+/* create the BApplication and Run() it */
+static int32 bapp_thread(void *arg)
+{
+ new BApplication("application/x-vnd.ffmpeg");
+ own_BApp_created = 1;
+ be_app->Run();
+ /* kill the process group */
+// kill(0, SIGINT);
+// kill(main_thid, SIGHUP);
+ return B_OK;
+}
+
+/* create the BApplication only if needed */
+static void create_bapp_if_needed(void)
+{
+ if (refcount++ == 0) {
+ /* needed by libmedia */
+ if (be_app == NULL) {
+ bapp_thid = spawn_thread(bapp_thread, "ffmpeg BApplication", B_NORMAL_PRIORITY, NULL);
+ resume_thread(bapp_thid);
+ while (!own_BApp_created)
+ snooze(50000);
+ }
+ }
+}
+
+static void destroy_bapp_if_needed(void)
+{
+ if (--refcount == 0 && own_BApp_created) {
+ be_app->Lock();
+ be_app->Quit();
+ be_app = NULL;
+ }
+}
+
+/* called back by BSoundPlayer */
+static void audioplay_callback(void *cookie, void *buffer, size_t bufferSize, const media_raw_audio_format &format)
+{
+ AudioData *s;
+ size_t len, amount;
+ unsigned char *buf = (unsigned char *)buffer;
+
+ s = (AudioData *)cookie;
+ if (s->has_quit)
+ return;
+ while (bufferSize > 0) {
+#ifdef PERF_CHECK
+ bigtime_t t;
+ t = system_time();
+#endif
+ len = MIN(AUDIO_BLOCK_SIZE, bufferSize);
+ if (acquire_sem_etc(s->output_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK) {
+ s->has_quit = 1;
+ s->player->SetHasData(false);
+ return;
+ }
+ amount = MIN(len, (AUDIO_BUFFER_SIZE - s->output_index));
+ memcpy(buf, &s->buffer[s->output_index], amount);
+ s->output_index += amount;
+ if (s->output_index >= AUDIO_BUFFER_SIZE) {
+ s->output_index %= AUDIO_BUFFER_SIZE;
+ memcpy(buf + amount, &s->buffer[s->output_index], len - amount);
+ s->output_index += len-amount;
+ s->output_index %= AUDIO_BUFFER_SIZE;
+ }
+ release_sem_etc(s->input_sem, len, 0);
+#ifdef PERF_CHECK
+ t = system_time() - t;
+ s->starve_time = MAX(s->starve_time, t);
+#endif
+ buf += len;
+ bufferSize -= len;
+ }
+}
+
+#ifdef HAVE_BSOUNDRECORDER
+/* called back by BSoundRecorder */
+static void audiorecord_callback(void *cookie, bigtime_t timestamp, void *buffer, size_t bufferSize, const media_multi_audio_format &format)
+{
+ AudioData *s;
+ size_t len, amount;
+ unsigned char *buf = (unsigned char *)buffer;
+
+ s = (AudioData *)cookie;
+ if (s->has_quit)
+ return;
+
+ while (bufferSize > 0) {
+ len = MIN(bufferSize, AUDIO_BLOCK_SIZE);
+ //printf("acquire_sem(input, %d)\n", len);
+ if (acquire_sem_etc(s->input_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK) {
+ s->has_quit = 1;
+ return;
+ }
+ amount = MIN(len, (AUDIO_BUFFER_SIZE - s->input_index));
+ memcpy(&s->buffer[s->input_index], buf, amount);
+ s->input_index += amount;
+ if (s->input_index >= AUDIO_BUFFER_SIZE) {
+ s->input_index %= AUDIO_BUFFER_SIZE;
+ memcpy(&s->buffer[s->input_index], buf + amount, len - amount);
+ s->input_index += len - amount;
+ }
+ release_sem_etc(s->output_sem, len, 0);
+ //printf("release_sem(output, %d)\n", len);
+ buf += len;
+ bufferSize -= len;
+ }
+}
+#endif
+
+static int audio_open(AudioData *s, int is_output, const char *audio_device)
+{
+ int p[2];
+ int ret;
+ media_raw_audio_format format;
+ media_multi_audio_format iformat;
+
+#ifndef HAVE_BSOUNDRECORDER
+ if (!is_output)
+ return -EIO; /* not for now */
+#endif
+ s->input_sem = create_sem(AUDIO_BUFFER_SIZE, "ffmpeg_ringbuffer_input");
+ if (s->input_sem < B_OK)
+ return -EIO;
+ s->output_sem = create_sem(0, "ffmpeg_ringbuffer_output");
+ if (s->output_sem < B_OK) {
+ delete_sem(s->input_sem);
+ return -EIO;
+ }
+ s->input_index = 0;
+ s->output_index = 0;
+ create_bapp_if_needed();
+ s->frame_size = AUDIO_BLOCK_SIZE;
+ /* bump up the priority (avoid realtime though) */
+ set_thread_priority(find_thread(NULL), B_DISPLAY_PRIORITY+1);
+#ifdef HAVE_BSOUNDRECORDER
+ if (!is_output) {
+ bool wait_for_input = false;
+ if (audio_device && !strcmp(audio_device, "wait:"))
+ wait_for_input = true;
+ s->recorder = new BSoundRecorder(&iformat, wait_for_input, "ffmpeg input", audiorecord_callback);
+ if (wait_for_input && (s->recorder->InitCheck() == B_OK)) {
+ s->recorder->WaitForIncomingConnection(&iformat);
+ }
+ if (s->recorder->InitCheck() != B_OK || iformat.format != media_raw_audio_format::B_AUDIO_SHORT) {
+ delete s->recorder;
+ s->recorder = NULL;
+ if (s->input_sem)
+ delete_sem(s->input_sem);
+ if (s->output_sem)
+ delete_sem(s->output_sem);
+ return -EIO;
+ }
+ s->codec_id = (iformat.byte_order == B_MEDIA_LITTLE_ENDIAN)?CODEC_ID_PCM_S16LE:CODEC_ID_PCM_S16BE;
+ s->channels = iformat.channel_count;
+ s->sample_rate = (int)iformat.frame_rate;
+ s->frame_size = iformat.buffer_size;
+ s->recorder->SetCookie(s);
+ s->recorder->SetVolume(1.0);
+ s->recorder->Start();
+ return 0;
+ }
+#endif
+ format = media_raw_audio_format::wildcard;
+ format.format = media_raw_audio_format::B_AUDIO_SHORT;
+ format.byte_order = B_HOST_IS_LENDIAN ? B_MEDIA_LITTLE_ENDIAN : B_MEDIA_BIG_ENDIAN;
+ format.channel_count = s->channels;
+ format.buffer_size = s->frame_size;
+ format.frame_rate = s->sample_rate;
+ s->player = new BSoundPlayer(&format, "ffmpeg output", audioplay_callback);
+ if (s->player->InitCheck() != B_OK) {
+ delete s->player;
+ s->player = NULL;
+ if (s->input_sem)
+ delete_sem(s->input_sem);
+ if (s->output_sem)
+ delete_sem(s->output_sem);
+ return -EIO;
+ }
+ s->player->SetCookie(s);
+ s->player->SetVolume(1.0);
+ s->player->Start();
+ s->player->SetHasData(true);
+ return 0;
+}
+
+static int audio_close(AudioData *s)
+{
+ if (s->input_sem)
+ delete_sem(s->input_sem);
+ if (s->output_sem)
+ delete_sem(s->output_sem);
+ s->has_quit = 1;
+ if (s->player) {
+ s->player->Stop();
+ }
+ if (s->player)
+ delete s->player;
+#ifdef HAVE_BSOUNDRECORDER
+ if (s->recorder)
+ delete s->recorder;
+#endif
+ destroy_bapp_if_needed();
+ return 0;
+}
+
+/* sound output support */
+static int audio_write_header(AVFormatContext *s1)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+ AVStream *st;
+ int ret;
+
+ st = s1->streams[0];
+ s->sample_rate = st->codec->sample_rate;
+ s->channels = st->codec->channels;
+ ret = audio_open(s, 1, NULL);
+ if (ret < 0)
+ return -EIO;
+ return 0;
+}
+
+static int audio_write_packet(AVFormatContext *s1, int stream_index,
+ const uint8_t *buf, int size, int64_t force_pts)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+ int len, ret;
+#ifdef LATENCY_CHECK
+bigtime_t lat1, lat2;
+lat1 = s->player->Latency();
+#endif
+#ifdef PERF_CHECK
+ bigtime_t t = s->starve_time;
+ s->starve_time = 0;
+ printf("starve_time: %lld \n", t);
+#endif
+ while (size > 0) {
+ int amount;
+ len = MIN(size, AUDIO_BLOCK_SIZE);
+ if (acquire_sem_etc(s->input_sem, len, B_CAN_INTERRUPT, 0LL) < B_OK)
+ return -EIO;
+ amount = MIN(len, (AUDIO_BUFFER_SIZE - s->input_index));
+ memcpy(&s->buffer[s->input_index], buf, amount);
+ s->input_index += amount;
+ if (s->input_index >= AUDIO_BUFFER_SIZE) {
+ s->input_index %= AUDIO_BUFFER_SIZE;
+ memcpy(&s->buffer[s->input_index], buf + amount, len - amount);
+ s->input_index += len - amount;
+ }
+ release_sem_etc(s->output_sem, len, 0);
+ buf += len;
+ size -= len;
+ }
+#ifdef LATENCY_CHECK
+lat2 = s->player->Latency();
+printf("#### BSoundPlayer::Latency(): before= %lld, after= %lld\n", lat1, lat2);
+#endif
+ return 0;
+}
+
+static int audio_write_trailer(AVFormatContext *s1)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+
+ audio_close(s);
+ return 0;
+}
+
+/* grab support */
+
+static int audio_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+ AVStream *st;
+ int ret;
+
+ if (!ap || ap->sample_rate <= 0 || ap->channels <= 0)
+ return -1;
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+ s->sample_rate = ap->sample_rate;
+ s->channels = ap->channels;
+
+ ret = audio_open(s, 0, ap->device);
+ if (ret < 0) {
+ av_free(st);
+ return -EIO;
+ }
+ /* take real parameters */
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = s->codec_id;
+ st->codec->sample_rate = s->sample_rate;
+ st->codec->channels = s->channels;
+ av_set_pts_info(st, 48, 1, 1000000); /* 48 bits pts in us */
+ return 0;
+}
+
+static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+ int size;
+ size_t len, amount;
+ unsigned char *buf;
+ status_t err;
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return -EIO;
+ buf = (unsigned char *)pkt->data;
+ size = pkt->size;
+ while (size > 0) {
+ len = MIN(AUDIO_BLOCK_SIZE, size);
+ //printf("acquire_sem(output, %d)\n", len);
+ while ((err=acquire_sem_etc(s->output_sem, len, B_CAN_INTERRUPT, 0LL)) == B_INTERRUPTED);
+ if (err < B_OK) {
+ av_free_packet(pkt);
+ return -EIO;
+ }
+ amount = MIN(len, (AUDIO_BUFFER_SIZE - s->output_index));
+ memcpy(buf, &s->buffer[s->output_index], amount);
+ s->output_index += amount;
+ if (s->output_index >= AUDIO_BUFFER_SIZE) {
+ s->output_index %= AUDIO_BUFFER_SIZE;
+ memcpy(buf + amount, &s->buffer[s->output_index], len - amount);
+ s->output_index += len-amount;
+ s->output_index %= AUDIO_BUFFER_SIZE;
+ }
+ release_sem_etc(s->input_sem, len, 0);
+ //printf("release_sem(input, %d)\n", len);
+ buf += len;
+ size -= len;
+ }
+ //XXX: add pts info
+ return 0;
+}
+
+static int audio_read_close(AVFormatContext *s1)
+{
+ AudioData *s = (AudioData *)s1->priv_data;
+
+ audio_close(s);
+ return 0;
+}
+
+static AVInputFormat audio_demuxer = {
+ "audio_device",
+ "audio grab and output",
+ sizeof(AudioData),
+ NULL,
+ audio_read_header,
+ audio_read_packet,
+ audio_read_close,
+ NULL,
+ AVFMT_NOFILE,
+};
+
+AVOutputFormat audio_muxer = {
+ "audio_device",
+ "audio grab and output",
+ "",
+ "",
+ sizeof(AudioData),
+#ifdef WORDS_BIGENDIAN
+ CODEC_ID_PCM_S16BE,
+#else
+ CODEC_ID_PCM_S16LE,
+#endif
+ CODEC_ID_NONE,
+ audio_write_header,
+ audio_write_packet,
+ audio_write_trailer,
+ AVFMT_NOFILE,
+};
+
+extern "C" {
+
+int audio_init(void)
+{
+ main_thid = find_thread(NULL);
+ av_register_input_format(&audio_demuxer);
+ av_register_output_format(&audio_muxer);
+ return 0;
+}
+
+} // "C"
+
diff --git a/contrib/ffmpeg/libavformat/crc.c b/contrib/ffmpeg/libavformat/crc.c
new file mode 100644
index 000000000..bdbe8bcff
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/crc.c
@@ -0,0 +1,98 @@
+/*
+ * CRC decoder (for codec/format testing)
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "adler32.h"
+
+#ifdef CONFIG_CRC_MUXER
+typedef struct CRCState {
+ uint32_t crcval;
+} CRCState;
+
+static int crc_write_header(struct AVFormatContext *s)
+{
+ CRCState *crc = s->priv_data;
+
+ /* init CRC */
+ crc->crcval = 1;
+
+ return 0;
+}
+
+static int crc_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ CRCState *crc = s->priv_data;
+ crc->crcval = av_adler32_update(crc->crcval, pkt->data, pkt->size);
+ return 0;
+}
+
+static int crc_write_trailer(struct AVFormatContext *s)
+{
+ CRCState *crc = s->priv_data;
+ char buf[64];
+
+ snprintf(buf, sizeof(buf), "CRC=0x%08x\n", crc->crcval);
+ put_buffer(&s->pb, buf, strlen(buf));
+ put_flush_packet(&s->pb);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_FRAMECRC_MUXER
+static int framecrc_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ uint32_t crc = av_adler32_update(0, pkt->data, pkt->size);
+ char buf[256];
+
+ snprintf(buf, sizeof(buf), "%d, %"PRId64", %d, 0x%08x\n", pkt->stream_index, pkt->dts, pkt->size, crc);
+ put_buffer(&s->pb, buf, strlen(buf));
+ put_flush_packet(&s->pb);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_CRC_MUXER
+AVOutputFormat crc_muxer = {
+ "crc",
+ "crc testing format",
+ NULL,
+ "",
+ sizeof(CRCState),
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_RAWVIDEO,
+ crc_write_header,
+ crc_write_packet,
+ crc_write_trailer,
+};
+#endif
+#ifdef CONFIG_FRAMECRC_MUXER
+AVOutputFormat framecrc_muxer = {
+ "framecrc",
+ "framecrc testing format",
+ NULL,
+ "",
+ 0,
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_RAWVIDEO,
+ NULL,
+ framecrc_write_packet,
+ NULL,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/cutils.c b/contrib/ffmpeg/libavformat/cutils.c
new file mode 100644
index 000000000..45959ec39
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/cutils.c
@@ -0,0 +1,275 @@
+/*
+ * Various simple utilities for ffmpeg system
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#if !defined(CONFIG_NOCUTILS)
+/**
+ * Return TRUE if val is a prefix of str. If it returns TRUE, ptr is
+ * set to the next character in 'str' after the prefix.
+ *
+ * @param str input string
+ * @param val prefix to test
+ * @param ptr updated to point after the prefix in str if there is a match
+ * @return TRUE if there is a match
+ */
+int strstart(const char *str, const char *val, const char **ptr)
+{
+ const char *p, *q;
+ p = str;
+ q = val;
+ while (*q != '\0') {
+ if (*p != *q)
+ return 0;
+ p++;
+ q++;
+ }
+ if (ptr)
+ *ptr = p;
+ return 1;
+}
+
+/**
+ * Return TRUE if val is a prefix of str (case independent). If it
+ * returns TRUE, ptr is set to the next character in 'str' after the
+ * prefix.
+ *
+ * @param str input string
+ * @param val prefix to test
+ * @param ptr updated to point after the prefix in str if there is a match
+ * @return TRUE if there is a match */
+int stristart(const char *str, const char *val, const char **ptr)
+{
+ const char *p, *q;
+ p = str;
+ q = val;
+ while (*q != '\0') {
+ if (toupper(*(const unsigned char *)p) != toupper(*(const unsigned char *)q))
+ return 0;
+ p++;
+ q++;
+ }
+ if (ptr)
+ *ptr = p;
+ return 1;
+}
+
+/**
+ * Copy the string str to buf. If str length is bigger than buf_size -
+ * 1 then it is clamped to buf_size - 1.
+ * NOTE: this function does what strncpy should have done to be
+ * useful. NEVER use strncpy.
+ *
+ * @param buf destination buffer
+ * @param buf_size size of destination buffer
+ * @param str source string
+ */
+void pstrcpy(char *buf, int buf_size, const char *str)
+{
+ int c;
+ char *q = buf;
+
+ if (buf_size <= 0)
+ return;
+
+ for(;;) {
+ c = *str++;
+ if (c == 0 || q >= buf + buf_size - 1)
+ break;
+ *q++ = c;
+ }
+ *q = '\0';
+}
+
+/* strcat and truncate. */
+char *pstrcat(char *buf, int buf_size, const char *s)
+{
+ int len;
+ len = strlen(buf);
+ if (len < buf_size)
+ pstrcpy(buf + len, buf_size - len, s);
+ return buf;
+}
+
+#endif
+
+/* add one element to a dynamic array */
+void __dynarray_add(unsigned long **tab_ptr, int *nb_ptr, unsigned long elem)
+{
+ int nb, nb_alloc;
+ unsigned long *tab;
+
+ nb = *nb_ptr;
+ tab = *tab_ptr;
+ if ((nb & (nb - 1)) == 0) {
+ if (nb == 0)
+ nb_alloc = 1;
+ else
+ nb_alloc = nb * 2;
+ tab = av_realloc(tab, nb_alloc * sizeof(unsigned long));
+ *tab_ptr = tab;
+ }
+ tab[nb++] = elem;
+ *nb_ptr = nb;
+}
+
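+/*
+ * Convert a broken-down UTC time into seconds since the Unix epoch, i.e. a
+ * portable stand-in for timegm(). The 86400-multiplied term is the standard
+ * civil-calendar day count with March-based months (hence the m < 3
+ * adjustment), offset so that 1970-01-01 gives day 0. Quick check: for
+ * 1970-01-01 00:00:00 the day term evaluates to 0, so t == 0.
+ */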
+time_t mktimegm(struct tm *tm)
+{
+ time_t t;
+
+ int y = tm->tm_year + 1900, m = tm->tm_mon + 1, d = tm->tm_mday;
+
+ if (m < 3) {
+ m += 12;
+ y--;
+ }
+
+ t = 86400 *
+ (d + (153 * m - 457) / 5 + 365 * y + y / 4 - y / 100 + y / 400 - 719469);
+
+ t += 3600 * tm->tm_hour + 60 * tm->tm_min + tm->tm_sec;
+
+ return t;
+}
+
+#define ISLEAP(y) (((y) % 4 == 0) && (((y) % 100) != 0 || ((y) % 400) == 0))
+#define LEAPS_COUNT(y) ((y)/4 - (y)/100 + (y)/400)
+
+/* this is our own gmtime_r. it differs from its POSIX counterpart in a
+ couple of places, though. */
+struct tm *brktimegm(time_t secs, struct tm *tm)
+{
+ int days, y, ny, m;
+ int md[] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
+
+ days = secs / 86400;
+ secs %= 86400;
+ tm->tm_hour = secs / 3600;
+ tm->tm_min = (secs % 3600) / 60;
+ tm->tm_sec = secs % 60;
+
+ /* oh well, maybe someone will some day invent a formula for this stuff */
+ y = 1970; /* start "guessing" */
+ while (days >= (ISLEAP(y)?366:365)) {
+ ny = (y + days/366);
+ days -= (ny - y) * 365 + LEAPS_COUNT(ny - 1) - LEAPS_COUNT(y - 1);
+ y = ny;
+ }
+ md[1] = ISLEAP(y)?29:28;
+ for (m=0; days >= md[m]; m++)
+ days -= md[m];
+
+ tm->tm_year = y; /* unlike gmtime_r we store complete year here */
+ tm->tm_mon = m+1; /* unlike gmtime_r tm_mon is from 1 to 12 */
+ tm->tm_mday = days+1;
+
+ return tm;
+}
+
+/* get a positive number between n_min and n_max, for a maximum length
+ of len_max. Return -1 if error. */
+static int date_get_num(const char **pp,
+ int n_min, int n_max, int len_max)
+{
+ int i, val, c;
+ const char *p;
+
+ p = *pp;
+ val = 0;
+ for(i = 0; i < len_max; i++) {
+ c = *p;
+ if (!isdigit(c))
+ break;
+ val = (val * 10) + c - '0';
+ p++;
+ }
+ /* no number read ? */
+ if (p == *pp)
+ return -1;
+ if (val < n_min || val > n_max)
+ return -1;
+ *pp = p;
+ return val;
+}
+
+/* small strptime for ffmpeg */
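+/*
+ * Only the %Y %m %d %H %M %S and %% conversions are handled; any other
+ * character in fmt must match the input literally. Returns a pointer just
+ * past the parsed text, or NULL on mismatch. A small sketch of the intended
+ * use (values filled into dt follow the usual struct tm conventions):
+ *
+ *   struct tm dt = {0};
+ *   if (small_strptime("2007-01-02 03:04:05", "%Y-%m-%d %H:%M:%S", &dt)) {
+ *       // dt.tm_year == 107, dt.tm_mon == 0, dt.tm_mday == 2
+ *   }
+ */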
+const char *small_strptime(const char *p, const char *fmt,
+ struct tm *dt)
+{
+ int c, val;
+
+ for(;;) {
+ c = *fmt++;
+ if (c == '\0') {
+ return p;
+ } else if (c == '%') {
+ c = *fmt++;
+ switch(c) {
+ case 'H':
+ val = date_get_num(&p, 0, 23, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_hour = val;
+ break;
+ case 'M':
+ val = date_get_num(&p, 0, 59, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_min = val;
+ break;
+ case 'S':
+ val = date_get_num(&p, 0, 59, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_sec = val;
+ break;
+ case 'Y':
+ val = date_get_num(&p, 0, 9999, 4);
+ if (val == -1)
+ return NULL;
+ dt->tm_year = val - 1900;
+ break;
+ case 'm':
+ val = date_get_num(&p, 1, 12, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_mon = val - 1;
+ break;
+ case 'd':
+ val = date_get_num(&p, 1, 31, 2);
+ if (val == -1)
+ return NULL;
+ dt->tm_mday = val;
+ break;
+ case '%':
+ goto match;
+ default:
+ return NULL;
+ }
+ } else {
+ match:
+ if (c != *p)
+ return NULL;
+ p++;
+ }
+ }
+ return p;
+}
+
diff --git a/contrib/ffmpeg/libavformat/daud.c b/contrib/ffmpeg/libavformat/daud.c
new file mode 100644
index 000000000..ec81b7b1c
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/daud.c
@@ -0,0 +1,58 @@
+/*
+ * D-Cinema audio demuxer
+ * Copyright (c) 2005 Reimar Döffinger.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
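+/*
+ * The stream parameters below are hard-coded: 6 channels of 24-bit (3-byte)
+ * PCM at 96 kHz, so block_align is 3 * 6 = 18 bytes per sample frame and
+ * bit_rate is 3 * 6 * 96000 * 8 = 13824000 bit/s.
+ */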
+static int daud_header(AVFormatContext *s, AVFormatParameters *ap) {
+ AVStream *st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S24DAUD;
+ st->codec->codec_tag = MKTAG('d', 'a', 'u', 'd');
+ st->codec->channels = 6;
+ st->codec->sample_rate = 96000;
+ st->codec->bit_rate = 3 * 6 * 96000 * 8;
+ st->codec->block_align = 3 * 6;
+ st->codec->bits_per_sample = 24;
+ return 0;
+}
+
+static int daud_packet(AVFormatContext *s, AVPacket *pkt) {
+ ByteIOContext *pb = &s->pb;
+ int ret, size;
+ if (url_feof(pb))
+ return AVERROR_IO;
+ size = get_be16(pb);
+ get_be16(pb); // unknown
+ ret = av_get_packet(pb, pkt, size);
+ pkt->stream_index = 0;
+ return ret;
+}
+
+AVInputFormat daud_demuxer = {
+ "daud",
+ "D-Cinema audio format",
+ 0,
+ NULL,
+ daud_header,
+ daud_packet,
+ NULL,
+ NULL,
+ .extensions = "302",
+};
diff --git a/contrib/ffmpeg/libavformat/dc1394.c b/contrib/ffmpeg/libavformat/dc1394.c
new file mode 100644
index 000000000..5098c0fdf
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dc1394.c
@@ -0,0 +1,193 @@
+/*
+ * IIDC1394 grab interface (uses libdc1394 and libraw1394)
+ * Copyright (c) 2004 Roman Shaposhnik
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+
+#include <libraw1394/raw1394.h>
+#include <libdc1394/dc1394_control.h>
+
+#undef free
+
+typedef struct dc1394_data {
+ raw1394handle_t handle;
+ dc1394_cameracapture camera;
+ int current_frame;
+ int fps;
+
+ AVPacket packet;
+} dc1394_data;
+
+struct dc1394_frame_format {
+ int width;
+ int height;
+ enum PixelFormat pix_fmt;
+ int frame_size_id;
+} dc1394_frame_formats[] = {
+ { 320, 240, PIX_FMT_UYVY422, MODE_320x240_YUV422 },
+ { 640, 480, PIX_FMT_UYVY411, MODE_640x480_YUV411 },
+ { 640, 480, PIX_FMT_UYVY422, MODE_640x480_YUV422 },
+ { 0, 0, 0, MODE_320x240_YUV422 } /* default -- gotta be the last one */
+};
+
+struct dc1394_frame_rate {
+ int frame_rate;
+ int frame_rate_id;
+} dc1394_frame_rates[] = {
+ { 1875, FRAMERATE_1_875 },
+ { 3750, FRAMERATE_3_75 },
+ { 7500, FRAMERATE_7_5 },
+ { 15000, FRAMERATE_15 },
+ { 30000, FRAMERATE_30 },
+ { 60000, FRAMERATE_60 },
+ { 0, FRAMERATE_30 } /* default -- gotta be the last one */
+};
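+/*
+ * Frame rates above are stored as frames per second multiplied by 1000
+ * (1875 means 1.875 fps), which is why dc1394_read_header() compares them
+ * against av_rescale(1000, time_base.den, time_base.num).
+ */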
+
+static int dc1394_read_header(AVFormatContext *c, AVFormatParameters * ap)
+{
+ dc1394_data* dc1394 = c->priv_data;
+ AVStream* vst;
+ nodeid_t* camera_nodes;
+ int res;
+ struct dc1394_frame_format *fmt;
+ struct dc1394_frame_rate *fps;
+
+ for (fmt = dc1394_frame_formats; fmt->width; fmt++)
+ if (fmt->pix_fmt == ap->pix_fmt && fmt->width == ap->width && fmt->height == ap->height)
+ break;
+
+ for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
+ if (fps->frame_rate == av_rescale(1000, ap->time_base.den, ap->time_base.num))
+ break;
+
+ /* create a video stream */
+ vst = av_new_stream(c, 0);
+ if (!vst)
+ return -1;
+ av_set_pts_info(vst, 64, 1, 1000);
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = CODEC_ID_RAWVIDEO;
+ vst->codec->time_base.den = fps->frame_rate;
+ vst->codec->time_base.num = 1000;
+ vst->codec->width = fmt->width;
+ vst->codec->height = fmt->height;
+ vst->codec->pix_fmt = fmt->pix_fmt;
+
+ /* packet init */
+ av_init_packet(&dc1394->packet);
+ dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
+ dc1394->packet.stream_index = vst->index;
+ dc1394->packet.flags |= PKT_FLAG_KEY;
+
+ dc1394->current_frame = 0;
+ dc1394->fps = fps->frame_rate;
+
+ vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
+
+ /* Now lets prep the hardware */
+ dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
+ if (!dc1394->handle) {
+ av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
+ goto out;
+ }
+ camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
+ if (!camera_nodes || camera_nodes[ap->channel] == DC1394_NO_CAMERA) {
+ av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", ap->channel);
+ goto out_handle;
+ }
+ res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[ap->channel],
+ 0,
+ FORMAT_VGA_NONCOMPRESSED,
+ fmt->frame_size_id,
+ SPEED_400,
+ fps->frame_rate_id, 8, 1,
+ ap->device,
+ &dc1394->camera);
+ dc1394_free_camera_nodes(camera_nodes);
+ if (res != DC1394_SUCCESS) {
+ av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n");
+ goto out_handle;
+ }
+
+ res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node);
+ if (res != DC1394_SUCCESS) {
+ av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
+ goto out_handle_dma;
+ }
+
+ return 0;
+
+out_handle_dma:
+ dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
+ dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
+out_handle:
+ dc1394_destroy_handle(dc1394->handle);
+out:
+ return -1;
+}
+
+static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
+{
+ struct dc1394_data *dc1394 = c->priv_data;
+ int res;
+
+ /* discard stale frame */
+ if (dc1394->current_frame++) {
+ if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
+ av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
+ }
+
+ res = dc1394_dma_single_capture(&dc1394->camera);
+
+ if (res == DC1394_SUCCESS) {
+ dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
+ dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->fps;
+ res = dc1394->packet.size;
+ } else {
+ av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
+ dc1394->packet.data = NULL;
+ res = -1;
+ }
+
+ *pkt = dc1394->packet;
+ return res;
+}
+
+static int dc1394_close(AVFormatContext * context)
+{
+ struct dc1394_data *dc1394 = context->priv_data;
+
+ dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node);
+ dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
+ dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
+ dc1394_destroy_handle(dc1394->handle);
+
+ return 0;
+}
+
+AVInputFormat dc1394_demuxer = {
+ .name = "dc1394",
+ .long_name = "dc1394 A/V grab",
+ .priv_data_size = sizeof(struct dc1394_data),
+ .read_header = dc1394_read_header,
+ .read_packet = dc1394_read_packet,
+ .read_close = dc1394_close,
+ .flags = AVFMT_NOFILE
+};
diff --git a/contrib/ffmpeg/libavformat/dsicin.c b/contrib/ffmpeg/libavformat/dsicin.c
new file mode 100644
index 000000000..f274eadf8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dsicin.c
@@ -0,0 +1,224 @@
+/*
+ * Delphine Software International CIN File Demuxer
+ * Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file dsicin.c
+ * Delphine Software International CIN file demuxer
+ */
+
+#include "avformat.h"
+
+
+typedef struct CinFileHeader {
+ int video_frame_size;
+ int video_frame_width;
+ int video_frame_height;
+ int audio_frequency;
+ int audio_bits;
+ int audio_stereo;
+ int audio_frame_size;
+} CinFileHeader;
+
+typedef struct CinFrameHeader {
+ int audio_frame_type;
+ int video_frame_type;
+ int pal_colors_count;
+ int audio_frame_size;
+ int video_frame_size;
+} CinFrameHeader;
+
+typedef struct CinDemuxContext {
+ int audio_stream_index;
+ int video_stream_index;
+ CinFileHeader file_header;
+ int64_t audio_stream_pts;
+ int64_t video_stream_pts;
+ CinFrameHeader frame_header;
+ int audio_buffer_size;
+} CinDemuxContext;
+
+
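+/*
+ * Layout of the 20-byte file header read below (all values little-endian):
+ * marker 0x55AA0000 (4), video frame size (4), width (2), height (2),
+ * audio frequency (4), audio bits (1), stereo flag (1), audio frame size (2).
+ * The probe checks the marker plus the 22050 Hz / 16-bit / mono fields at
+ * offsets 12, 16 and 17.
+ */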
+static int cin_probe(AVProbeData *p)
+{
+ if (p->buf_size < 18)
+ return 0;
+
+ /* header starts with this special marker */
+ if (LE_32(&p->buf[0]) != 0x55AA0000)
+ return 0;
+
+ /* for accuracy, check some header field values */
+ if (LE_32(&p->buf[12]) != 22050 || p->buf[16] != 16 || p->buf[17] != 0)
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int cin_read_file_header(CinDemuxContext *cin, ByteIOContext *pb) {
+ CinFileHeader *hdr = &cin->file_header;
+
+ if (get_le32(pb) != 0x55AA0000)
+ return AVERROR_INVALIDDATA;
+
+ hdr->video_frame_size = get_le32(pb);
+ hdr->video_frame_width = get_le16(pb);
+ hdr->video_frame_height = get_le16(pb);
+ hdr->audio_frequency = get_le32(pb);
+ hdr->audio_bits = get_byte(pb);
+ hdr->audio_stereo = get_byte(pb);
+ hdr->audio_frame_size = get_le16(pb);
+
+ if (hdr->audio_frequency != 22050 || hdr->audio_bits != 16 || hdr->audio_stereo != 0)
+ return AVERROR_INVALIDDATA;
+
+ return 0;
+}
+
+static int cin_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ int rc;
+ CinDemuxContext *cin = (CinDemuxContext *)s->priv_data;
+ CinFileHeader *hdr = &cin->file_header;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+
+ rc = cin_read_file_header(cin, pb);
+ if (rc)
+ return rc;
+
+ cin->video_stream_pts = 0;
+ cin->audio_stream_pts = 0;
+ cin->audio_buffer_size = 0;
+
+ /* initialize the video decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 32, 1, 12);
+ cin->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_DSICINVIDEO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = hdr->video_frame_width;
+ st->codec->height = hdr->video_frame_height;
+
+ /* initialize the audio decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 32, 1, 22050);
+ cin->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_DSICINAUDIO;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = 1;
+ st->codec->sample_rate = 22050;
+ st->codec->bits_per_sample = 16;
+ st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_sample * st->codec->channels;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ return 0;
+}
+
+static int cin_read_frame_header(CinDemuxContext *cin, ByteIOContext *pb) {
+ CinFrameHeader *hdr = &cin->frame_header;
+
+ hdr->video_frame_type = get_byte(pb);
+ hdr->audio_frame_type = get_byte(pb);
+ hdr->pal_colors_count = get_le16(pb);
+ hdr->video_frame_size = get_le32(pb);
+ hdr->audio_frame_size = get_le32(pb);
+
+ if (url_feof(pb) || url_ferror(pb))
+ return AVERROR_IO;
+
+ if (get_le32(pb) != 0xAA55AA55)
+ return AVERROR_INVALIDDATA;
+
+ return 0;
+}
+
+static int cin_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ CinDemuxContext *cin = (CinDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ CinFrameHeader *hdr = &cin->frame_header;
+ int rc, palette_type, pkt_size;
+
+ if (cin->audio_buffer_size == 0) {
+ rc = cin_read_frame_header(cin, pb);
+ if (rc)
+ return rc;
+
+ if ((int16_t)hdr->pal_colors_count < 0) {
+ hdr->pal_colors_count = -(int16_t)hdr->pal_colors_count;
+ palette_type = 1;
+ } else {
+ palette_type = 0;
+ }
+
+ /* palette and video packet */
+ pkt_size = (palette_type + 3) * hdr->pal_colors_count + hdr->video_frame_size;
+
+ if (av_new_packet(pkt, 4 + pkt_size))
+ return AVERROR_NOMEM;
+
+ pkt->stream_index = cin->video_stream_index;
+ pkt->pts = cin->video_stream_pts++;
+
+ pkt->data[0] = palette_type;
+ pkt->data[1] = hdr->pal_colors_count & 0xFF;
+ pkt->data[2] = hdr->pal_colors_count >> 8;
+ pkt->data[3] = hdr->video_frame_type;
+
+ if (get_buffer(pb, &pkt->data[4], pkt_size) != pkt_size)
+ return AVERROR_IO;
+
+ /* sound buffer will be processed on next read_packet() call */
+ cin->audio_buffer_size = hdr->audio_frame_size;
+ return 0;
+ }
+
+ /* audio packet */
+ if (av_new_packet(pkt, cin->audio_buffer_size))
+ return AVERROR_NOMEM;
+
+ pkt->stream_index = cin->audio_stream_index;
+ pkt->pts = cin->audio_stream_pts;
+ cin->audio_stream_pts += cin->audio_buffer_size * 2 / cin->file_header.audio_frame_size;
+
+ if (get_buffer(pb, pkt->data, cin->audio_buffer_size) != cin->audio_buffer_size)
+ return AVERROR_IO;
+
+ cin->audio_buffer_size = 0;
+ return 0;
+}
+
+AVInputFormat dsicin_demuxer = {
+ "dsicin",
+ "Delphine Software International CIN format",
+ sizeof(CinDemuxContext),
+ cin_probe,
+ cin_read_header,
+ cin_read_packet,
+};
diff --git a/contrib/ffmpeg/libavformat/dv.c b/contrib/ffmpeg/libavformat/dv.c
new file mode 100644
index 000000000..3ff8a3fe2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dv.c
@@ -0,0 +1,451 @@
+/*
+ * General DV muxer/demuxer
+ * Copyright (c) 2003 Roman Shaposhnik
+ *
+ * Many thanks to Dan Dennedy <dan@dennedy.org> for providing a wealth
+ * of DV technical info.
+ *
+ * Raw DV format
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * 50 Mbps (DVCPRO50) support
+ * Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <time.h>
+#include "avformat.h"
+#include "dvdata.h"
+#include "dv.h"
+
+struct DVDemuxContext {
+ const DVprofile* sys; /* Current DV profile. E.g.: 525/60, 625/50 */
+ AVFormatContext* fctx;
+ AVStream* vst;
+ AVStream* ast[2];
+ AVPacket audio_pkt[2];
+ uint8_t audio_buf[2][8192];
+ int ach;
+ int frames;
+ uint64_t abytes;
+};
+
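+/*
+ * Expand a 12-bit companded DV audio sample (sign-extended first) into a
+ * 16-bit linear value: samples with a small exponent field pass through
+ * unchanged, larger ones are shifted back up. For example, following the
+ * code below, an input of 0x300 comes out as (0x300 - 512) << 2 = 0x400.
+ */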
+static inline uint16_t dv_audio_12to16(uint16_t sample)
+{
+ uint16_t shift, result;
+
+ sample = (sample < 0x800) ? sample : sample | 0xf000;
+ shift = (sample & 0xf00) >> 8;
+
+ if (shift < 0x2 || shift > 0xd) {
+ result = sample;
+ } else if (shift < 0x8) {
+ shift--;
+ result = (sample - (256 * shift)) << shift;
+ } else {
+ shift = 0xe - shift;
+ result = ((sample + ((256 * shift) + 1)) << shift) - 1;
+ }
+
+ return result;
+}
+
+/*
+ * This is the dumbest implementation of all -- it simply looks at
+ * a fixed offset and fails if the pack isn't there. We might want
+ * to have a fallback mechanism that does a complete search for missing packs.
+ */
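+/*
+ * The offsets count 80-byte DIF blocks from the start of the frame: 80*6
+ * skips the header, subcode and VAUX blocks of the first DIF sequence, each
+ * 80*16 step skips one audio DIF block plus its 15 video DIF blocks, and the
+ * final small constant skips into the pack data of the target block.
+ */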
+static const uint8_t* dv_extract_pack(uint8_t* frame, enum dv_pack_type t)
+{
+ int offs;
+
+ switch (t) {
+ case dv_audio_source:
+ offs = (80*6 + 80*16*3 + 3);
+ break;
+ case dv_audio_control:
+ offs = (80*6 + 80*16*4 + 3);
+ break;
+ case dv_video_control:
+ offs = (80*5 + 48 + 5);
+ break;
+ default:
+ return NULL;
+ }
+
+ return (frame[offs] == t ? &frame[offs] : NULL);
+}
+
+/*
+ * There are a couple of assumptions being made here:
+ * 1. By default we silence erroneous (0x8000/16bit 0x800/12bit) audio samples.
+ * We can pass them upwards once ffmpeg is ready to deal with them.
+ * 2. We don't do software emphasis.
+ * 3. Audio is always returned as 16bit linear samples: 12bit nonlinear samples
+ * are converted into 16bit linear ones.
+ */
+static int dv_extract_audio(uint8_t* frame, uint8_t* pcm, uint8_t* pcm2,
+ const DVprofile *sys)
+{
+ int size, chan, i, j, d, of, smpls, freq, quant, half_ch;
+ uint16_t lc, rc;
+ const uint8_t* as_pack;
+
+ as_pack = dv_extract_pack(frame, dv_audio_source);
+ if (!as_pack) /* No audio ? */
+ return 0;
+
+ smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */
+ freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48KHz, 1 - 44,1kHz, 2 - 32 kHz */
+ quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */
+
+ if (quant > 1)
+ return -1; /* Unsupported quantization */
+
+ size = (sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */
+ half_ch = sys->difseg_size/2;
+
+ /* for each DIF channel */
+ for (chan = 0; chan < sys->n_difchan; chan++) {
+ /* for each DIF segment */
+ for (i = 0; i < sys->difseg_size; i++) {
+ frame += 6 * 80; /* skip DIF segment header */
+ if (quant == 1 && i == half_ch) {
+ /* next stereo channel (12bit mode only) */
+ if (!pcm2)
+ break;
+ else
+ pcm = pcm2;
+ }
+
+ /* for each AV sequence */
+ for (j = 0; j < 9; j++) {
+ for (d = 8; d < 80; d += 2) {
+ if (quant == 0) { /* 16bit quantization */
+ of = sys->audio_shuffle[i][j] + (d - 8)/2 * sys->audio_stride;
+ if (of*2 >= size)
+ continue;
+
+ pcm[of*2] = frame[d+1]; // FIXME: maybe we have to admit
+ pcm[of*2+1] = frame[d]; // that DV is big-endian PCM
+ if (pcm[of*2+1] == 0x80 && pcm[of*2] == 0x00)
+ pcm[of*2+1] = 0;
+ } else { /* 12bit quantization */
+ lc = ((uint16_t)frame[d] << 4) |
+ ((uint16_t)frame[d+2] >> 4);
+ rc = ((uint16_t)frame[d+1] << 4) |
+ ((uint16_t)frame[d+2] & 0x0f);
+ lc = (lc == 0x800 ? 0 : dv_audio_12to16(lc));
+ rc = (rc == 0x800 ? 0 : dv_audio_12to16(rc));
+
+ of = sys->audio_shuffle[i%half_ch][j] + (d - 8)/3 * sys->audio_stride;
+ if (of*2 >= size)
+ continue;
+
+ pcm[of*2] = lc & 0xff; // FIXME: may be we have to admit
+ pcm[of*2+1] = lc >> 8; // that DV is a big endian PCM
+ of = sys->audio_shuffle[i%half_ch+half_ch][j] +
+ (d - 8)/3 * sys->audio_stride;
+ pcm[of*2] = rc & 0xff; // FIXME: may be we have to admit
+ pcm[of*2+1] = rc >> 8; // that DV is a big endian PCM
+ ++d;
+ }
+ }
+
+ frame += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
+ }
+ }
+
+ /* next stereo channel (50Mbps only) */
+ if(!pcm2)
+ break;
+ pcm = pcm2;
+ }
+
+ return size;
+}
+
+static int dv_extract_audio_info(DVDemuxContext* c, uint8_t* frame)
+{
+ const uint8_t* as_pack;
+ int freq, stype, smpls, quant, i, ach;
+
+ as_pack = dv_extract_pack(frame, dv_audio_source);
+ if (!as_pack || !c->sys) { /* No audio ? */
+ c->ach = 0;
+ return 0;
+ }
+
+ smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */
+ freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48KHz, 1 - 44,1kHz, 2 - 32 kHz */
+ stype = (as_pack[3] & 0x1f); /* 0 - 2CH, 2 - 4CH */
+ quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */
+
+ /* note: ach counts PAIRS of channels (i.e. stereo channels) */
+ ach = (stype == 2 || (quant && (freq == 2))) ? 2 : 1;
+
+ /* Dynamic handling of the audio streams in DV */
+ for (i=0; i<ach; i++) {
+ if (!c->ast[i]) {
+ c->ast[i] = av_new_stream(c->fctx, 0);
+ if (!c->ast[i])
+ break;
+ av_set_pts_info(c->ast[i], 64, 1, 30000);
+ c->ast[i]->codec->codec_type = CODEC_TYPE_AUDIO;
+ c->ast[i]->codec->codec_id = CODEC_ID_PCM_S16LE;
+
+ av_init_packet(&c->audio_pkt[i]);
+ c->audio_pkt[i].size = 0;
+ c->audio_pkt[i].data = c->audio_buf[i];
+ c->audio_pkt[i].stream_index = c->ast[i]->index;
+ c->audio_pkt[i].flags |= PKT_FLAG_KEY;
+ }
+ c->ast[i]->codec->sample_rate = dv_audio_frequency[freq];
+ c->ast[i]->codec->channels = 2;
+ c->ast[i]->codec->bit_rate = 2 * dv_audio_frequency[freq] * 16;
+ c->ast[i]->start_time = 0;
+ }
+ c->ach = i;
+
+ return (c->sys->audio_min_samples[freq] + smpls) * 4; /* 2ch, 2bytes */;
+}
+
+static int dv_extract_video_info(DVDemuxContext *c, uint8_t* frame)
+{
+ const uint8_t* vsc_pack;
+ AVCodecContext* avctx;
+ int apt, is16_9;
+ int size = 0;
+
+ if (c->sys) {
+ avctx = c->vst->codec;
+
+ av_set_pts_info(c->vst, 64, c->sys->frame_rate_base, c->sys->frame_rate);
+ avctx->time_base= (AVRational){c->sys->frame_rate_base, c->sys->frame_rate};
+ if(!avctx->width){
+ avctx->width = c->sys->width;
+ avctx->height = c->sys->height;
+ }
+ avctx->pix_fmt = c->sys->pix_fmt;
+
+ /* finding out SAR is a little bit messy */
+ vsc_pack = dv_extract_pack(frame, dv_video_control);
+ apt = frame[4] & 0x07;
+ is16_9 = (vsc_pack && ((vsc_pack[2] & 0x07) == 0x02 ||
+ (!apt && (vsc_pack[2] & 0x07) == 0x07)));
+ avctx->sample_aspect_ratio = c->sys->sar[is16_9];
+ avctx->bit_rate = av_rescale(c->sys->frame_size * 8,
+ c->sys->frame_rate,
+ c->sys->frame_rate_base);
+ size = c->sys->frame_size;
+ }
+ return size;
+}
+
+/*
+ * The following 3 functions constitute our interface to the world
+ */
+
+DVDemuxContext* dv_init_demux(AVFormatContext *s)
+{
+ DVDemuxContext *c;
+
+ c = av_mallocz(sizeof(DVDemuxContext));
+ if (!c)
+ return NULL;
+
+ c->vst = av_new_stream(s, 0);
+ if (!c->vst) {
+ av_free(c);
+ return NULL;
+ }
+
+ c->sys = NULL;
+ c->fctx = s;
+ c->ast[0] = c->ast[1] = NULL;
+ c->ach = 0;
+ c->frames = 0;
+ c->abytes = 0;
+
+ c->vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ c->vst->codec->codec_id = CODEC_ID_DVVIDEO;
+ c->vst->codec->bit_rate = 25000000;
+ c->vst->start_time = 0;
+
+ return c;
+}
+
+int dv_get_packet(DVDemuxContext *c, AVPacket *pkt)
+{
+ int size = -1;
+ int i;
+
+ for (i=0; i<c->ach; i++) {
+ if (c->ast[i] && c->audio_pkt[i].size) {
+ *pkt = c->audio_pkt[i];
+ c->audio_pkt[i].size = 0;
+ size = pkt->size;
+ break;
+ }
+ }
+
+ return size;
+}
+
+int dv_produce_packet(DVDemuxContext *c, AVPacket *pkt,
+ uint8_t* buf, int buf_size)
+{
+ int size, i;
+
+ if (buf_size < DV_PROFILE_BYTES ||
+ !(c->sys = dv_frame_profile(buf)) ||
+ buf_size < c->sys->frame_size) {
+ return -1; /* Broken frame, or not enough data */
+ }
+
+ /* Queueing audio packet */
+ /* FIXME: in case of no audio/bad audio we have to do something */
+ size = dv_extract_audio_info(c, buf);
+ for (i=0; i<c->ach; i++) {
+ c->audio_pkt[i].size = size;
+ c->audio_pkt[i].pts = c->abytes * 30000*8 / c->ast[i]->codec->bit_rate;
+ }
+ dv_extract_audio(buf, c->audio_buf[0], c->audio_buf[1], c->sys);
+ c->abytes += size;
+
+ /* Now it's time to return video packet */
+ size = dv_extract_video_info(c, buf);
+ av_init_packet(pkt);
+ pkt->data = buf;
+ pkt->size = size;
+ pkt->flags |= PKT_FLAG_KEY;
+ pkt->stream_index = c->vst->id;
+ pkt->pts = c->frames;
+
+ c->frames++;
+
+ return size;
+}
+
+static int64_t dv_frame_offset(AVFormatContext *s, DVDemuxContext *c,
+ int64_t timestamp, int flags)
+{
+ // FIXME: sys may be wrong if last dv_read_packet() failed (buffer is junk)
+ const DVprofile* sys = dv_codec_profile(c->vst->codec);
+ int64_t offset;
+ int64_t size = url_fsize(&s->pb);
+ int64_t max_offset = ((size-1) / sys->frame_size) * sys->frame_size;
+
+ offset = sys->frame_size * timestamp;
+
+ if (offset > max_offset) offset = max_offset;
+ else if (offset < 0) offset = 0;
+
+ return offset;
+}
+
+void dv_flush_audio_packets(DVDemuxContext *c)
+{
+ c->audio_pkt[0].size = c->audio_pkt[1].size = 0;
+}
+
+/************************************************************
+ * Implementation of the easiest DV storage of all -- raw DV.
+ ************************************************************/
+
+typedef struct RawDVContext {
+ DVDemuxContext* dv_demux;
+ uint8_t buf[DV_MAX_FRAME_SIZE];
+} RawDVContext;
+
+static int dv_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ RawDVContext *c = s->priv_data;
+
+ c->dv_demux = dv_init_demux(s);
+ if (!c->dv_demux)
+ return -1;
+
+ if (get_buffer(&s->pb, c->buf, DV_PROFILE_BYTES) <= 0 ||
+ url_fseek(&s->pb, -DV_PROFILE_BYTES, SEEK_CUR) < 0)
+ return AVERROR_IO;
+
+ c->dv_demux->sys = dv_frame_profile(c->buf);
+ s->bit_rate = av_rescale(c->dv_demux->sys->frame_size * 8,
+ c->dv_demux->sys->frame_rate,
+ c->dv_demux->sys->frame_rate_base);
+
+ return 0;
+}
+
+
+static int dv_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int size;
+ RawDVContext *c = s->priv_data;
+
+ size = dv_get_packet(c->dv_demux, pkt);
+
+ if (size < 0) {
+ size = c->dv_demux->sys->frame_size;
+ if (get_buffer(&s->pb, c->buf, size) <= 0)
+ return AVERROR_IO;
+
+ size = dv_produce_packet(c->dv_demux, pkt, c->buf, size);
+ }
+
+ return size;
+}
+
+static int dv_read_seek(AVFormatContext *s, int stream_index,
+ int64_t timestamp, int flags)
+{
+ RawDVContext *r = s->priv_data;
+ DVDemuxContext *c = r->dv_demux;
+ int64_t offset= dv_frame_offset(s, c, timestamp, flags);
+
+ c->frames= offset / c->sys->frame_size;
+ if (c->ach)
+ c->abytes= av_rescale(c->frames,
+ c->ast[0]->codec->bit_rate * (int64_t)c->sys->frame_rate_base,
+ 8*c->sys->frame_rate);
+
+ dv_flush_audio_packets(c);
+ return url_fseek(&s->pb, offset, SEEK_SET);
+}
+
+static int dv_read_close(AVFormatContext *s)
+{
+ RawDVContext *c = s->priv_data;
+ av_free(c->dv_demux);
+ return 0;
+}
+
+#ifdef CONFIG_DV_DEMUXER
+AVInputFormat dv_demuxer = {
+ "dv",
+ "DV video format",
+ sizeof(RawDVContext),
+ NULL,
+ dv_read_header,
+ dv_read_packet,
+ dv_read_close,
+ dv_read_seek,
+ .extensions = "dv,dif",
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/dv.h b/contrib/ffmpeg/libavformat/dv.h
new file mode 100644
index 000000000..f39d22c9f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dv.h
@@ -0,0 +1,37 @@
+/*
+ * General DV muxer/demuxer
+ * Copyright (c) 2003 Roman Shaposhnik
+ *
+ * Many thanks to Dan Dennedy <dan@dennedy.org> for providing a wealth
+ * of DV technical info.
+ *
+ * Raw DV format
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+typedef struct DVDemuxContext DVDemuxContext;
+DVDemuxContext* dv_init_demux(AVFormatContext* s);
+int dv_get_packet(DVDemuxContext*, AVPacket *);
+int dv_produce_packet(DVDemuxContext*, AVPacket*, uint8_t*, int);
+void dv_flush_audio_packets(DVDemuxContext*);
+
+typedef struct DVMuxContext DVMuxContext;
+DVMuxContext* dv_init_mux(AVFormatContext* s);
+int dv_assemble_frame(DVMuxContext *c, AVStream*, const uint8_t*, int, uint8_t**);
+void dv_delete_mux(DVMuxContext*);
diff --git a/contrib/ffmpeg/libavformat/dv1394.c b/contrib/ffmpeg/libavformat/dv1394.c
new file mode 100644
index 000000000..f00d47435
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dv1394.c
@@ -0,0 +1,240 @@
+/*
+ * Linux DV1394 interface
+ * Copyright (c) 2003 Max Krasnyansky <maxk@qualcomm.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/poll.h>
+#include <sys/time.h>
+#include <time.h>
+
+#include "avformat.h"
+
+#undef DV1394_DEBUG
+
+#include "dv1394.h"
+#include "dv.h"
+
+struct dv1394_data {
+ int fd;
+ int channel;
+ int format;
+
+ void *ring; /* Ring buffer */
+ int index; /* Current frame index */
+ int avail; /* Number of frames available for reading */
+ int done; /* Number of completed frames */
+
+ DVDemuxContext* dv_demux; /* Generic DV muxing/demuxing context */
+};
+
+/*
+ * The trick here is to kludge around a well-known problem with the kernel
+ * Oopsing when you try to capture PAL on a device node configured for NTSC.
+ * That's why we have to configure the device node for PAL, and then read
+ * only an NTSC frame's worth of data.
+ */
+static int dv1394_reset(struct dv1394_data *dv)
+{
+ struct dv1394_init init;
+
+ init.channel = dv->channel;
+ init.api_version = DV1394_API_VERSION;
+ init.n_frames = DV1394_RING_FRAMES;
+ init.format = DV1394_PAL;
+
+ if (ioctl(dv->fd, DV1394_INIT, &init) < 0)
+ return -1;
+
+ dv->avail = dv->done = 0;
+ return 0;
+}
+
+static int dv1394_start(struct dv1394_data *dv)
+{
+ /* Tell DV1394 driver to enable receiver */
+ if (ioctl(dv->fd, DV1394_START_RECEIVE, 0) < 0) {
+ perror("Failed to start receiver");
+ return -1;
+ }
+ return 0;
+}
+
+static int dv1394_read_header(AVFormatContext * context, AVFormatParameters * ap)
+{
+ struct dv1394_data *dv = context->priv_data;
+ const char *video_device;
+
+ dv->dv_demux = dv_init_demux(context);
+ if (!dv->dv_demux)
+ goto failed;
+
+ if (ap->standard && !strcasecmp(ap->standard, "pal"))
+ dv->format = DV1394_PAL;
+ else
+ dv->format = DV1394_NTSC;
+
+ if (ap->channel)
+ dv->channel = ap->channel;
+ else
+ dv->channel = DV1394_DEFAULT_CHANNEL;
+
+ /* Open and initialize DV1394 device */
+ video_device = ap->device;
+ if (!video_device)
+ video_device = "/dev/dv1394/0";
+ dv->fd = open(video_device, O_RDONLY);
+ if (dv->fd < 0) {
+ perror("Failed to open DV interface");
+ goto failed;
+ }
+
+ if (dv1394_reset(dv) < 0) {
+ perror("Failed to initialize DV interface");
+ goto failed;
+ }
+
+ dv->ring = mmap(NULL, DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES,
+ PROT_READ, MAP_PRIVATE, dv->fd, 0);
+ if (dv->ring == MAP_FAILED) {
+ perror("Failed to mmap DV ring buffer");
+ goto failed;
+ }
+
+ if (dv1394_start(dv) < 0)
+ goto failed;
+
+ return 0;
+
+failed:
+ close(dv->fd);
+ return AVERROR_IO;
+}
+
+static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt)
+{
+ struct dv1394_data *dv = context->priv_data;
+ int size;
+
+ size = dv_get_packet(dv->dv_demux, pkt);
+ if (size > 0)
+ return size;
+
+ if (!dv->avail) {
+ struct dv1394_status s;
+ struct pollfd p;
+
+ if (dv->done) {
+ /* Request more frames */
+ if (ioctl(dv->fd, DV1394_RECEIVE_FRAMES, dv->done) < 0) {
+ /* This usually means that the ring buffer overflowed.
+ * We have to reset :(.
+ */
+
+ av_log(context, AV_LOG_ERROR, "DV1394: Ring buffer overflow. Reseting ..\n");
+
+ dv1394_reset(dv);
+ dv1394_start(dv);
+ }
+ dv->done = 0;
+ }
+
+ /* Wait until more frames are available */
+restart_poll:
+ p.fd = dv->fd;
+ p.events = POLLIN | POLLERR | POLLHUP;
+ if (poll(&p, 1, -1) < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ goto restart_poll;
+ perror("Poll failed");
+ return AVERROR_IO;
+ }
+
+ if (ioctl(dv->fd, DV1394_GET_STATUS, &s) < 0) {
+ perror("Failed to get status");
+ return AVERROR_IO;
+ }
+#ifdef DV1394_DEBUG
+ av_log(context, AV_LOG_DEBUG, "DV1394: status\n"
+ "\tactive_frame\t%d\n"
+ "\tfirst_clear_frame\t%d\n"
+ "\tn_clear_frames\t%d\n"
+ "\tdropped_frames\t%d\n",
+ s.active_frame, s.first_clear_frame,
+ s.n_clear_frames, s.dropped_frames);
+#endif
+
+ dv->avail = s.n_clear_frames;
+ dv->index = s.first_clear_frame;
+ dv->done = 0;
+
+ if (s.dropped_frames) {
+ av_log(context, AV_LOG_ERROR, "DV1394: Frame drop detected (%d). Reseting ..\n",
+ s.dropped_frames);
+
+ dv1394_reset(dv);
+ dv1394_start(dv);
+ }
+ }
+
+#ifdef DV1394_DEBUG
+ av_log(context, AV_LOG_DEBUG, "index %d, avail %d, done %d\n", dv->index, dv->avail,
+ dv->done);
+#endif
+
+ size = dv_produce_packet(dv->dv_demux, pkt,
+ dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
+ DV1394_PAL_FRAME_SIZE);
+ dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
+ dv->done++; dv->avail--;
+
+ return size;
+}
+
+static int dv1394_close(AVFormatContext * context)
+{
+ struct dv1394_data *dv = context->priv_data;
+
+ /* Shutdown DV1394 receiver */
+ if (ioctl(dv->fd, DV1394_SHUTDOWN, 0) < 0)
+ perror("Failed to shutdown DV1394");
+
+ /* Unmap ring buffer */
+ if (munmap(dv->ring, DV1394_NTSC_FRAME_SIZE * DV1394_RING_FRAMES) < 0)
+ perror("Failed to munmap DV1394 ring buffer");
+
+ close(dv->fd);
+ av_free(dv->dv_demux);
+
+ return 0;
+}
+
+AVInputFormat dv1394_demuxer = {
+ .name = "dv1394",
+ .long_name = "dv1394 A/V grab",
+ .priv_data_size = sizeof(struct dv1394_data),
+ .read_header = dv1394_read_header,
+ .read_packet = dv1394_read_packet,
+ .read_close = dv1394_close,
+ .flags = AVFMT_NOFILE
+};
diff --git a/contrib/ffmpeg/libavformat/dv1394.h b/contrib/ffmpeg/libavformat/dv1394.h
new file mode 100644
index 000000000..f7db40108
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dv1394.h
@@ -0,0 +1,357 @@
+/*
+ * dv1394.h - DV input/output over IEEE 1394 on OHCI chips
+ * Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
+ * receive, proc_fs by Dan Dennedy <dan@dennedy.org>
+ *
+ * based on:
+ * video1394.h - driver for OHCI 1394 boards
+ * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
+ * Peter Schlaile <udbz@rz.uni-karlsruhe.de>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef _DV_1394_H
+#define _DV_1394_H
+
+#define DV1394_DEFAULT_CHANNEL 63
+#define DV1394_DEFAULT_CARD 0
+#define DV1394_RING_FRAMES 20
+
+#define DV1394_WIDTH 720
+#define DV1394_NTSC_HEIGHT 480
+#define DV1394_PAL_HEIGHT 576
+
+/* This is the public user-space interface. Try not to break it. */
+
+#define DV1394_API_VERSION 0x20011127
+
+/* ********************
+ ** **
+ ** DV1394 API **
+ ** **
+ ********************
+
+ There are two methods of operating the DV1394 DV output device.
+
+ 1)
+
+ The simplest is an interface based on write(): simply write
+ full DV frames of data to the device, and they will be transmitted
+ as quickly as possible. The FD may be set for non-blocking I/O,
+ in which case you can use select() or poll() to wait for output
+ buffer space.
+
+ To set the DV output parameters (e.g. whether you want NTSC or PAL
+ video), use the DV1394_INIT ioctl, passing in the parameters you
+ want in a struct dv1394_init.
+
+ Example 1:
+ To play a raw .DV file: cat foo.DV > /dev/dv1394
+ (cat will use write() internally)
+
+ Example 2:
+ static struct dv1394_init init = {
+ 0x63, (broadcast channel)
+ 4, (four-frame ringbuffer)
+ DV1394_NTSC, (send NTSC video)
+ 0, 0 (default empty packet rate)
+ }
+
+ ioctl(fd, DV1394_INIT, &init);
+
+ while(1) {
+ read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
+ write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
+ }
+
+ 2)
+
+ For more control over buffering, and to avoid unnecessary copies
+ of the DV data, you can use the more sophisticated mmap() interface.
+ First, call the DV1394_INIT ioctl to specify your parameters,
+ including the number of frames in the ringbuffer. Then, calling mmap()
+ on the dv1394 device will give you direct access to the ringbuffer
+ from which the DV card reads your frame data.
+
+ The ringbuffer is simply one large, contiguous region of memory
+ containing two or more frames of packed DV data. Each frame of DV data
+ is 120000 bytes (NTSC) or 144000 bytes (PAL).
+
+ Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
+ ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
+ or select()/poll() to wait until the frames are transmitted. Next, you'll
+ need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
+ frames are clear (ready to be filled with new DV data). Finally, use
+ DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
+
+
+ Example: here is what a four-frame ringbuffer might look like
+ during DV transmission:
+
+
+ frame 0 frame 1 frame 2 frame 3
+
+ *--------------------------------------*
+ | CLEAR | DV data | DV data | CLEAR |
+ *--------------------------------------*
+ <ACTIVE>
+
+ transmission goes in this direction --->>>
+
+
+ The DV hardware is currently transmitting the data in frame 1.
+ Once frame 1 is finished, it will automatically transmit frame 2.
+ (if frame 2 finishes before frame 3 is submitted, the device
+ will continue to transmit frame 2, and will increase the dropped_frames
+ counter each time it repeats the transmission).
+
+
+ If you called DV1394_GET_STATUS at this instant, you would
+ receive the following values:
+
+ n_frames = 4
+ active_frame = 1
+ first_clear_frame = 3
+ n_clear_frames = 2
+
+ At this point, you should write new DV data into frame 3 and optionally
+ frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
+ it may transmit the new frames.
+
+ ERROR HANDLING
+
+ An error (buffer underflow/overflow or a break in the DV stream due
+ to a 1394 bus reset) can be detected by checking the dropped_frames
+ field of struct dv1394_status (obtained through the
+ DV1394_GET_STATUS ioctl).
+
+ The best way to recover from such an error is to re-initialize
+ dv1394, either by using the DV1394_INIT ioctl call, or closing the
+ file descriptor and opening it again. (note that you must unmap all
+ ringbuffer mappings when closing the file descriptor, or else
+ dv1394 will still be considered 'in use').
+
+ MAIN LOOP
+
+ For maximum efficiency and robustness against bus errors, you are
+ advised to model the main loop of your application after the
+ following pseudo-code example:
+
+ (checks of system call return values omitted for brevity; always
+ check return values in your code!)
+
+ while( frames left ) {
+
+ struct pollfd *pfd = ...;
+
+ pfd->fd = dv1394_fd;
+ pfd->revents = 0;
+ pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
+
+ (add other sources of I/O here)
+
+ poll(pfd, 1, -1); (or select(); add a timeout if you want)
+
+ if(pfd->revents) {
+ struct dv1394_status status;
+
+ ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
+
+ if(status.dropped_frames > 0) {
+ reset_dv1394();
+ } else {
+ for(int i = 0; i < status.n_clear_frames; i++) {
+ copy_DV_frame();
+ }
+ }
+ }
+ }
+
+ where copy_DV_frame() reads or writes on the dv1394 file descriptor
+ (read/write mode) or copies data to/from the mmap ringbuffer and
+ then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
+ frames are available (mmap mode).
+
+ reset_dv1394() is called in the event of a buffer
+ underflow/overflow or a halt in the DV stream (e.g. due to a 1394
+ bus reset). To guarantee recovery from the error, this function
+ should close the dv1394 file descriptor (and munmap() all
+ ringbuffer mappings, if you are using them), then re-open the
+ dv1394 device (and re-map the ringbuffer).
+
+*/
+
+
+/* maximum number of frames in the ringbuffer */
+#define DV1394_MAX_FRAMES 32
+
+/* number of *full* isochronous packets per DV frame */
+#define DV1394_NTSC_PACKETS_PER_FRAME 250
+#define DV1394_PAL_PACKETS_PER_FRAME 300
+
+/* size of one frame's worth of DV data, in bytes */
+#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
+#define DV1394_PAL_FRAME_SIZE (480 * DV1394_PAL_PACKETS_PER_FRAME)
+
+
+/* ioctl() commands */
+
+enum {
+ /* I don't like using 0 as a valid ioctl() */
+ DV1394_INVALID = 0,
+
+
+ /* get the driver ready to transmit video.
+ pass a struct dv1394_init* as the parameter (see below),
+ or NULL to get default parameters */
+ DV1394_INIT,
+
+
+ /* stop transmitting video and free the ringbuffer */
+ DV1394_SHUTDOWN,
+
+
+ /* submit N new frames to be transmitted, where
+ the index of the first new frame is first_clear_buffer,
+ and the index of the last new frame is
+ (first_clear_buffer + N) % n_frames */
+ DV1394_SUBMIT_FRAMES,
+
+
+ /* block until N buffers are clear (pass N as the parameter)
+ Because we re-transmit the last frame on underrun, there
+ will at most be n_frames - 1 clear frames at any time */
+ DV1394_WAIT_FRAMES,
+
+ /* capture new frames that have been received, where
+ the index of the first new frame is first_clear_buffer,
+ and the index of the last new frame is
+ (first_clear_buffer + N) % n_frames */
+ DV1394_RECEIVE_FRAMES,
+
+
+ DV1394_START_RECEIVE,
+
+
+ /* pass a struct dv1394_status* as the parameter (see below) */
+ DV1394_GET_STATUS,
+};
+
+
+
+enum pal_or_ntsc {
+ DV1394_NTSC = 0,
+ DV1394_PAL
+};
+
+
+
+
+/* this is the argument to DV1394_INIT */
+struct dv1394_init {
+ /* DV1394_API_VERSION */
+ unsigned int api_version;
+
+ /* isochronous transmission channel to use */
+ unsigned int channel;
+
+ /* number of frames in the ringbuffer. Must be at least 2
+ and at most DV1394_MAX_FRAMES. */
+ unsigned int n_frames;
+
+ /* send/receive PAL or NTSC video format */
+ enum pal_or_ntsc format;
+
+ /* the following are used only for transmission */
+
+ /* set these to zero unless you want a
+ non-default empty packet rate (see below) */
+ unsigned long cip_n;
+ unsigned long cip_d;
+
+ /* set this to zero unless you want a
+ non-default SYT cycle offset (default = 3 cycles) */
+ unsigned int syt_offset;
+};
+
+/* NOTE: you may only allocate the DV frame ringbuffer once each time
+ you open the dv1394 device. DV1394_INIT will fail if you call it a
+ second time with different 'n_frames' or 'format' arguments (which
+ would imply a different size for the ringbuffer). If you need a
+ different buffer size, simply close and re-open the device, then
+ initialize it with your new settings. */
+
+/* Q: What are cip_n and cip_d? */
+
+/*
+ A: DV video streams do not utilize 100% of the potential bandwidth offered
+ by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
+ DV devices must periodically insert empty packets into the 1394 data stream.
+ Typically there is one empty packet per 14-16 data-carrying packets.
+
+ Some DV devices will accept a wide range of empty packet rates, while others
+ require a precise rate. If the dv1394 driver produces empty packets at
+ a rate that your device does not accept, you may see ugly patterns on the
+ DV output, or even no output at all.
+
+ The default empty packet insertion rate seems to work for many people; if
+ your DV output is stable, you can simply ignore this discussion. However,
+ we have exposed the empty packet rate as a parameter to support devices that
+ do not work with the default rate.
+
+ The decision to insert an empty packet is made with a numerator/denominator
+ algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
+ You can alter the empty packet rate by passing non-zero values for cip_n
+ and cip_d to the INIT ioctl.
+
+ */
+
+
+
+struct dv1394_status {
+ /* this embedded init struct returns the current dv1394
+ parameters in use */
+ struct dv1394_init init;
+
+ /* the ringbuffer frame that is currently being
+ displayed. (-1 if the device is not transmitting anything) */
+ int active_frame;
+
+ /* index of the first buffer (ahead of active_frame) that
+ is ready to be filled with data */
+ unsigned int first_clear_frame;
+
+ /* how many buffers, including first_clear_buffer, are
+ ready to be filled with data */
+ unsigned int n_clear_frames;
+
+ /* how many times the DV stream has underflowed, overflowed,
+ or otherwise encountered an error, since the previous call
+ to DV1394_GET_STATUS */
+ unsigned int dropped_frames;
+
+ /* N.B. The dropped_frames counter is only a lower bound on the actual
+ number of dropped frames, with the special case that if dropped_frames
+ is zero, then it is guaranteed that NO frames have been dropped
+ since the last call to DV1394_GET_STATUS.
+ */
+};
+
+
+#endif /* _DV_1394_H */
diff --git a/contrib/ffmpeg/libavformat/dvenc.c b/contrib/ffmpeg/libavformat/dvenc.c
new file mode 100644
index 000000000..79cee7af6
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/dvenc.c
@@ -0,0 +1,407 @@
+/*
+ * General DV muxer/demuxer
+ * Copyright (c) 2003 Roman Shaposhnik
+ *
+ * Many thanks to Dan Dennedy <dan@dennedy.org> for providing a wealth
+ * of DV technical info.
+ *
+ * Raw DV format
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * 50 Mbps (DVCPRO50) support
+ * Copyright (c) 2006 Daniel Maas <dmaas@maasdigital.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <time.h>
+#include "avformat.h"
+#include "dvdata.h"
+#include "dv.h"
+#include "fifo.h"
+
+struct DVMuxContext {
+ const DVprofile* sys; /* Current DV profile. E.g.: 525/60, 625/50 */
+ int n_ast; /* Number of stereo audio streams (up to 2) */
+ AVStream *ast[2]; /* Stereo audio streams */
+ AVFifoBuffer audio_data[2]; /* Fifo for storing excessive amounts of PCM */
+ int frames; /* Number of a current frame */
+ time_t start_time; /* Start time of recording */
+ int has_audio; /* frame under construction has audio */
+ int has_video; /* frame under construction has video */
+ uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under construction */
+};
+
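+/*
+ * For each of the (up to 12) DIF sequences this table lists which AAUX pack
+ * id to write into each of the 9 audio DIF blocks: 0x50-0x53 are presumably
+ * the AAUX source, control, rec date and rec time packs (see dvdata.h), and
+ * 0xff means "write an empty/unknown pack" via the default case of
+ * dv_write_pack().
+ */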
+static const int dv_aaux_packs_dist[12][9] = {
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+ { 0xff, 0xff, 0xff, 0x50, 0x51, 0x52, 0x53, 0xff, 0xff },
+ { 0x50, 0x51, 0x52, 0x53, 0xff, 0xff, 0xff, 0xff, 0xff },
+};
+
+static int dv_audio_frame_size(const DVprofile* sys, int frame)
+{
+ return sys->audio_samples_dist[frame % (sizeof(sys->audio_samples_dist)/
+ sizeof(sys->audio_samples_dist[0]))];
+}
+
+static int dv_write_pack(enum dv_pack_type pack_id, DVMuxContext *c, uint8_t* buf)
+{
+ struct tm tc;
+ time_t ct;
+ int ltc_frame;
+
+ buf[0] = (uint8_t)pack_id;
+ switch (pack_id) {
+ case dv_timecode:
+ ct = (time_t)(c->frames / ((float)c->sys->frame_rate /
+ (float)c->sys->frame_rate_base));
+ brktimegm(ct, &tc);
+ /*
+ * The LTC drop-frame frame counter drops two frame numbers (0 and 1) every
+ * minute, except for minutes that are exactly divisible by 10
+ */
+ ltc_frame = (c->frames + 2*ct/60 - 2*ct/600) % c->sys->ltc_divisor;
+ buf[1] = (0 << 7) | /* Color fame: 0 - unsync; 1 - sync mode */
+ (1 << 6) | /* Drop frame timecode: 0 - nondrop; 1 - drop */
+ ((ltc_frame / 10) << 4) | /* Tens of frames */
+ (ltc_frame % 10); /* Units of frames */
+ buf[2] = (1 << 7) | /* Biphase mark polarity correction: 0 - even; 1 - odd */
+ ((tc.tm_sec / 10) << 4) | /* Tens of seconds */
+ (tc.tm_sec % 10); /* Units of seconds */
+ buf[3] = (1 << 7) | /* Binary group flag BGF0 */
+ ((tc.tm_min / 10) << 4) | /* Tens of minutes */
+ (tc.tm_min % 10); /* Units of minutes */
+ buf[4] = (1 << 7) | /* Binary group flag BGF2 */
+ (1 << 6) | /* Binary group flag BGF1 */
+ ((tc.tm_hour / 10) << 4) | /* Tens of hours */
+ (tc.tm_hour % 10); /* Units of hours */
+ break;
+ case dv_audio_source: /* AAUX source pack */
+ buf[1] = (0 << 7) | /* locked mode */
+ (1 << 6) | /* reserved -- always 1 */
+ (dv_audio_frame_size(c->sys, c->frames) -
+ c->sys->audio_min_samples[0]);
+ /* # of samples */
+ buf[2] = (0 << 7) | /* multi-stereo */
+ (0 << 5) | /* #of audio channels per block: 0 -- 1 channel */
+ (0 << 4) | /* pair bit: 0 -- one pair of channels */
+ 0; /* audio mode */
+ buf[3] = (1 << 7) | /* res */
+ (1 << 6) | /* multi-language flag */
+ (c->sys->dsf << 5) | /* system: 60fields/50fields */
+ (c->sys->n_difchan & 2); /* definition: 0 -- 25Mbps, 2 -- 50Mbps */
+ buf[4] = (1 << 7) | /* emphasis: 1 -- off */
+ (0 << 6) | /* emphasis time constant: 0 -- reserved */
+ (0 << 3) | /* frequency: 0 -- 48Khz, 1 -- 44,1Khz, 2 -- 32Khz */
+ 0; /* quantization: 0 -- 16bit linear, 1 -- 12bit nonlinear */
+ break;
+ case dv_audio_control:
+ buf[1] = (0 << 6) | /* copy protection: 0 -- unrestricted */
+ (1 << 4) | /* input source: 1 -- digital input */
+ (3 << 2) | /* compression: 3 -- no information */
+ 0; /* misc. info/SMPTE emphasis off */
+ buf[2] = (1 << 7) | /* recording start point: 1 -- no */
+ (1 << 6) | /* recording end point: 1 -- no */
+ (1 << 3) | /* recording mode: 1 -- original */
+ 7;
+ buf[3] = (1 << 7) | /* direction: 1 -- forward */
+ (c->sys->pix_fmt == PIX_FMT_YUV420P ? 0x20 : /* speed */
+ c->sys->ltc_divisor*4);
+ buf[4] = (1 << 7) | /* reserved -- always 1 */
+ 0x7f; /* genre category */
+ break;
+ case dv_audio_recdate:
+ case dv_video_recdate: /* VAUX recording date */
+ ct = c->start_time + (time_t)(c->frames /
+ ((float)c->sys->frame_rate / (float)c->sys->frame_rate_base));
+ brktimegm(ct, &tc);
+ buf[1] = 0xff; /* ds, tm, tens of time zone, units of time zone */
+ /* 0xff is very likely to be "unknown" */
+ buf[2] = (3 << 6) | /* reserved -- always 1 */
+ ((tc.tm_mday / 10) << 4) | /* Tens of day */
+ (tc.tm_mday % 10); /* Units of day */
+ buf[3] = /* we set high 4 bits to 0, shouldn't we set them to week? */
+ ((tc.tm_mon / 10) << 4) | /* Tens of month */
+ (tc.tm_mon % 10); /* Units of month */
+ buf[4] = (((tc.tm_year % 100) / 10) << 4) | /* Tens of year */
+ (tc.tm_year % 10); /* Units of year */
+ break;
+ case dv_audio_rectime: /* AAUX recording time */
+ case dv_video_rectime: /* VAUX recording time */
+ ct = c->start_time + (time_t)(c->frames /
+ ((float)c->sys->frame_rate / (float)c->sys->frame_rate_base));
+ brktimegm(ct, &tc);
+ buf[1] = (3 << 6) | /* reserved -- always 1 */
+ 0x3f; /* tens of frame, units of frame: 0x3f - "unknown" ? */
+ buf[2] = (1 << 7) | /* reserved -- always 1 */
+ ((tc.tm_sec / 10) << 4) | /* Tens of seconds */
+ (tc.tm_sec % 10); /* Units of seconds */
+ buf[3] = (1 << 7) | /* reserved -- always 1 */
+ ((tc.tm_min / 10) << 4) | /* Tens of minutes */
+ (tc.tm_min % 10); /* Units of minutes */
+ buf[4] = (3 << 6) | /* reserved -- always 1 */
+ ((tc.tm_hour / 10) << 4) | /* Tens of hours */
+ (tc.tm_hour % 10); /* Units of hours */
+ break;
+ default:
+ buf[1] = buf[2] = buf[3] = buf[4] = 0xff;
+ }
+ return 5;
+}
+
+static void dv_inject_audio(DVMuxContext *c, int channel, uint8_t* frame_ptr)
+{
+ int i, j, d, of, size;
+ size = 4 * dv_audio_frame_size(c->sys, c->frames);
+ frame_ptr += channel * c->sys->difseg_size * 150 * 80;
+ for (i = 0; i < c->sys->difseg_size; i++) {
+ frame_ptr += 6 * 80; /* skip DIF segment header */
+ for (j = 0; j < 9; j++) {
+ dv_write_pack(dv_aaux_packs_dist[i][j], c, &frame_ptr[3]);
+ for (d = 8; d < 80; d+=2) {
+ of = c->sys->audio_shuffle[i][j] + (d - 8)/2 * c->sys->audio_stride;
+ if (of*2 >= size)
+ continue;
+
+ frame_ptr[d] = av_fifo_peek(&c->audio_data[channel], of*2+1); // FIXME: maybe we have to admit
+ frame_ptr[d+1] = av_fifo_peek(&c->audio_data[channel], of*2); // that DV is big-endian PCM
+ }
+ frame_ptr += 16 * 80; /* 15 Video DIFs + 1 Audio DIF */
+ }
+ }
+}
+
+static void dv_inject_metadata(DVMuxContext *c, uint8_t* frame)
+{
+ int j, k;
+ uint8_t* buf;
+
+ for (buf = frame; buf < frame + c->sys->frame_size; buf += 150 * 80) {
+ /* DV subcode: 2nd and 3rd DIFs */
+ for (j = 80; j < 80 * 3; j += 80) {
+ for (k = 6; k < 6 * 8; k += 8)
+ dv_write_pack(dv_timecode, c, &buf[j+k]);
+
+ if (((long)(buf-frame)/(c->sys->frame_size/(c->sys->difseg_size*c->sys->n_difchan))%c->sys->difseg_size) > 5) { /* FIXME: is this really needed ? */
+ dv_write_pack(dv_video_recdate, c, &buf[j+14]);
+ dv_write_pack(dv_video_rectime, c, &buf[j+22]);
+ dv_write_pack(dv_video_recdate, c, &buf[j+38]);
+ dv_write_pack(dv_video_rectime, c, &buf[j+46]);
+ }
+ }
+
+ /* DV VAUX: 4th, 5th and 6th DIFs */
+ for (j = 80*3 + 3; j < 80*6; j += 80) {
+ dv_write_pack(dv_video_recdate, c, &buf[j+5*2]);
+ dv_write_pack(dv_video_rectime, c, &buf[j+5*3]);
+ dv_write_pack(dv_video_recdate, c, &buf[j+5*11]);
+ dv_write_pack(dv_video_rectime, c, &buf[j+5*12]);
+ }
+ }
+}
+
+/*
+ * The following 3 functions constitute our interface to the world
+ */
+
+int dv_assemble_frame(DVMuxContext *c, AVStream* st,
+ const uint8_t* data, int data_size, uint8_t** frame)
+{
+ int i, reqasize;
+
+ *frame = &c->frame_buf[0];
+ reqasize = 4 * dv_audio_frame_size(c->sys, c->frames);
+
+ switch (st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ /* FIXME: we need a more sensible approach than this one */
+ if (c->has_video)
+ av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient audio data or severe sync problem.\n", c->frames);
+
+ memcpy(*frame, data, c->sys->frame_size);
+ c->has_video = 1;
+ break;
+ case CODEC_TYPE_AUDIO:
+ for (i = 0; i < c->n_ast && st != c->ast[i]; i++);
+
+ /* FIXME: we need a more sensible approach than this one */
+ if (av_fifo_size(&c->audio_data[i]) + data_size >= 100*AVCODEC_MAX_AUDIO_FRAME_SIZE)
+ av_log(st->codec, AV_LOG_ERROR, "Can't process DV frame #%d. Insufficient video data or severe sync problem.\n", c->frames);
+ av_fifo_write(&c->audio_data[i], data, data_size);
+
+ /* Let's see if we've got enough audio for one DV frame */
+ c->has_audio |= ((reqasize <= av_fifo_size(&c->audio_data[i])) << i);
+
+ break;
+ default:
+ break;
+ }
+
+ /* Let's see if we have enough data to construct one DV frame: video present and every audio FIFO filled (one bit per audio stream in has_audio) */
+ if (c->has_video == 1 && c->has_audio + 1 == 1<<c->n_ast) {
+ dv_inject_metadata(c, *frame);
+ for (i=0; i<c->n_ast; i++) {
+ dv_inject_audio(c, i, *frame);
+ av_fifo_drain(&c->audio_data[i], reqasize);
+ }
+
+ c->has_video = 0;
+ c->has_audio = 0;
+ c->frames++;
+
+ return c->sys->frame_size;
+ }
+
+ return 0;
+}
+
+DVMuxContext* dv_init_mux(AVFormatContext* s)
+{
+ DVMuxContext *c = (DVMuxContext *)s->priv_data;
+ AVStream *vst = NULL;
+ int i;
+
+ /* we support at most 1 video and 2 audio streams */
+ if (s->nb_streams > 3)
+ return NULL;
+
+ c->n_ast = 0;
+ c->ast[0] = c->ast[1] = NULL;
+
+ /* We have to sort out which of the streams is audio and which is video */
+ for (i=0; i<s->nb_streams; i++) {
+ switch (s->streams[i]->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ vst = s->streams[i];
+ break;
+ case CODEC_TYPE_AUDIO:
+ c->ast[c->n_ast++] = s->streams[i];
+ break;
+ default:
+ goto bail_out;
+ }
+ }
+
+ /* Some checks -- DV format is very picky about its incoming streams */
+ if (!vst || vst->codec->codec_id != CODEC_ID_DVVIDEO)
+ goto bail_out;
+ for (i=0; i<c->n_ast; i++) {
+ if (c->ast[i] && (c->ast[i]->codec->codec_id != CODEC_ID_PCM_S16LE ||
+ c->ast[i]->codec->sample_rate != 48000 ||
+ c->ast[i]->codec->channels != 2))
+ goto bail_out;
+ }
+ c->sys = dv_codec_profile(vst->codec);
+ if (!c->sys)
+ goto bail_out;
+
+ if((c->n_ast > 1) && (c->sys->n_difchan < 2)) {
+ /* only 1 stereo pair is allowed in 25Mbps mode */
+ goto bail_out;
+ }
+
+ /* Ok, everything seems to be in working order */
+ c->frames = 0;
+ c->has_audio = 0;
+ c->has_video = 0;
+ c->start_time = (time_t)s->timestamp;
+
+ for (i=0; i<c->n_ast; i++) {
+ if (c->ast[i] && av_fifo_init(&c->audio_data[i], 100*AVCODEC_MAX_AUDIO_FRAME_SIZE) < 0) {
+ while (i>0) {
+ i--;
+ av_fifo_free(&c->audio_data[i]);
+ }
+ goto bail_out;
+ }
+ }
+
+ return c;
+
+bail_out:
+ return NULL;
+}
+
+void dv_delete_mux(DVMuxContext *c)
+{
+ int i;
+ for (i=0; i < c->n_ast; i++)
+ av_fifo_free(&c->audio_data[i]);
+}
+
+#ifdef CONFIG_MUXERS
+static int dv_write_header(AVFormatContext *s)
+{
+ if (!dv_init_mux(s)) {
+ av_log(s, AV_LOG_ERROR, "Can't initialize DV format!\n"
+ "Make sure that you supply exactly two streams:\n"
+ " video: 25fps or 29.97fps, audio: 2ch/48Khz/PCM\n"
+ " (50Mbps allows an optional second audio stream)\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int dv_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ uint8_t* frame;
+ int fsize;
+
+ fsize = dv_assemble_frame((DVMuxContext *)s->priv_data, s->streams[pkt->stream_index],
+ pkt->data, pkt->size, &frame);
+ if (fsize > 0) {
+ put_buffer(&s->pb, frame, fsize);
+ put_flush_packet(&s->pb);
+ }
+ return 0;
+}
+
+/*
+ * We might end up with some extra A/V data without a matching counterpart,
+ * e.g. video data without enough audio to write the complete frame.
+ * Currently we simply drop the last frame; I don't know whether this
+ * is the best strategy.
+ */
+static int dv_write_trailer(struct AVFormatContext *s)
+{
+ dv_delete_mux((DVMuxContext *)s->priv_data);
+ return 0;
+}
+#endif /* CONFIG_MUXERS */
+
+#ifdef CONFIG_DV_MUXER
+AVOutputFormat dv_muxer = {
+ "dv",
+ "DV video format",
+ NULL,
+ "dv",
+ sizeof(DVMuxContext),
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_DVVIDEO,
+ dv_write_header,
+ dv_write_packet,
+ dv_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/electronicarts.c b/contrib/ffmpeg/libavformat/electronicarts.c
new file mode 100644
index 000000000..943f75b42
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/electronicarts.c
@@ -0,0 +1,291 @@
+/* Electronic Arts Multimedia File Demuxer
+ * Copyright (c) 2004 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file electronicarts.c
+ * Electronic Arts Multimedia file demuxer (WVE/UV2/etc.)
+ * by Robin Kay (komadori at gekkou.co.uk)
+ */
+
+#include "avformat.h"
+
+#define SCHl_TAG MKTAG('S', 'C', 'H', 'l')
+#define PT00_TAG MKTAG('P', 'T', 0x0, 0x0)
+#define SCDl_TAG MKTAG('S', 'C', 'D', 'l')
+#define pIQT_TAG MKTAG('p', 'I', 'Q', 'T')
+#define SCEl_TAG MKTAG('S', 'C', 'E', 'l')
+#define _TAG MKTAG('', '', '', '')
+
+#define EA_SAMPLE_RATE 22050
+#define EA_BITS_PER_SAMPLE 16
+#define EA_PREAMBLE_SIZE 8
+
+typedef struct EaDemuxContext {
+ int width;
+ int height;
+ int video_stream_index;
+ int track_count;
+
+ int audio_stream_index;
+ int audio_frame_counter;
+
+ int64_t audio_pts;
+ int64_t video_pts;
+ int video_pts_inc;
+ float fps;
+
+ int num_channels;
+ int num_samples;
+ int compression_type;
+} EaDemuxContext;
+
+static uint32_t read_arbitary(ByteIOContext *pb) {
+ uint8_t size, byte;
+ int i;
+ uint32_t word;
+
+ size = get_byte(pb);
+
+ word = 0;
+ for (i = 0; i < size; i++) {
+ byte = get_byte(pb);
+ word <<= 8;
+ word |= byte;
+ }
+
+ return word;
+}
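+
+/* For example, a length byte of 0x02 followed by 0x01 0x00 decodes to the
+ * big-endian value 0x0100 (256); a length byte of 0x00 yields 0. */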
+
+/*
+ * Process WVE file header
+ * Returns 1 if the WVE file is valid and successfully opened, 0 otherwise
+ */
+static int process_ea_header(AVFormatContext *s) {
+ int inHeader;
+ uint32_t blockid, size;
+ EaDemuxContext *ea = (EaDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+
+ if (get_buffer(pb, (void*)&blockid, 4) != 4) {
+ return 0;
+ }
+ if (le2me_32(blockid) != SCHl_TAG) {
+ return 0;
+ }
+
+ if (get_buffer(pb, (void*)&size, 4) != 4) {
+ return 0;
+ }
+ size = le2me_32(size);
+
+ if (get_buffer(pb, (void*)&blockid, 4) != 4) {
+ return 0;
+ }
+ if (le2me_32(blockid) != PT00_TAG) {
+ av_log (s, AV_LOG_ERROR, "PT header missing\n");
+ return 0;
+ }
+
+ inHeader = 1;
+ while (inHeader) {
+ int inSubheader;
+ uint8_t byte;
+ byte = get_byte(pb) & 0xFF;
+
+ switch (byte) {
+ case 0xFD:
+ av_log (s, AV_LOG_INFO, "entered audio subheader\n");
+ inSubheader = 1;
+ while (inSubheader) {
+ uint8_t subbyte;
+ subbyte = get_byte(pb) & 0xFF;
+
+ switch (subbyte) {
+ case 0x82:
+ ea->num_channels = read_arbitary(pb);
+ av_log (s, AV_LOG_INFO, "num_channels (element 0x82) set to 0x%08x\n", ea->num_channels);
+ break;
+ case 0x83:
+ ea->compression_type = read_arbitary(pb);
+ av_log (s, AV_LOG_INFO, "compression_type (element 0x83) set to 0x%08x\n", ea->compression_type);
+ break;
+ case 0x85:
+ ea->num_samples = read_arbitary(pb);
+ av_log (s, AV_LOG_INFO, "num_samples (element 0x85) set to 0x%08x\n", ea->num_samples);
+ break;
+ case 0x8A:
+ av_log (s, AV_LOG_INFO, "element 0x%02x set to 0x%08x\n", subbyte, read_arbitary(pb));
+ av_log (s, AV_LOG_INFO, "exited audio subheader\n");
+ inSubheader = 0;
+ break;
+ default:
+ av_log (s, AV_LOG_INFO, "element 0x%02x set to 0x%08x\n", subbyte, read_arbitary(pb));
+ break;
+ }
+ }
+ break;
+ case 0xFF:
+ av_log (s, AV_LOG_INFO, "end of header block reached\n");
+ inHeader = 0;
+ break;
+ default:
+ av_log (s, AV_LOG_INFO, "header element 0x%02x set to 0x%08x\n", byte, read_arbitary(pb));
+ break;
+ }
+ }
+
+ if ((ea->num_channels != 2) || (ea->compression_type != 7)) {
+ av_log (s, AV_LOG_ERROR, "unsupported stream type\n");
+ return 0;
+ }
+
+ /* skip to the start of the data */
+ url_fseek(pb, size, SEEK_SET);
+
+ return 1;
+}
+
+
+static int ea_probe(AVProbeData *p)
+{
+ if (p->buf_size < 4)
+ return 0;
+
+ if (LE_32(&p->buf[0]) != SCHl_TAG)
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int ea_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ EaDemuxContext *ea = (EaDemuxContext *)s->priv_data;
+ AVStream *st;
+
+ if (!process_ea_header(s))
+ return AVERROR_IO;
+
+#if 0
+ /* initialize the video decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ ea->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_EA_MJPEG;
+ st->codec->codec_tag = 0; /* no fourcc */
+#endif
+
+ /* initialize the audio decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, EA_SAMPLE_RATE);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_ADPCM_EA;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = ea->num_channels;
+ st->codec->sample_rate = EA_SAMPLE_RATE;
+ st->codec->bits_per_sample = EA_BITS_PER_SAMPLE;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample / 4;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ ea->audio_stream_index = st->index;
+ ea->audio_frame_counter = 0;
+
+ return 1;
+}
+
+static int ea_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ EaDemuxContext *ea = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret = 0;
+ int packet_read = 0;
+ unsigned char preamble[EA_PREAMBLE_SIZE];
+ unsigned int chunk_type, chunk_size;
+
+ while (!packet_read) {
+
+ if (get_buffer(pb, preamble, EA_PREAMBLE_SIZE) != EA_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ chunk_type = LE_32(&preamble[0]);
+ chunk_size = LE_32(&preamble[4]) - EA_PREAMBLE_SIZE;
+
+ switch (chunk_type) {
+ /* audio data */
+ case SCDl_TAG:
+ ret = av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ ret = AVERROR_IO;
+ else {
+ pkt->stream_index = ea->audio_stream_index;
+ pkt->pts = 90000;
+ pkt->pts *= ea->audio_frame_counter;
+ pkt->pts /= EA_SAMPLE_RATE;
+
+ /* 2 samples/byte, 1 or 2 samples per frame depending
+ * on stereo; chunk also has 12-byte header */
+ ea->audio_frame_counter += ((chunk_size - 12) * 2) /
+ ea->num_channels;
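+ /* e.g. a stereo SCDl chunk of 1036 bytes advances the counter by
+ * (1036 - 12) * 2 / 2 = 1024 sample frames */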
+ }
+
+ packet_read = 1;
+ break;
+
+ /* ending tag */
+ case SCEl_TAG:
+ ret = AVERROR_IO;
+ packet_read = 1;
+ break;
+
+ default:
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ break;
+ }
+
+ /* ending packet */
+ if (chunk_type == SCEl_TAG) {
+ }
+ }
+
+ return ret;
+}
+
+static int ea_read_close(AVFormatContext *s)
+{
+// EaDemuxContext *ea = (EaDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+AVInputFormat ea_demuxer = {
+ "ea",
+ "Electronic Arts Multimedia Format",
+ sizeof(EaDemuxContext),
+ ea_probe,
+ ea_read_header,
+ ea_read_packet,
+ ea_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/ffm.c b/contrib/ffmpeg/libavformat/ffm.c
new file mode 100644
index 000000000..539b45d5f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ffm.c
@@ -0,0 +1,792 @@
+/*
+ * FFM (ffserver live feed) muxer and demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+
+/* The FFM file is made of blocks of fixed size */
+#define FFM_HEADER_SIZE 14
+#define PACKET_ID 0x666d
+
+/* each packet contains frames (which can span several packets) */
+#define FRAME_HEADER_SIZE 8
+#define FLAG_KEY_FRAME 0x01
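+
+/* Layout, as written by flush_packet() and ffm_write_packet() below:
+ *   packet header (FFM_HEADER_SIZE, 14 bytes):
+ *     2 bytes PACKET_ID, 2 bytes fill size, 8 bytes pts (big-endian),
+ *     2 bytes offset of the first frame header (bit 15: first/resync packet)
+ *   frame header (FRAME_HEADER_SIZE, 8 bytes):
+ *     1 byte stream index, 1 byte flags (FLAG_KEY_FRAME),
+ *     3 bytes payload size, 3 bytes duration in microseconds (big-endian) */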
+
+typedef struct FFMStream {
+ int64_t pts;
+} FFMStream;
+
+enum {
+ READ_HEADER,
+ READ_DATA,
+};
+
+typedef struct FFMContext {
+ /* only reading mode */
+ offset_t write_index, file_size;
+ int read_state;
+ uint8_t header[FRAME_HEADER_SIZE];
+
+ /* read and write */
+ int first_packet; /* true if first packet, needed to set the discontinuity tag */
+ int first_frame_in_packet; /* true if first frame in packet, needed to know if PTS information is valid */
+ int packet_size;
+ int frame_offset;
+ int64_t pts;
+ uint8_t *packet_ptr, *packet_end;
+ uint8_t packet[FFM_PACKET_SIZE];
+} FFMContext;
+
+static int64_t get_pts(AVFormatContext *s, offset_t pos);
+
+/* disable pts hack for testing */
+int ffm_nopts = 0;
+
+#ifdef CONFIG_MUXERS
+static void flush_packet(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ int fill_size, h;
+ ByteIOContext *pb = &s->pb;
+
+ fill_size = ffm->packet_end - ffm->packet_ptr;
+ memset(ffm->packet_ptr, 0, fill_size);
+
+ if (url_ftell(pb) % ffm->packet_size)
+ av_abort();
+
+ /* put header */
+ put_be16(pb, PACKET_ID);
+ put_be16(pb, fill_size);
+ put_be64(pb, ffm->pts);
+ h = ffm->frame_offset;
+ if (ffm->first_packet)
+ h |= 0x8000;
+ put_be16(pb, h);
+ put_buffer(pb, ffm->packet, ffm->packet_end - ffm->packet);
+ put_flush_packet(pb);
+
+ /* prepare next packet */
+ ffm->frame_offset = 0; /* no key frame */
+ ffm->pts = 0; /* no pts */
+ ffm->packet_ptr = ffm->packet;
+ ffm->first_packet = 0;
+}
+
+/* 'first' is true if first data of a frame */
+static void ffm_write_data(AVFormatContext *s,
+ const uint8_t *buf, int size,
+ int64_t pts, int first)
+{
+ FFMContext *ffm = s->priv_data;
+ int len;
+
+ if (first && ffm->frame_offset == 0)
+ ffm->frame_offset = ffm->packet_ptr - ffm->packet + FFM_HEADER_SIZE;
+ if (first && ffm->pts == 0)
+ ffm->pts = pts;
+
+ /* write as many packets as needed */
+ while (size > 0) {
+ len = ffm->packet_end - ffm->packet_ptr;
+ if (len > size)
+ len = size;
+ memcpy(ffm->packet_ptr, buf, len);
+
+ ffm->packet_ptr += len;
+ buf += len;
+ size -= len;
+ if (ffm->packet_ptr >= ffm->packet_end) {
+ /* special case : no pts in packet : we leave the current one */
+ if (ffm->pts == 0)
+ ffm->pts = pts;
+
+ flush_packet(s);
+ }
+ }
+}
+
+static int ffm_write_header(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ AVStream *st;
+ FFMStream *fst;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *codec;
+ int bit_rate, i;
+
+ ffm->packet_size = FFM_PACKET_SIZE;
+
+ /* header */
+ put_le32(pb, MKTAG('F', 'F', 'M', '1'));
+ put_be32(pb, ffm->packet_size);
+ /* XXX: store write position in other file ? */
+ put_be64(pb, ffm->packet_size); /* current write position */
+
+ put_be32(pb, s->nb_streams);
+ bit_rate = 0;
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ bit_rate += st->codec->bit_rate;
+ }
+ put_be32(pb, bit_rate);
+
+ /* list of streams */
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ fst = av_mallocz(sizeof(FFMStream));
+ if (!fst)
+ goto fail;
+ av_set_pts_info(st, 64, 1, 1000000);
+ st->priv_data = fst;
+
+ codec = st->codec;
+ /* generic info */
+ put_be32(pb, codec->codec_id);
+ put_byte(pb, codec->codec_type);
+ put_be32(pb, codec->bit_rate);
+ put_be32(pb, st->quality);
+ put_be32(pb, codec->flags);
+ put_be32(pb, codec->flags2);
+ put_be32(pb, codec->debug);
+ /* specific info */
+ switch(codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ put_be32(pb, codec->time_base.num);
+ put_be32(pb, codec->time_base.den);
+ put_be16(pb, codec->width);
+ put_be16(pb, codec->height);
+ put_be16(pb, codec->gop_size);
+ put_be32(pb, codec->pix_fmt);
+ put_byte(pb, codec->qmin);
+ put_byte(pb, codec->qmax);
+ put_byte(pb, codec->max_qdiff);
+ put_be16(pb, (int) (codec->qcompress * 10000.0));
+ put_be16(pb, (int) (codec->qblur * 10000.0));
+ put_be32(pb, codec->bit_rate_tolerance);
+ put_strz(pb, codec->rc_eq);
+ put_be32(pb, codec->rc_max_rate);
+ put_be32(pb, codec->rc_min_rate);
+ put_be32(pb, codec->rc_buffer_size);
+ put_be64(pb, av_dbl2int(codec->i_quant_factor));
+ put_be64(pb, av_dbl2int(codec->b_quant_factor));
+ put_be64(pb, av_dbl2int(codec->i_quant_offset));
+ put_be64(pb, av_dbl2int(codec->b_quant_offset));
+ put_be32(pb, codec->dct_algo);
+ put_be32(pb, codec->strict_std_compliance);
+ put_be32(pb, codec->max_b_frames);
+ put_be32(pb, codec->luma_elim_threshold);
+ put_be32(pb, codec->chroma_elim_threshold);
+ put_be32(pb, codec->mpeg_quant);
+ put_be32(pb, codec->intra_dc_precision);
+ put_be32(pb, codec->me_method);
+ put_be32(pb, codec->mb_decision);
+ put_be32(pb, codec->nsse_weight);
+ put_be32(pb, codec->frame_skip_cmp);
+ put_be64(pb, av_dbl2int(codec->rc_buffer_aggressivity));
+ break;
+ case CODEC_TYPE_AUDIO:
+ put_be32(pb, codec->sample_rate);
+ put_le16(pb, codec->channels);
+ put_le16(pb, codec->frame_size);
+ break;
+ default:
+ return -1;
+ }
+ /* hack to have real time */
+ if (ffm_nopts)
+ fst->pts = 0;
+ else
+ fst->pts = av_gettime();
+ }
+
+ /* flush until end of block reached */
+ while ((url_ftell(pb) % ffm->packet_size) != 0)
+ put_byte(pb, 0);
+
+ put_flush_packet(pb);
+
+ /* init packet mux */
+ ffm->packet_ptr = ffm->packet;
+ ffm->packet_end = ffm->packet + ffm->packet_size - FFM_HEADER_SIZE;
+ assert(ffm->packet_end >= ffm->packet);
+ ffm->frame_offset = 0;
+ ffm->pts = 0;
+ ffm->first_packet = 1;
+
+ return 0;
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ av_freep(&st->priv_data);
+ }
+ return -1;
+}
+
+static int ffm_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVStream *st = s->streams[pkt->stream_index];
+ FFMStream *fst = st->priv_data;
+ int64_t pts;
+ uint8_t header[FRAME_HEADER_SIZE];
+ int duration;
+ int size= pkt->size;
+
+ //XXX/FIXME use duration from pkt
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ duration = ((float)st->codec->frame_size / st->codec->sample_rate * 1000000.0);
+ } else {
+ duration = (1000000.0 * st->codec->time_base.num / (float)st->codec->time_base.den);
+ }
+
+ pts = fst->pts;
+ /* packet size & key_frame */
+ header[0] = pkt->stream_index;
+ header[1] = 0;
+ if (pkt->flags & PKT_FLAG_KEY)
+ header[1] |= FLAG_KEY_FRAME;
+ header[2] = (size >> 16) & 0xff;
+ header[3] = (size >> 8) & 0xff;
+ header[4] = size & 0xff;
+ header[5] = (duration >> 16) & 0xff;
+ header[6] = (duration >> 8) & 0xff;
+ header[7] = duration & 0xff;
+ ffm_write_data(s, header, FRAME_HEADER_SIZE, pts, 1);
+ ffm_write_data(s, pkt->data, size, pts, 0);
+
+ fst->pts += duration;
+ return 0;
+}
+
+static int ffm_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ FFMContext *ffm = s->priv_data;
+
+ /* flush packets */
+ if (ffm->packet_ptr > ffm->packet)
+ flush_packet(s);
+
+ put_flush_packet(pb);
+
+ if (!url_is_streamed(pb)) {
+ int64_t size;
+ /* update the write offset */
+ size = url_ftell(pb);
+ url_fseek(pb, 8, SEEK_SET);
+ put_be64(pb, size);
+ put_flush_packet(pb);
+ }
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/* ffm demux */
+
+static int ffm_is_avail_data(AVFormatContext *s, int size)
+{
+ FFMContext *ffm = s->priv_data;
+ offset_t pos, avail_size;
+ int len;
+
+ len = ffm->packet_end - ffm->packet_ptr;
+ if (!ffm_nopts) {
+ /* XXX: I don't understand this test, so I disabled it for testing */
+ if (size <= len)
+ return 1;
+ }
+ pos = url_ftell(&s->pb);
+ if (pos == ffm->write_index) {
+ /* exactly at the end of stream */
+ return 0;
+ } else if (pos < ffm->write_index) {
+ avail_size = ffm->write_index - pos;
+ } else {
+ avail_size = (ffm->file_size - pos) + (ffm->write_index - FFM_PACKET_SIZE);
+ }
+ avail_size = (avail_size / ffm->packet_size) * (ffm->packet_size - FFM_HEADER_SIZE) + len;
+ if (size <= avail_size)
+ return 1;
+ else
+ return 0;
+}
+
+/* first is true if we read the frame header */
+static int ffm_read_data(AVFormatContext *s,
+ uint8_t *buf, int size, int first)
+{
+ FFMContext *ffm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int len, fill_size, size1, frame_offset;
+
+ size1 = size;
+ while (size > 0) {
+ redo:
+ len = ffm->packet_end - ffm->packet_ptr;
+ if (len > size)
+ len = size;
+ if (len == 0) {
+ if (url_ftell(pb) == ffm->file_size)
+ url_fseek(pb, ffm->packet_size, SEEK_SET);
+ retry_read:
+ get_be16(pb); /* PACKET_ID */
+ fill_size = get_be16(pb);
+ ffm->pts = get_be64(pb);
+ ffm->first_frame_in_packet = 1;
+ frame_offset = get_be16(pb);
+ get_buffer(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);
+ ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size);
+ if (ffm->packet_end < ffm->packet)
+ return -1;
+ /* if first packet or resynchronization packet, we must
+ handle it specifically */
+ if (ffm->first_packet || (frame_offset & 0x8000)) {
+ if (!frame_offset) {
+ /* This packet has no frame headers in it */
+ if (url_ftell(pb) >= ffm->packet_size * 3) {
+ url_fseek(pb, -ffm->packet_size * 2, SEEK_CUR);
+ goto retry_read;
+ }
+ /* This is bad, we cannot find a valid frame header */
+ return 0;
+ }
+ ffm->first_packet = 0;
+ if ((frame_offset & 0x7ffff) < FFM_HEADER_SIZE)
+ return -1;
+ ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE;
+ if (!first)
+ break;
+ } else {
+ ffm->packet_ptr = ffm->packet;
+ }
+ goto redo;
+ }
+ memcpy(buf, ffm->packet_ptr, len);
+ buf += len;
+ ffm->packet_ptr += len;
+ size -= len;
+ first = 0;
+ }
+ return size1 - size;
+}
+
+
+static void adjust_write_index(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int64_t pts;
+ //offset_t orig_write_index = ffm->write_index;
+ offset_t pos_min, pos_max;
+ int64_t pts_start;
+ offset_t ptr = url_ftell(pb);
+
+
+ pos_min = 0;
+ pos_max = ffm->file_size - 2 * FFM_PACKET_SIZE;
+
+ pts_start = get_pts(s, pos_min);
+
+ pts = get_pts(s, pos_max);
+
+ if (pts - 100000 > pts_start)
+ goto end;
+
+ ffm->write_index = FFM_PACKET_SIZE;
+
+ pts_start = get_pts(s, pos_min);
+
+ pts = get_pts(s, pos_max);
+
+ if (pts - 100000 <= pts_start) {
+ while (1) {
+ offset_t newpos;
+ int64_t newpts;
+
+ newpos = ((pos_max + pos_min) / (2 * FFM_PACKET_SIZE)) * FFM_PACKET_SIZE;
+
+ if (newpos == pos_min)
+ break;
+
+ newpts = get_pts(s, newpos);
+
+ if (newpts - 100000 <= pts) {
+ pos_max = newpos;
+ pts = newpts;
+ } else {
+ pos_min = newpos;
+ }
+ }
+ ffm->write_index += pos_max;
+ }
+
+ //printf("Adjusted write index from %"PRId64" to %"PRId64": pts=%0.6f\n", orig_write_index, ffm->write_index, pts / 1000000.);
+ //printf("pts range %0.6f - %0.6f\n", get_pts(s, 0) / 1000000. , get_pts(s, ffm->file_size - 2 * FFM_PACKET_SIZE) / 1000000. );
+
+ end:
+ url_fseek(pb, ptr, SEEK_SET);
+}
+
+
+static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ FFMContext *ffm = s->priv_data;
+ AVStream *st;
+ FFMStream *fst;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *codec;
+ int i, nb_streams;
+ uint32_t tag;
+
+ /* header */
+ tag = get_le32(pb);
+ if (tag != MKTAG('F', 'F', 'M', '1'))
+ goto fail;
+ ffm->packet_size = get_be32(pb);
+ if (ffm->packet_size != FFM_PACKET_SIZE)
+ goto fail;
+ ffm->write_index = get_be64(pb);
+ /* get also filesize */
+ if (!url_is_streamed(pb)) {
+ ffm->file_size = url_fsize(pb);
+ adjust_write_index(s);
+ } else {
+ ffm->file_size = (uint64_t_C(1) << 63) - 1;
+ }
+
+ nb_streams = get_be32(pb);
+ get_be32(pb); /* total bitrate */
+ /* read each stream */
+ for(i=0;i<nb_streams;i++) {
+ char rc_eq_buf[128];
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ goto fail;
+ fst = av_mallocz(sizeof(FFMStream));
+ if (!fst)
+ goto fail;
+ s->streams[i] = st;
+
+ av_set_pts_info(st, 64, 1, 1000000);
+
+ st->priv_data = fst;
+
+ codec = st->codec;
+ /* generic info */
+ codec->codec_id = get_be32(pb);
+ codec->codec_type = get_byte(pb); /* codec_type */
+ codec->bit_rate = get_be32(pb);
+ st->quality = get_be32(pb);
+ codec->flags = get_be32(pb);
+ codec->flags2 = get_be32(pb);
+ codec->debug = get_be32(pb);
+ /* specific info */
+ switch(codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ codec->time_base.num = get_be32(pb);
+ codec->time_base.den = get_be32(pb);
+ codec->width = get_be16(pb);
+ codec->height = get_be16(pb);
+ codec->gop_size = get_be16(pb);
+ codec->pix_fmt = get_be32(pb);
+ codec->qmin = get_byte(pb);
+ codec->qmax = get_byte(pb);
+ codec->max_qdiff = get_byte(pb);
+ codec->qcompress = get_be16(pb) / 10000.0;
+ codec->qblur = get_be16(pb) / 10000.0;
+ codec->bit_rate_tolerance = get_be32(pb);
+ codec->rc_eq = av_strdup(get_strz(pb, rc_eq_buf, sizeof(rc_eq_buf)));
+ codec->rc_max_rate = get_be32(pb);
+ codec->rc_min_rate = get_be32(pb);
+ codec->rc_buffer_size = get_be32(pb);
+ codec->i_quant_factor = av_int2dbl(get_be64(pb));
+ codec->b_quant_factor = av_int2dbl(get_be64(pb));
+ codec->i_quant_offset = av_int2dbl(get_be64(pb));
+ codec->b_quant_offset = av_int2dbl(get_be64(pb));
+ codec->dct_algo = get_be32(pb);
+ codec->strict_std_compliance = get_be32(pb);
+ codec->max_b_frames = get_be32(pb);
+ codec->luma_elim_threshold = get_be32(pb);
+ codec->chroma_elim_threshold = get_be32(pb);
+ codec->mpeg_quant = get_be32(pb);
+ codec->intra_dc_precision = get_be32(pb);
+ codec->me_method = get_be32(pb);
+ codec->mb_decision = get_be32(pb);
+ codec->nsse_weight = get_be32(pb);
+ codec->frame_skip_cmp = get_be32(pb);
+ codec->rc_buffer_aggressivity = av_int2dbl(get_be64(pb));
+ break;
+ case CODEC_TYPE_AUDIO:
+ codec->sample_rate = get_be32(pb);
+ codec->channels = get_le16(pb);
+ codec->frame_size = get_le16(pb);
+ break;
+ default:
+ goto fail;
+ }
+
+ }
+
+ /* get until end of block reached */
+ while ((url_ftell(pb) % ffm->packet_size) != 0)
+ get_byte(pb);
+
+ /* init packet demux */
+ ffm->packet_ptr = ffm->packet;
+ ffm->packet_end = ffm->packet;
+ ffm->frame_offset = 0;
+ ffm->pts = 0;
+ ffm->read_state = READ_HEADER;
+ ffm->first_packet = 1;
+ return 0;
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ if (st) {
+ av_freep(&st->priv_data);
+ av_free(st);
+ }
+ }
+ return -1;
+}
+
+/* return < 0 if eof */
+static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int size;
+ FFMContext *ffm = s->priv_data;
+ int duration;
+
+ switch(ffm->read_state) {
+ case READ_HEADER:
+ if (!ffm_is_avail_data(s, FRAME_HEADER_SIZE)) {
+ return -EAGAIN;
+ }
+#if 0
+ printf("pos=%08"PRIx64" spos=%"PRIx64", write_index=%"PRIx64" size=%"PRIx64"\n",
+ url_ftell(&s->pb), s->pb.pos, ffm->write_index, ffm->file_size);
+#endif
+ if (ffm_read_data(s, ffm->header, FRAME_HEADER_SIZE, 1) !=
+ FRAME_HEADER_SIZE)
+ return -EAGAIN;
+#if 0
+ {
+ int i;
+ for(i=0;i<FRAME_HEADER_SIZE;i++)
+ printf("%02x ", ffm->header[i]);
+ printf("\n");
+ }
+#endif
+ ffm->read_state = READ_DATA;
+ /* fall thru */
+ case READ_DATA:
+ size = (ffm->header[2] << 16) | (ffm->header[3] << 8) | ffm->header[4];
+ if (!ffm_is_avail_data(s, size)) {
+ return -EAGAIN;
+ }
+
+ duration = (ffm->header[5] << 16) | (ffm->header[6] << 8) | ffm->header[7];
+
+ av_new_packet(pkt, size);
+ pkt->stream_index = ffm->header[0];
+ pkt->pos = url_ftell(&s->pb);
+ if (ffm->header[1] & FLAG_KEY_FRAME)
+ pkt->flags |= PKT_FLAG_KEY;
+
+ ffm->read_state = READ_HEADER;
+ if (ffm_read_data(s, pkt->data, size, 0) != size) {
+ /* bad case: desynchronized packet. we cancel all the packet loading */
+ av_free_packet(pkt);
+ return -EAGAIN;
+ }
+ if (ffm->first_frame_in_packet)
+ {
+ pkt->pts = ffm->pts;
+ ffm->first_frame_in_packet = 0;
+ }
+ pkt->duration = duration;
+ break;
+ }
+ return 0;
+}
+
+//#define DEBUG_SEEK
+
+/* pos is between 0 and file_size - FFM_PACKET_SIZE. It is translated
+ by the write position inside this function */
+static void ffm_seek1(AVFormatContext *s, offset_t pos1)
+{
+ FFMContext *ffm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ offset_t pos;
+
+ pos = pos1 + ffm->write_index;
+ if (pos >= ffm->file_size)
+ pos -= (ffm->file_size - FFM_PACKET_SIZE);
+#ifdef DEBUG_SEEK
+ printf("seek to %"PRIx64" -> %"PRIx64"\n", pos1, pos);
+#endif
+ url_fseek(pb, pos, SEEK_SET);
+}
+
+static int64_t get_pts(AVFormatContext *s, offset_t pos)
+{
+ ByteIOContext *pb = &s->pb;
+ int64_t pts;
+
+ ffm_seek1(s, pos);
+ url_fskip(pb, 4);
+ pts = get_be64(pb);
+#ifdef DEBUG_SEEK
+ printf("pts=%0.6f\n", pts / 1000000.0);
+#endif
+ return pts;
+}
+
+/* seek to a given time in the file. The file read pointer is
+ positioned at or before pts. XXX: the following code is quite
+ approximate */
+static int ffm_seek(AVFormatContext *s, int stream_index, int64_t wanted_pts, int flags)
+{
+ FFMContext *ffm = s->priv_data;
+ offset_t pos_min, pos_max, pos;
+ int64_t pts_min, pts_max, pts;
+ double pos1;
+
+#ifdef DEBUG_SEEK
+ printf("wanted_pts=%0.6f\n", wanted_pts / 1000000.0);
+#endif
+ /* find the position using linear interpolation (better than
+ plain bisection in typical cases) */
+ pos_min = 0;
+ pos_max = ffm->file_size - 2 * FFM_PACKET_SIZE;
+ while (pos_min <= pos_max) {
+ pts_min = get_pts(s, pos_min);
+ pts_max = get_pts(s, pos_max);
+ /* linear interpolation */
+ pos1 = (double)(pos_max - pos_min) * (double)(wanted_pts - pts_min) /
+ (double)(pts_max - pts_min);
+ pos = (((int64_t)pos1) / FFM_PACKET_SIZE) * FFM_PACKET_SIZE;
+ if (pos <= pos_min)
+ pos = pos_min;
+ else if (pos >= pos_max)
+ pos = pos_max;
+ pts = get_pts(s, pos);
+ /* check if we are lucky */
+ if (pts == wanted_pts) {
+ goto found;
+ } else if (pts > wanted_pts) {
+ pos_max = pos - FFM_PACKET_SIZE;
+ } else {
+ pos_min = pos + FFM_PACKET_SIZE;
+ }
+ }
+ pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
+ if (pos > 0)
+ pos -= FFM_PACKET_SIZE;
+ found:
+ ffm_seek1(s, pos);
+ return 0;
+}
+
+#ifdef CONFIG_FFSERVER
+offset_t ffm_read_write_index(int fd)
+{
+ uint8_t buf[8];
+ offset_t pos;
+ int i;
+
+ lseek(fd, 8, SEEK_SET);
+ read(fd, buf, 8);
+ pos = 0;
+ for(i=0;i<8;i++)
+ pos |= (int64_t)buf[i] << (56 - i * 8);
+ return pos;
+}
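+
+/* The write index lives at byte offset 8 of an FFM file as a big-endian
+ * 64 bit value, matching the put_be64() of the write position in
+ * ffm_write_header(). */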
+
+void ffm_write_write_index(int fd, offset_t pos)
+{
+ uint8_t buf[8];
+ int i;
+
+ for(i=0;i<8;i++)
+ buf[i] = (pos >> (56 - i * 8)) & 0xff;
+ lseek(fd, 8, SEEK_SET);
+ write(fd, buf, 8);
+}
+
+void ffm_set_write_index(AVFormatContext *s, offset_t pos, offset_t file_size)
+{
+ FFMContext *ffm = s->priv_data;
+ ffm->write_index = pos;
+ ffm->file_size = file_size;
+}
+#endif // CONFIG_FFSERVER
+
+static int ffm_read_close(AVFormatContext *s)
+{
+ AVStream *st;
+ int i;
+
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ av_freep(&st->priv_data);
+ }
+ return 0;
+}
+
+static int ffm_probe(AVProbeData *p)
+{
+ if (p->buf_size >= 4 &&
+ p->buf[0] == 'F' && p->buf[1] == 'F' && p->buf[2] == 'M' &&
+ p->buf[3] == '1')
+ return AVPROBE_SCORE_MAX + 1;
+ return 0;
+}
+
+#ifdef CONFIG_FFM_DEMUXER
+AVInputFormat ffm_demuxer = {
+ "ffm",
+ "ffm format",
+ sizeof(FFMContext),
+ ffm_probe,
+ ffm_read_header,
+ ffm_read_packet,
+ ffm_read_close,
+ ffm_seek,
+};
+#endif
+#ifdef CONFIG_FFM_MUXER
+AVOutputFormat ffm_muxer = {
+ "ffm",
+ "ffm format",
+ "",
+ "ffm",
+ sizeof(FFMContext),
+ /* not really used */
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG1VIDEO,
+ ffm_write_header,
+ ffm_write_packet,
+ ffm_write_trailer,
+};
+#endif //CONFIG_FFM_MUXER
diff --git a/contrib/ffmpeg/libavformat/file.c b/contrib/ffmpeg/libavformat/file.c
new file mode 100644
index 000000000..db671698f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/file.c
@@ -0,0 +1,140 @@
+/*
+ * Buffered file io for ffmpeg system
+ * Copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <fcntl.h>
+#ifndef __MINGW32__
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#else
+#include <io.h>
+#define open(fname,oflag,pmode) _open(fname,oflag,pmode)
+#endif /* __MINGW32__ */
+
+
+/* standard file protocol */
+
+static int file_open(URLContext *h, const char *filename, int flags)
+{
+ int access;
+ int fd;
+
+ strstart(filename, "file:", &filename);
+
+ if (flags & URL_RDWR) {
+ access = O_CREAT | O_TRUNC | O_RDWR;
+ } else if (flags & URL_WRONLY) {
+ access = O_CREAT | O_TRUNC | O_WRONLY;
+ } else {
+ access = O_RDONLY;
+ }
+#if defined(__MINGW32__) || defined(CONFIG_OS2) || defined(__CYGWIN__)
+ access |= O_BINARY;
+#endif
+ fd = open(filename, access, 0666);
+ if (fd < 0)
+ return -ENOENT;
+ h->priv_data = (void *)(size_t)fd;
+ return 0;
+}
+
+static int file_read(URLContext *h, unsigned char *buf, int size)
+{
+ int fd = (size_t)h->priv_data;
+ return read(fd, buf, size);
+}
+
+static int file_write(URLContext *h, unsigned char *buf, int size)
+{
+ int fd = (size_t)h->priv_data;
+ return write(fd, buf, size);
+}
+
+/* XXX: use llseek */
+static offset_t file_seek(URLContext *h, offset_t pos, int whence)
+{
+ int fd = (size_t)h->priv_data;
+#if defined(__MINGW32__)
+ return _lseeki64(fd, pos, whence);
+#else
+ return lseek(fd, pos, whence);
+#endif
+}
+
+static int file_close(URLContext *h)
+{
+ int fd = (size_t)h->priv_data;
+ return close(fd);
+}
+
+URLProtocol file_protocol = {
+ "file",
+ file_open,
+ file_read,
+ file_write,
+ file_seek,
+ file_close,
+};
+
+/* pipe protocol */
+
+static int pipe_open(URLContext *h, const char *filename, int flags)
+{
+ int fd;
+
+ if (flags & URL_WRONLY) {
+ fd = 1;
+ } else {
+ fd = 0;
+ }
+#if defined(__MINGW32__) || defined(CONFIG_OS2) || defined(__CYGWIN__)
+ setmode(fd, O_BINARY);
+#endif
+ h->priv_data = (void *)(size_t)fd;
+ h->is_streamed = 1;
+ return 0;
+}
+
+static int pipe_read(URLContext *h, unsigned char *buf, int size)
+{
+ int fd = (size_t)h->priv_data;
+ return read(fd, buf, size);
+}
+
+static int pipe_write(URLContext *h, unsigned char *buf, int size)
+{
+ int fd = (size_t)h->priv_data;
+ return write(fd, buf, size);
+}
+
+static int pipe_close(URLContext *h)
+{
+ return 0;
+}
+
+URLProtocol pipe_protocol = {
+ "pipe",
+ pipe_open,
+ pipe_read,
+ pipe_write,
+ NULL,
+ pipe_close,
+};
diff --git a/contrib/ffmpeg/libavformat/flic.c b/contrib/ffmpeg/libavformat/flic.c
new file mode 100644
index 000000000..ac32e7392
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/flic.c
@@ -0,0 +1,221 @@
+/*
+ * FLI/FLC Animation File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file flic.c
+ * FLI/FLC file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the .fli/.flc file format and all of its many
+ * variations, visit:
+ * http://www.compuphase.com/flic.htm
+ *
+ * This demuxer handles standard 0xAF11- and 0xAF12-type FLIs. It also
+ * handles special FLIs from the PC game "Magic Carpet".
+ */
+
+#include "avformat.h"
+
+#define FLIC_FILE_MAGIC_1 0xAF11
+#define FLIC_FILE_MAGIC_2 0xAF12
+#define FLIC_FILE_MAGIC_3 0xAF44 /* Flic Type for Extended FLX Format which
+ originated in Dave's Targa Animator (DTA) */
+#define FLIC_CHUNK_MAGIC_1 0xF1FA
+#define FLIC_CHUNK_MAGIC_2 0xF5FA
+#define FLIC_MC_PTS_INC 6000 /* pts increment for Magic Carpet game FLIs */
+#define FLIC_DEFAULT_PTS_INC 6000 /* for FLIs that have 0 speed */
+
+#define FLIC_HEADER_SIZE 128
+#define FLIC_PREAMBLE_SIZE 6
+
+typedef struct FlicDemuxContext {
+ int frame_pts_inc;
+ int64_t pts;
+ int video_stream_index;
+} FlicDemuxContext;
+
+static int flic_probe(AVProbeData *p)
+{
+ int magic_number;
+
+ if (p->buf_size < 6)
+ return 0;
+
+ magic_number = LE_16(&p->buf[4]);
+ if ((magic_number != FLIC_FILE_MAGIC_1) &&
+ (magic_number != FLIC_FILE_MAGIC_2) &&
+ (magic_number != FLIC_FILE_MAGIC_3))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int flic_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ FlicDemuxContext *flic = (FlicDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned char header[FLIC_HEADER_SIZE];
+ AVStream *st;
+ int speed;
+ int magic_number;
+
+ flic->pts = 0;
+
+ /* load the whole header and pull out the width and height */
+ if (get_buffer(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
+ return AVERROR_IO;
+
+ magic_number = LE_16(&header[4]);
+ speed = LE_32(&header[0x10]);
+
+ /* initialize the decoder streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ flic->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_FLIC;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = LE_16(&header[0x08]);
+ st->codec->height = LE_16(&header[0x0A]);
+
+ if (!st->codec->width || !st->codec->height)
+ return AVERROR_INVALIDDATA;
+
+ /* send over the whole 128-byte FLIC header */
+ st->codec->extradata_size = FLIC_HEADER_SIZE;
+ st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
+ memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);
+
+ av_set_pts_info(st, 33, 1, 90000);
+
+ /* Time to figure out the framerate: If there is a FLIC chunk magic
+ * number at offset 0x10, assume this is from the Bullfrog game,
+ * Magic Carpet. */
+ if (LE_16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
+
+ flic->frame_pts_inc = FLIC_MC_PTS_INC;
+
+ /* rewind the stream since the first chunk is at offset 12 */
+ url_fseek(pb, 12, SEEK_SET);
+
+ /* send over abbreviated FLIC header chunk */
+ av_free(st->codec->extradata);
+ st->codec->extradata_size = 12;
+ st->codec->extradata = av_malloc(12);
+ memcpy(st->codec->extradata, header, 12);
+
+ } else if (magic_number == FLIC_FILE_MAGIC_1) {
+ /*
+ * in this case, the speed (n) is number of 1/70s ticks between frames:
+ *
+ * pts n * frame #
+ * -------- = ----------- => pts = n * (90000/70) * frame #
+ * 90000 70
+ *
+ * therefore, the frame pts increment = n * 1285.7
+ */
+ flic->frame_pts_inc = speed * 1285.7;
+ } else if ((magic_number == FLIC_FILE_MAGIC_2) ||
+ (magic_number == FLIC_FILE_MAGIC_3)) {
+ /*
+ * in this case, the speed (n) is number of milliseconds between frames:
+ *
+ * pts n * frame #
+ * -------- = ----------- => pts = n * 90 * frame #
+ * 90000 1000
+ *
+ * therefore, the frame pts increment = n * 90
+ */
+ flic->frame_pts_inc = speed * 90;
+ } else
+ return AVERROR_INVALIDDATA;
+
+ if (flic->frame_pts_inc == 0)
+ flic->frame_pts_inc = FLIC_DEFAULT_PTS_INC;
+
+ return 0;
+}
+
+static int flic_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ FlicDemuxContext *flic = (FlicDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int packet_read = 0;
+ unsigned int size;
+ int magic;
+ int ret = 0;
+ unsigned char preamble[FLIC_PREAMBLE_SIZE];
+
+ while (!packet_read) {
+
+ if ((ret = get_buffer(pb, preamble, FLIC_PREAMBLE_SIZE)) !=
+ FLIC_PREAMBLE_SIZE) {
+ ret = AVERROR_IO;
+ break;
+ }
+
+ size = LE_32(&preamble[0]);
+ magic = LE_16(&preamble[4]);
+
+ if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
+ if (av_new_packet(pkt, size)) {
+ ret = AVERROR_IO;
+ break;
+ }
+ pkt->stream_index = flic->video_stream_index;
+ pkt->pts = flic->pts;
+ pkt->pos = url_ftell(pb);
+ memcpy(pkt->data, preamble, FLIC_PREAMBLE_SIZE);
+ ret = get_buffer(pb, pkt->data + FLIC_PREAMBLE_SIZE,
+ size - FLIC_PREAMBLE_SIZE);
+ if (ret != size - FLIC_PREAMBLE_SIZE) {
+ av_free_packet(pkt);
+ ret = AVERROR_IO;
+ }
+ flic->pts += flic->frame_pts_inc;
+ packet_read = 1;
+ } else {
+ /* not interested in this chunk */
+ url_fseek(pb, size - 6, SEEK_CUR);
+ }
+ }
+
+ return ret;
+}
+
+static int flic_read_close(AVFormatContext *s)
+{
+// FlicDemuxContext *flic = (FlicDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+AVInputFormat flic_demuxer = {
+ "flic",
+ "FLI/FLC/FLX animation format",
+ sizeof(FlicDemuxContext),
+ flic_probe,
+ flic_read_header,
+ flic_read_packet,
+ flic_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/flvdec.c b/contrib/ffmpeg/libavformat/flvdec.c
new file mode 100644
index 000000000..a1c2aa4eb
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/flvdec.c
@@ -0,0 +1,259 @@
+/*
+ * FLV demuxer
+ * Copyright (c) 2003 The FFmpeg Project.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
+ * This demuxer will generate a 1 byte extradata for VP6F content.
+ * It is composed of:
+ * - upper 4bits: difference between encoded width and visible width
+ * - lower 4bits: difference between encoded height and visible height
+ */
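+/* For example, extradata[0] == 0x23 means the coded frame is 2 pixels wider
+ * and 3 pixels taller than the visible picture. */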
+#include "avformat.h"
+
+static int flv_probe(AVProbeData *p)
+{
+ const uint8_t *d;
+
+ if (p->buf_size < 6)
+ return 0;
+ d = p->buf;
+ if (d[0] == 'F' && d[1] == 'L' && d[2] == 'V') {
+ return 50;
+ }
+ return 0;
+}
+
+static int flv_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int offset, flags, size;
+
+ s->ctx_flags |= AVFMTCTX_NOHEADER; // ok, we have a header but there's no fps, codec type, sample_rate, ...
+
+ url_fskip(&s->pb, 4);
+ flags = get_byte(&s->pb);
+
+ offset = get_be32(&s->pb);
+
+ if(!url_is_streamed(&s->pb)){
+ const int fsize= url_fsize(&s->pb);
+ url_fseek(&s->pb, fsize-4, SEEK_SET);
+ size= get_be32(&s->pb);
+ url_fseek(&s->pb, fsize-3-size, SEEK_SET);
+ if(size == get_be24(&s->pb) + 11){
+ s->duration= get_be24(&s->pb) * (int64_t)AV_TIME_BASE / 1000;
+ }
+ }
+
+ url_fseek(&s->pb, offset, SEEK_SET);
+
+ s->start_time = 0;
+
+ return 0;
+}
+
+static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, i, type, size, pts, flags, is_audio, next, pos;
+ AVStream *st = NULL;
+
+ for(;;){
+ pos = url_ftell(&s->pb);
+ url_fskip(&s->pb, 4); /* size of previous packet */
+ type = get_byte(&s->pb);
+ size = get_be24(&s->pb);
+ pts = get_be24(&s->pb);
+// av_log(s, AV_LOG_DEBUG, "type:%d, size:%d, pts:%d\n", type, size, pts);
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+ url_fskip(&s->pb, 4); /* reserved */
+ flags = 0;
+
+ if(size == 0)
+ continue;
+
+ next= size + url_ftell(&s->pb);
+
+ if (type == 8) {
+ is_audio=1;
+ flags = get_byte(&s->pb);
+ } else if (type == 9) {
+ is_audio=0;
+ flags = get_byte(&s->pb);
+ } else if (type == 18 && size > 13+1+4) {
+ url_fskip(&s->pb, 13); //onMetaData blah
+ if(get_byte(&s->pb) == 8){
+ url_fskip(&s->pb, 4);
+ }
+ while(url_ftell(&s->pb) + 5 < next){
+ char tmp[128];
+ int type, len;
+ double d= 0;
+
+ len= get_be16(&s->pb);
+ if(len >= sizeof(tmp) || !len)
+ break;
+ get_buffer(&s->pb, tmp, len);
+ tmp[len]=0;
+
+ type= get_byte(&s->pb);
+ if(type==0){
+ d= av_int2dbl(get_be64(&s->pb));
+ }else if(type==2){
+ len= get_be16(&s->pb);
+ if(len >= sizeof(tmp))
+ break;
+ url_fskip(&s->pb, len);
+ }else if(type==8){
+ //array
+ break;
+ }else if(type==11){
+ d= av_int2dbl(get_be64(&s->pb));
+ get_be16(&s->pb);
+ }
+
+ if(!strcmp(tmp, "duration")){
+ s->duration = d*AV_TIME_BASE;
+ }else if(!strcmp(tmp, "videodatarate")){
+ }else if(!strcmp(tmp, "audiodatarate")){
+ }
+ }
+ url_fseek(&s->pb, next, SEEK_SET);
+ continue;
+ } else {
+ /* skip packet */
+ av_log(s, AV_LOG_ERROR, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags);
+ url_fseek(&s->pb, next, SEEK_SET);
+ continue;
+ }
+
+ /* now find stream */
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ if (st->id == is_audio)
+ break;
+ }
+ if(i == s->nb_streams){
+ st = av_new_stream(s, is_audio);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 24, 1, 1000); /* 24 bit pts in ms */
+ st->codec->time_base= (AVRational){1,1000};
+ }
+// av_log(NULL, AV_LOG_DEBUG, "%d %X %d \n", is_audio, flags, st->discard);
+ if( (st->discard >= AVDISCARD_NONKEY && !((flags >> 4)==1 || is_audio))
+ ||(st->discard >= AVDISCARD_BIDIR && ((flags >> 4)==3 && !is_audio))
+ || st->discard >= AVDISCARD_ALL
+ ){
+ url_fseek(&s->pb, next, SEEK_SET);
+ continue;
+ }
+ if ((flags >> 4)==1)
+ av_add_index_entry(st, pos, pts, size, 0, AVINDEX_KEYFRAME);
+ break;
+ }
+
+ if(is_audio){
+ if(st->codec->sample_rate == 0){
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->channels = (flags&1)+1;
+ if((flags >> 4) == 5)
+ st->codec->sample_rate= 8000;
+ else
+ st->codec->sample_rate = (44100<<((flags>>2)&3))>>3;
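+ /* (flags >> 2) & 3 selects the sample rate: 44100 << n >> 3 gives
+ * 5512, 11025, 22050 and 44100 Hz for n = 0..3 */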
+ switch(flags >> 4){/* 0: uncompressed 1: ADPCM 2: mp3 5: Nellymoser 8kHz mono 6: Nellymoser*/
+ case 0: if (flags&2) st->codec->codec_id = CODEC_ID_PCM_S16BE;
+ else st->codec->codec_id = CODEC_ID_PCM_S8; break;
+ case 1: st->codec->codec_id = CODEC_ID_ADPCM_SWF; break;
+ case 2: st->codec->codec_id = CODEC_ID_MP3; st->need_parsing = 1; break;
+ // this is not listed in the FLV spec but it is in SWF, strange...
+ case 3: if (flags&2) st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ else st->codec->codec_id = CODEC_ID_PCM_S8; break;
+ default:
+ av_log(s, AV_LOG_INFO, "Unsupported audio codec (%x)\n", flags >> 4);
+ st->codec->codec_tag= (flags >> 4);
+ }
+ st->codec->bits_per_sample = (flags & 2) ? 16 : 8;
+ }
+ }else{
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ switch(flags & 0xF){
+ case 2: st->codec->codec_id = CODEC_ID_FLV1; break;
+ case 3: st->codec->codec_id = CODEC_ID_FLASHSV; break;
+ case 4:
+ st->codec->codec_id = CODEC_ID_VP6F;
+ if (st->codec->extradata_size != 1) {
+ st->codec->extradata_size = 1;
+ st->codec->extradata = av_malloc(1);
+ }
+ /* width and height adjustment */
+ st->codec->extradata[0] = get_byte(&s->pb);
+ size--;
+ break;
+ default:
+ av_log(s, AV_LOG_INFO, "Unsupported video codec (%x)\n", flags & 0xf);
+ st->codec->codec_tag= flags & 0xF;
+ }
+ }
+
+ ret= av_get_packet(&s->pb, pkt, size - 1);
+ if (ret <= 0) {
+ return AVERROR_IO;
+ }
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ pkt->pts = pts;
+ pkt->stream_index = st->index;
+
+ if (is_audio || ((flags >> 4)==1))
+ pkt->flags |= PKT_FLAG_KEY;
+
+ return ret;
+}
+
+static int flv_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int flv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+ AVStream *st = s->streams[stream_index];
+ int index = av_index_search_timestamp(st, timestamp, flags);
+ if (index < 0)
+ return -1;
+ url_fseek(&s->pb, st->index_entries[index].pos, SEEK_SET);
+
+ return 0;
+}
+
+AVInputFormat flv_demuxer = {
+ "flv",
+ "flv format",
+ 0,
+ flv_probe,
+ flv_read_header,
+ flv_read_packet,
+ flv_read_close,
+ flv_read_seek,
+ .extensions = "flv",
+ .value = CODEC_ID_FLV1,
+};
diff --git a/contrib/ffmpeg/libavformat/flvenc.c b/contrib/ffmpeg/libavformat/flvenc.c
new file mode 100644
index 000000000..0b09d9830
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/flvenc.c
@@ -0,0 +1,284 @@
+/*
+ * FLV muxer
+ * Copyright (c) 2003 The FFmpeg Project.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+typedef struct FLVContext {
+ int hasAudio;
+ int hasVideo;
+ int reserved;
+ offset_t duration_offset;
+ offset_t filesize_offset;
+ int64_t duration;
+} FLVContext;
+
+static int get_audio_flags(AVCodecContext *enc){
+ int flags = (enc->bits_per_sample == 16) ? 0x2 : 0x0;
+
+ switch (enc->sample_rate) {
+ case 44100:
+ flags |= 0x0C;
+ break;
+ case 22050:
+ flags |= 0x08;
+ break;
+ case 11025:
+ flags |= 0x04;
+ break;
+ case 8000: //nellymoser only
+ case 5512: //not mp3
+ flags |= 0x00;
+ break;
+ default:
+ av_log(enc, AV_LOG_ERROR, "flv doesnt support that sample rate, choose from (44100, 22050, 11025)\n");
+ return -1;
+ }
+
+ if (enc->channels > 1) {
+ flags |= 0x01;
+ }
+
+ switch(enc->codec_id){
+ case CODEC_ID_MP3:
+ flags |= 0x20 | 0x2;
+ break;
+ case CODEC_ID_PCM_S8:
+ break;
+ case CODEC_ID_PCM_S16BE:
+ flags |= 0x2;
+ break;
+ case CODEC_ID_PCM_S16LE:
+ flags |= 0x30 | 0x2;
+ break;
+ case CODEC_ID_ADPCM_SWF:
+ flags |= 0x10;
+ break;
+ case 0:
+ flags |= enc->codec_tag<<4;
+ break;
+ default:
+ av_log(enc, AV_LOG_ERROR, "codec not compatible with flv\n");
+ return -1;
+ }
+
+ return flags;
+}
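+
+/* Worked example: 16-bit stereo MP3 at 44100 Hz yields
+ * 0x02 (16 bit) | 0x0C (44.1 kHz) | 0x01 (stereo) | 0x20|0x02 (MP3) == 0x2f */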
+
+#define AMF_DOUBLE 0
+#define AMF_BOOLEAN 1
+#define AMF_STRING 2
+#define AMF_OBJECT 3
+#define AMF_MIXED_ARRAY 8
+#define AMF_ARRAY 10
+#define AMF_DATE 11
+
+static void put_amf_string(ByteIOContext *pb, const char *str)
+{
+ size_t len = strlen(str);
+ put_be16(pb, len);
+ put_buffer(pb, str, len);
+}
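+
+/* AMF strings are a 16 bit big-endian length followed by the raw bytes,
+ * e.g. "duration" becomes 0x00 0x08 'd' 'u' 'r' 'a' 't' 'i' 'o' 'n'. */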
+
+static void put_amf_double(ByteIOContext *pb, double d)
+{
+ put_byte(pb, AMF_DOUBLE);
+ put_be64(pb, av_dbl2int(d));
+}
+
+static int flv_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ FLVContext *flv = s->priv_data;
+ int i, width, height, samplerate;
+ double framerate = 0.0;
+ int metadata_size_pos, data_size;
+
+ flv->hasAudio = 0;
+ flv->hasVideo = 0;
+
+ put_tag(pb,"FLV");
+ put_byte(pb,1);
+ put_byte(pb,0); // delayed write
+ put_be32(pb,9);
+ put_be32(pb,0);
+
+ for(i=0; i<s->nb_streams; i++){
+ AVCodecContext *enc = s->streams[i]->codec;
+ if (enc->codec_type == CODEC_TYPE_VIDEO) {
+ width = enc->width;
+ height = enc->height;
+ if (s->streams[i]->r_frame_rate.den && s->streams[i]->r_frame_rate.num) {
+ framerate = av_q2d(s->streams[i]->r_frame_rate);
+ } else {
+ framerate = 1/av_q2d(s->streams[i]->codec->time_base);
+ }
+ flv->hasVideo=1;
+ } else {
+ flv->hasAudio=1;
+ samplerate = enc->sample_rate;
+ }
+ av_set_pts_info(s->streams[i], 24, 1, 1000); /* 24 bit pts in ms */
+ if(enc->codec_tag == 5){
+ put_byte(pb,8); // message type
+ put_be24(pb,0); // include flags
+ put_be24(pb,0); // time stamp
+ put_be32(pb,0); // reserved
+ put_be32(pb,11); // size
+ flv->reserved=5;
+ }
+ if(enc->codec_type == CODEC_TYPE_AUDIO && get_audio_flags(enc)<0)
+ return -1;
+ }
+
+ /* write meta_tag */
+ put_byte(pb, 18); // tag type META
+ metadata_size_pos= url_ftell(pb);
+ put_be24(pb, 0); // size of data part (sum of all parts below)
+ put_be24(pb, 0); // time stamp
+ put_be32(pb, 0); // reserved
+
+ /* now data of data_size size */
+
+ /* first event name as a string */
+ put_byte(pb, AMF_STRING); // 1 byte
+ put_amf_string(pb, "onMetaData"); // 12 bytes
+
+ /* mixed array (hash) with size and string/type/data tuples */
+ put_byte(pb, AMF_MIXED_ARRAY);
+ put_be32(pb, 4*flv->hasVideo + flv->hasAudio + 2); // +2 for duration and file size
+
+ put_amf_string(pb, "duration");
+ flv->duration_offset= url_ftell(pb);
+ put_amf_double(pb, 0); // delayed write
+
+ if(flv->hasVideo){
+ put_amf_string(pb, "width");
+ put_amf_double(pb, width);
+
+ put_amf_string(pb, "height");
+ put_amf_double(pb, height);
+
+ put_amf_string(pb, "videodatarate");
+ put_amf_double(pb, s->bit_rate / 1024.0);
+
+ put_amf_string(pb, "framerate");
+ put_amf_double(pb, framerate);
+ }
+
+ if(flv->hasAudio){
+ put_amf_string(pb, "audiosamplerate");
+ put_amf_double(pb, samplerate);
+ }
+
+ put_amf_string(pb, "filesize");
+ flv->filesize_offset= url_ftell(pb);
+ put_amf_double(pb, 0); // delayed write
+
+ put_amf_string(pb, "");
+ put_byte(pb, 9); // end marker 1 byte
+
+ /* write total size of tag */
+ data_size= url_ftell(pb) - metadata_size_pos - 10;
+ url_fseek(pb, metadata_size_pos, SEEK_SET);
+ put_be24(pb, data_size);
+ url_fseek(pb, data_size + 10 - 3, SEEK_CUR);
+ put_be32(pb, data_size + 11);
+
+ return 0;
+}
+
+static int flv_write_trailer(AVFormatContext *s)
+{
+ int64_t file_size;
+ int flags = 0;
+
+ ByteIOContext *pb = &s->pb;
+ FLVContext *flv = s->priv_data;
+
+ file_size = url_ftell(pb);
+ flags |= flv->hasAudio ? 4 : 0;
+ flags |= flv->hasVideo ? 1 : 0;
+ url_fseek(pb, 4, SEEK_SET);
+ put_byte(pb,flags);
+
+ /* update information */
+ url_fseek(pb, flv->duration_offset, SEEK_SET);
+ put_amf_double(pb, flv->duration / (double)1000);
+ url_fseek(pb, flv->filesize_offset, SEEK_SET);
+ put_amf_double(pb, file_size);
+
+ url_fseek(pb, file_size, SEEK_SET);
+ return 0;
+}
+
+static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc = s->streams[pkt->stream_index]->codec;
+ FLVContext *flv = s->priv_data;
+ int size= pkt->size;
+ int flags;
+
+// av_log(s, AV_LOG_DEBUG, "type:%d pts: %"PRId64" size:%d\n", enc->codec_type, pkt->pts, size);
+
+ if (enc->codec_type == CODEC_TYPE_VIDEO) {
+ put_byte(pb, 9);
+ flags = 2; // choose h263
+ flags |= pkt->flags & PKT_FLAG_KEY ? 0x10 : 0x20; // add keyframe indicator
+ } else {
+ assert(enc->codec_type == CODEC_TYPE_AUDIO);
+ flags = get_audio_flags(enc);
+
+ assert(size);
+
+ put_byte(pb, 8);
+ }
+
+ put_be24(pb,size+1); // include flags
+ put_be24(pb,pkt->pts);
+ put_be32(pb,flv->reserved);
+ put_byte(pb,flags);
+ put_buffer(pb, pkt->data, size);
+ put_be32(pb,size+1+11); // previous tag size
+ flv->duration = pkt->pts + pkt->duration;
+
+ put_flush_packet(pb);
+ return 0;
+}
+
+AVOutputFormat flv_muxer = {
+ "flv",
+ "flv format",
+ "video/x-flv",
+ "flv",
+ sizeof(FLVContext),
+#ifdef CONFIG_MP3LAME
+ CODEC_ID_MP3,
+#else // CONFIG_MP3LAME
+ CODEC_ID_NONE,
+#endif // CONFIG_MP3LAME
+ CODEC_ID_FLV1,
+ flv_write_header,
+ flv_write_packet,
+ flv_write_trailer,
+};
diff --git a/contrib/ffmpeg/libavformat/framehook.c b/contrib/ffmpeg/libavformat/framehook.c
new file mode 100644
index 000000000..03bbc95f6
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/framehook.c
@@ -0,0 +1,121 @@
+/*
+ * Video processing hooks
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <errno.h>
+#include "config.h"
+#include "avformat.h"
+#include "framehook.h"
+
+#ifdef HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+
+typedef struct _FrameHookEntry {
+ struct _FrameHookEntry *next;
+ FrameHookConfigureFn Configure;
+ FrameHookProcessFn Process;
+ FrameHookReleaseFn Release;
+ void *ctx;
+} FrameHookEntry;
+
+static FrameHookEntry *first_hook;
+
+/* Returns 0 on OK */
+int frame_hook_add(int argc, char *argv[])
+{
+#ifdef CONFIG_VHOOK
+ void *loaded;
+ FrameHookEntry *fhe, **fhep;
+
+ if (argc < 1) {
+ return ENOENT;
+ }
+
+ loaded = dlopen(argv[0], RTLD_NOW);
+ if (!loaded) {
+ av_log(NULL, AV_LOG_ERROR, "%s\n", dlerror());
+ return -1;
+ }
+
+ fhe = av_mallocz(sizeof(*fhe));
+ if (!fhe) {
+ return errno;
+ }
+
+ fhe->Configure = dlsym(loaded, "Configure");
+ fhe->Process = dlsym(loaded, "Process");
+ fhe->Release = dlsym(loaded, "Release"); /* Optional */
+
+ if (!fhe->Process) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to find Process entrypoint in %s\n", argv[0]);
+ return -1;
+ }
+
+ if (!fhe->Configure && argc > 1) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to find Configure entrypoint in %s\n", argv[0]);
+ return -1;
+ }
+
+ if (argc > 1 || fhe->Configure) {
+ if (fhe->Configure(&fhe->ctx, argc, argv)) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to Configure %s\n", argv[0]);
+ return -1;
+ }
+ }
+
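+ /* append at the tail: walk the singly linked list of installed hooks
+ until the terminating NULL next pointer is found */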
+ for (fhep = &first_hook; *fhep; fhep = &((*fhep)->next)) {
+ }
+
+ *fhep = fhe;
+
+ return 0;
+#else
+ av_log(NULL, AV_LOG_ERROR, "Video hooking not compiled into this version\n");
+ return 1;
+#endif
+}
+
+void frame_hook_process(AVPicture *pict, enum PixelFormat pix_fmt, int width, int height)
+{
+ if (first_hook) {
+ FrameHookEntry *fhe;
+ int64_t pts = av_gettime();
+
+ for (fhe = first_hook; fhe; fhe = fhe->next) {
+ fhe->Process(fhe->ctx, pict, pix_fmt, width, height, pts);
+ }
+ }
+}
+
+void frame_hook_release(void)
+{
+ FrameHookEntry *fhe;
+ FrameHookEntry *fhenext;
+
+ for (fhe = first_hook; fhe; fhe = fhenext) {
+ fhenext = fhe->next;
+ if (fhe->Release)
+ fhe->Release(fhe->ctx);
+ av_free(fhe);
+ }
+
+ first_hook = NULL;
+}
diff --git a/contrib/ffmpeg/libavformat/framehook.h b/contrib/ffmpeg/libavformat/framehook.h
new file mode 100644
index 000000000..d843ddb85
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/framehook.h
@@ -0,0 +1,50 @@
+/*
+ * video processing hooks
+ * copyright (c) 2000, 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _FRAMEHOOK_H
+#define _FRAMEHOOK_H
+
+/*
+ * Prototypes for the interface to a .so that implements a video processing hook
+ */
+
+#include "avcodec.h"
+
+/* Function must be called 'Configure' */
+typedef int (FrameHookConfigure)(void **ctxp, int argc, char *argv[]);
+typedef FrameHookConfigure *FrameHookConfigureFn;
+extern FrameHookConfigure Configure;
+
+/* Function must be called 'Process' */
+typedef void (FrameHookProcess)(void *ctx, struct AVPicture *pict, enum PixelFormat pix_fmt, int width, int height, int64_t pts);
+typedef FrameHookProcess *FrameHookProcessFn;
+extern FrameHookProcess Process;
+
+/* Function must be called 'Release' */
+typedef void (FrameHookRelease)(void *ctx);
+typedef FrameHookRelease *FrameHookReleaseFn;
+extern FrameHookRelease Release;
+
+extern int frame_hook_add(int argc, char *argv[]);
+extern void frame_hook_process(struct AVPicture *pict, enum PixelFormat pix_fmt, int width, int height);
+extern void frame_hook_release(void);
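+
+/*
+ * A minimal hook module could look roughly like the sketch below; this is
+ * only an illustration, not part of the API. The context struct and the
+ * "-invert" option are made-up names, while Configure/Process/Release are
+ * the entry points required above.
+ *
+ * typedef struct { int invert; } MyHookCtx;
+ *
+ * int Configure(void **ctxp, int argc, char *argv[])
+ * {
+ * MyHookCtx *ctx = av_mallocz(sizeof(*ctx));
+ * if (!ctx)
+ * return -1;
+ * ctx->invert = argc > 1 && !strcmp(argv[1], "-invert");
+ * *ctxp = ctx;
+ * return 0;
+ * }
+ *
+ * void Process(void *ctx, struct AVPicture *pict, enum PixelFormat pix_fmt,
+ * int width, int height, int64_t pts)
+ * {
+ * // modify pict->data[] in place here
+ * }
+ *
+ * void Release(void *ctx)
+ * {
+ * av_free(ctx);
+ * }
+ */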
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/gif.c b/contrib/ffmpeg/libavformat/gif.c
new file mode 100644
index 000000000..1083710d5
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gif.c
@@ -0,0 +1,419 @@
+/*
+ * Animated GIF muxer
+ * Copyright (c) 2000 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * First version by Francois Revol revol@free.fr
+ *
+ * Features and limitations:
+ * - currently no compression is performed,
+ * in fact the size of the data is 9/8 the size of the image in 8bpp
+ * - uses only a global standard palette
+ * - tested with IE 5.0, Opera for BeOS, NetPositive (BeOS), and Mozilla (BeOS).
+ *
+ * Reference documents:
+ * http://www.goice.co.jp/member/mo/formats/gif.html
+ * http://astronomy.swin.edu.au/pbourke/dataformats/gif/
+ * http://www.dcs.ed.ac.uk/home/mxr/gfx/2d/GIF89a.txt
+ *
+ * this URL claims to describe an LZW algorithm not covered by the Unisys patent:
+ * http://www.msg.net/utility/whirlgif/gifencod.html
+ * it could help reduce the size of the files _a lot_...
+ * some sites also mention an RLE type of compression.
+ */
+
+#include "avformat.h"
+#include "bitstream.h"
+
+/* bitstream minipacket size */
+#define GIF_CHUNKS 100
+
+/* slows down the decoding (and some browsers don't like it) */
+/* update on the 'some browsers don't like it' issue from above: this was probably due to a missing 'Data Sub-block Terminator' (byte 19) in the app_header */
+#define GIF_ADD_APP_HEADER // required to enable looping of animated gif
+
+typedef struct {
+ unsigned char r;
+ unsigned char g;
+ unsigned char b;
+} rgb_triplet;
+
+/* we use the standard 216 color palette */
+
+/* this script was used to create the palette:
+ * for r in 00 33 66 99 cc ff; do for g in 00 33 66 99 cc ff; do echo -n " "; for b in 00 33 66 99 cc ff; do
+ * echo -n "{ 0x$r, 0x$g, 0x$b }, "; done; echo ""; done; done
+ */
+
+static const rgb_triplet gif_clut[216] = {
+ { 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x33 }, { 0x00, 0x00, 0x66 }, { 0x00, 0x00, 0x99 }, { 0x00, 0x00, 0xcc }, { 0x00, 0x00, 0xff },
+ { 0x00, 0x33, 0x00 }, { 0x00, 0x33, 0x33 }, { 0x00, 0x33, 0x66 }, { 0x00, 0x33, 0x99 }, { 0x00, 0x33, 0xcc }, { 0x00, 0x33, 0xff },
+ { 0x00, 0x66, 0x00 }, { 0x00, 0x66, 0x33 }, { 0x00, 0x66, 0x66 }, { 0x00, 0x66, 0x99 }, { 0x00, 0x66, 0xcc }, { 0x00, 0x66, 0xff },
+ { 0x00, 0x99, 0x00 }, { 0x00, 0x99, 0x33 }, { 0x00, 0x99, 0x66 }, { 0x00, 0x99, 0x99 }, { 0x00, 0x99, 0xcc }, { 0x00, 0x99, 0xff },
+ { 0x00, 0xcc, 0x00 }, { 0x00, 0xcc, 0x33 }, { 0x00, 0xcc, 0x66 }, { 0x00, 0xcc, 0x99 }, { 0x00, 0xcc, 0xcc }, { 0x00, 0xcc, 0xff },
+ { 0x00, 0xff, 0x00 }, { 0x00, 0xff, 0x33 }, { 0x00, 0xff, 0x66 }, { 0x00, 0xff, 0x99 }, { 0x00, 0xff, 0xcc }, { 0x00, 0xff, 0xff },
+ { 0x33, 0x00, 0x00 }, { 0x33, 0x00, 0x33 }, { 0x33, 0x00, 0x66 }, { 0x33, 0x00, 0x99 }, { 0x33, 0x00, 0xcc }, { 0x33, 0x00, 0xff },
+ { 0x33, 0x33, 0x00 }, { 0x33, 0x33, 0x33 }, { 0x33, 0x33, 0x66 }, { 0x33, 0x33, 0x99 }, { 0x33, 0x33, 0xcc }, { 0x33, 0x33, 0xff },
+ { 0x33, 0x66, 0x00 }, { 0x33, 0x66, 0x33 }, { 0x33, 0x66, 0x66 }, { 0x33, 0x66, 0x99 }, { 0x33, 0x66, 0xcc }, { 0x33, 0x66, 0xff },
+ { 0x33, 0x99, 0x00 }, { 0x33, 0x99, 0x33 }, { 0x33, 0x99, 0x66 }, { 0x33, 0x99, 0x99 }, { 0x33, 0x99, 0xcc }, { 0x33, 0x99, 0xff },
+ { 0x33, 0xcc, 0x00 }, { 0x33, 0xcc, 0x33 }, { 0x33, 0xcc, 0x66 }, { 0x33, 0xcc, 0x99 }, { 0x33, 0xcc, 0xcc }, { 0x33, 0xcc, 0xff },
+ { 0x33, 0xff, 0x00 }, { 0x33, 0xff, 0x33 }, { 0x33, 0xff, 0x66 }, { 0x33, 0xff, 0x99 }, { 0x33, 0xff, 0xcc }, { 0x33, 0xff, 0xff },
+ { 0x66, 0x00, 0x00 }, { 0x66, 0x00, 0x33 }, { 0x66, 0x00, 0x66 }, { 0x66, 0x00, 0x99 }, { 0x66, 0x00, 0xcc }, { 0x66, 0x00, 0xff },
+ { 0x66, 0x33, 0x00 }, { 0x66, 0x33, 0x33 }, { 0x66, 0x33, 0x66 }, { 0x66, 0x33, 0x99 }, { 0x66, 0x33, 0xcc }, { 0x66, 0x33, 0xff },
+ { 0x66, 0x66, 0x00 }, { 0x66, 0x66, 0x33 }, { 0x66, 0x66, 0x66 }, { 0x66, 0x66, 0x99 }, { 0x66, 0x66, 0xcc }, { 0x66, 0x66, 0xff },
+ { 0x66, 0x99, 0x00 }, { 0x66, 0x99, 0x33 }, { 0x66, 0x99, 0x66 }, { 0x66, 0x99, 0x99 }, { 0x66, 0x99, 0xcc }, { 0x66, 0x99, 0xff },
+ { 0x66, 0xcc, 0x00 }, { 0x66, 0xcc, 0x33 }, { 0x66, 0xcc, 0x66 }, { 0x66, 0xcc, 0x99 }, { 0x66, 0xcc, 0xcc }, { 0x66, 0xcc, 0xff },
+ { 0x66, 0xff, 0x00 }, { 0x66, 0xff, 0x33 }, { 0x66, 0xff, 0x66 }, { 0x66, 0xff, 0x99 }, { 0x66, 0xff, 0xcc }, { 0x66, 0xff, 0xff },
+ { 0x99, 0x00, 0x00 }, { 0x99, 0x00, 0x33 }, { 0x99, 0x00, 0x66 }, { 0x99, 0x00, 0x99 }, { 0x99, 0x00, 0xcc }, { 0x99, 0x00, 0xff },
+ { 0x99, 0x33, 0x00 }, { 0x99, 0x33, 0x33 }, { 0x99, 0x33, 0x66 }, { 0x99, 0x33, 0x99 }, { 0x99, 0x33, 0xcc }, { 0x99, 0x33, 0xff },
+ { 0x99, 0x66, 0x00 }, { 0x99, 0x66, 0x33 }, { 0x99, 0x66, 0x66 }, { 0x99, 0x66, 0x99 }, { 0x99, 0x66, 0xcc }, { 0x99, 0x66, 0xff },
+ { 0x99, 0x99, 0x00 }, { 0x99, 0x99, 0x33 }, { 0x99, 0x99, 0x66 }, { 0x99, 0x99, 0x99 }, { 0x99, 0x99, 0xcc }, { 0x99, 0x99, 0xff },
+ { 0x99, 0xcc, 0x00 }, { 0x99, 0xcc, 0x33 }, { 0x99, 0xcc, 0x66 }, { 0x99, 0xcc, 0x99 }, { 0x99, 0xcc, 0xcc }, { 0x99, 0xcc, 0xff },
+ { 0x99, 0xff, 0x00 }, { 0x99, 0xff, 0x33 }, { 0x99, 0xff, 0x66 }, { 0x99, 0xff, 0x99 }, { 0x99, 0xff, 0xcc }, { 0x99, 0xff, 0xff },
+ { 0xcc, 0x00, 0x00 }, { 0xcc, 0x00, 0x33 }, { 0xcc, 0x00, 0x66 }, { 0xcc, 0x00, 0x99 }, { 0xcc, 0x00, 0xcc }, { 0xcc, 0x00, 0xff },
+ { 0xcc, 0x33, 0x00 }, { 0xcc, 0x33, 0x33 }, { 0xcc, 0x33, 0x66 }, { 0xcc, 0x33, 0x99 }, { 0xcc, 0x33, 0xcc }, { 0xcc, 0x33, 0xff },
+ { 0xcc, 0x66, 0x00 }, { 0xcc, 0x66, 0x33 }, { 0xcc, 0x66, 0x66 }, { 0xcc, 0x66, 0x99 }, { 0xcc, 0x66, 0xcc }, { 0xcc, 0x66, 0xff },
+ { 0xcc, 0x99, 0x00 }, { 0xcc, 0x99, 0x33 }, { 0xcc, 0x99, 0x66 }, { 0xcc, 0x99, 0x99 }, { 0xcc, 0x99, 0xcc }, { 0xcc, 0x99, 0xff },
+ { 0xcc, 0xcc, 0x00 }, { 0xcc, 0xcc, 0x33 }, { 0xcc, 0xcc, 0x66 }, { 0xcc, 0xcc, 0x99 }, { 0xcc, 0xcc, 0xcc }, { 0xcc, 0xcc, 0xff },
+ { 0xcc, 0xff, 0x00 }, { 0xcc, 0xff, 0x33 }, { 0xcc, 0xff, 0x66 }, { 0xcc, 0xff, 0x99 }, { 0xcc, 0xff, 0xcc }, { 0xcc, 0xff, 0xff },
+ { 0xff, 0x00, 0x00 }, { 0xff, 0x00, 0x33 }, { 0xff, 0x00, 0x66 }, { 0xff, 0x00, 0x99 }, { 0xff, 0x00, 0xcc }, { 0xff, 0x00, 0xff },
+ { 0xff, 0x33, 0x00 }, { 0xff, 0x33, 0x33 }, { 0xff, 0x33, 0x66 }, { 0xff, 0x33, 0x99 }, { 0xff, 0x33, 0xcc }, { 0xff, 0x33, 0xff },
+ { 0xff, 0x66, 0x00 }, { 0xff, 0x66, 0x33 }, { 0xff, 0x66, 0x66 }, { 0xff, 0x66, 0x99 }, { 0xff, 0x66, 0xcc }, { 0xff, 0x66, 0xff },
+ { 0xff, 0x99, 0x00 }, { 0xff, 0x99, 0x33 }, { 0xff, 0x99, 0x66 }, { 0xff, 0x99, 0x99 }, { 0xff, 0x99, 0xcc }, { 0xff, 0x99, 0xff },
+ { 0xff, 0xcc, 0x00 }, { 0xff, 0xcc, 0x33 }, { 0xff, 0xcc, 0x66 }, { 0xff, 0xcc, 0x99 }, { 0xff, 0xcc, 0xcc }, { 0xff, 0xcc, 0xff },
+ { 0xff, 0xff, 0x00 }, { 0xff, 0xff, 0x33 }, { 0xff, 0xff, 0x66 }, { 0xff, 0xff, 0x99 }, { 0xff, 0xff, 0xcc }, { 0xff, 0xff, 0xff },
+};
+
+/* The GIF format uses reversed order for bitstreams... */
+/* at least they don't use PDP_ENDIAN :) */
+/* so we 'extend' PutBitContext. hmmm, OOP :) */
+/* seems this thing changed slightly since I wrote it... */
+
+#ifdef ALT_BITSTREAM_WRITER
+# error no ALT_BITSTREAM_WRITER support for now
+#endif
+
+static void gif_put_bits_rev(PutBitContext *s, int n, unsigned int value)
+{
+ unsigned int bit_buf;
+ int bit_cnt;
+
+ // printf("put_bits=%d %x\n", n, value);
+ assert(n == 32 || value < (1U << n));
+
+ bit_buf = s->bit_buf;
+ bit_cnt = 32 - s->bit_left; /* XXX: laziness... was = s->bit_cnt; */
+
+ // printf("n=%d value=%x cnt=%d buf=%x\n", n, value, bit_cnt, bit_buf);
+ /* XXX: optimize */
+ if (n < (32-bit_cnt)) {
+ bit_buf |= value << (bit_cnt);
+ bit_cnt+=n;
+ } else {
+ bit_buf |= value << (bit_cnt);
+
+ *s->buf_ptr = bit_buf & 0xff;
+ s->buf_ptr[1] = (bit_buf >> 8) & 0xff;
+ s->buf_ptr[2] = (bit_buf >> 16) & 0xff;
+ s->buf_ptr[3] = (bit_buf >> 24) & 0xff;
+
+ //printf("bitbuf = %08x\n", bit_buf);
+ s->buf_ptr+=4;
+ if (s->buf_ptr >= s->buf_end)
+ puts("bit buffer overflow !!"); // should never happen ! who got rid of the callback ???
+// flush_buffer_rev(s);
+ bit_cnt=bit_cnt + n - 32;
+ if (bit_cnt == 0) {
+ bit_buf = 0;
+ } else {
+ bit_buf = value >> (n - bit_cnt);
+ }
+ }
+
+ s->bit_buf = bit_buf;
+ s->bit_left = 32 - bit_cnt;
+}
+
+/* pad the end of the output stream with zeros */
+static void gif_flush_put_bits_rev(PutBitContext *s)
+{
+ while (s->bit_left < 32) {
+ /* XXX: should test end of buffer */
+ *s->buf_ptr++=s->bit_buf & 0xff;
+ s->bit_buf>>=8;
+ s->bit_left+=8;
+ }
+// flush_buffer_rev(s);
+ s->bit_left=32;
+ s->bit_buf=0;
+}
+
+/* !RevPutBitContext */
+
+/* GIF header */
+static int gif_image_write_header(ByteIOContext *pb,
+ int width, int height, int loop_count,
+ uint32_t *palette)
+{
+ int i;
+ unsigned int v;
+
+ put_tag(pb, "GIF");
+ put_tag(pb, "89a");
+ put_le16(pb, width);
+ put_le16(pb, height);
+
+ put_byte(pb, 0xf7); /* flags: global clut, 256 entries */
+ put_byte(pb, 0x1f); /* background color index */
+ put_byte(pb, 0); /* aspect ratio */
+
+ /* the global palette */
+ if (!palette) {
+ put_buffer(pb, (const unsigned char *)gif_clut, 216*3);
+ for(i=0;i<((256-216)*3);i++)
+ put_byte(pb, 0);
+ } else {
+ for(i=0;i<256;i++) {
+ v = palette[i];
+ put_byte(pb, (v >> 16) & 0xff);
+ put_byte(pb, (v >> 8) & 0xff);
+ put_byte(pb, (v) & 0xff);
+ }
+ }
+
+ /* update: this is the 'NETSCAPE EXTENSION' that allows for looped animated gif
+ see http://members.aol.com/royalef/gifabout.htm#net-extension
+
+ byte 1 : 33 (hex 0x21) GIF Extension code
+ byte 2 : 255 (hex 0xFF) Application Extension Label
+ byte 3 : 11 (hex 0x0B) Length of Application Block
+ (eleven bytes of data to follow)
+ bytes 4 to 11 : "NETSCAPE"
+ bytes 12 to 14 : "2.0"
+ byte 15 : 3 (hex 0x03) Length of Data Sub-Block
+ (three bytes of data to follow)
+ byte 16 : 1 (hex 0x01)
+ bytes 17 to 18 : 0 to 65535, an unsigned integer in
+ lo-hi byte format. This indicates the
+ number of times the loop should
+ be executed.
+ byte 19 : 0 (hex 0x00) a Data Sub-block Terminator
+ */
+
+ /* application extension header */
+#ifdef GIF_ADD_APP_HEADER
+ if (loop_count >= 0 && loop_count <= 65535) {
+ put_byte(pb, 0x21);
+ put_byte(pb, 0xff);
+ put_byte(pb, 0x0b);
+ put_tag(pb, "NETSCAPE2.0"); // bytes 4 to 14
+ put_byte(pb, 0x03); // byte 15
+ put_byte(pb, 0x01); // byte 16
+ put_le16(pb, (uint16_t)loop_count);
+ put_byte(pb, 0x00); // byte 19
+ }
+#endif
+ return 0;
+}
+
+/* this may be slow, but it allows for extensions */
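+/* each channel is quantized to the six levels 0x00,0x33,...,0xff of the
+ 216 entry web-safe palette above (dividing by 47 maps 0..255 to 0..5),
+ and the palette index is 36*r_idx + 6*g_idx + b_idx */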
+static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
+{
+ return ((((r)/47)%6)*6*6+(((g)/47)%6)*6+(((b)/47)%6));
+}
+
+
+static int gif_image_write_image(ByteIOContext *pb,
+ int x1, int y1, int width, int height,
+ const uint8_t *buf, int linesize, int pix_fmt)
+{
+ PutBitContext p;
+ uint8_t buffer[200]; /* 100 * 9 / 8 = 113 */
+ int i, left, w, v;
+ const uint8_t *ptr;
+ /* image block */
+
+ put_byte(pb, 0x2c);
+ put_le16(pb, x1);
+ put_le16(pb, y1);
+ put_le16(pb, width);
+ put_le16(pb, height);
+ put_byte(pb, 0x00); /* flags */
+ /* no local clut */
+
+ put_byte(pb, 0x08);
+
+ left= width * height;
+
+ init_put_bits(&p, buffer, 130);
+
+/*
+ * the thing here is the bitstream is written as little packets, with a size byte before
+ * but it's still the same bitstream between packets (no flush !)
+ */
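+ /* a clear code is emitted before every chunk, which keeps the string
+ table small enough that the code size stays at 9 bits; only literal
+ codes are ever produced, hence the ~9/8 expansion of the 8bpp data */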
+ ptr = buf;
+ w = width;
+ while(left>0) {
+
+ gif_put_bits_rev(&p, 9, 0x0100); /* clear code */
+
+ for(i=(left<GIF_CHUNKS)?left:GIF_CHUNKS;i;i--) {
+ if (pix_fmt == PIX_FMT_RGB24) {
+ v = gif_clut_index(ptr[0], ptr[1], ptr[2]);
+ ptr+=3;
+ } else {
+ v = *ptr++;
+ }
+ gif_put_bits_rev(&p, 9, v);
+ if (--w == 0) {
+ w = width;
+ buf += linesize;
+ ptr = buf;
+ }
+ }
+
+ if(left<=GIF_CHUNKS) {
+ gif_put_bits_rev(&p, 9, 0x101); /* end of stream */
+ gif_flush_put_bits_rev(&p);
+ }
+ if(pbBufPtr(&p) - p.buf > 0) {
+ put_byte(pb, pbBufPtr(&p) - p.buf); /* byte count of the packet */
+ put_buffer(pb, p.buf, pbBufPtr(&p) - p.buf); /* the actual buffer */
+ p.buf_ptr = p.buf; /* dequeue the bytes off the bitstream */
+ }
+ left-=GIF_CHUNKS;
+ }
+ put_byte(pb, 0x00); /* end of image block */
+
+ return 0;
+}
+
+typedef struct {
+ int64_t time, file_time;
+ uint8_t buffer[100]; /* data chunks */
+} GIFContext;
+
+static int gif_write_header(AVFormatContext *s)
+{
+ GIFContext *gif = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc, *video_enc;
+ int i, width, height, loop_count /*, rate*/;
+
+/* XXX: do we reject audio streams or just ignore them ?
+ if(s->nb_streams > 1)
+ return -1;
+*/
+ gif->time = 0;
+ gif->file_time = 0;
+
+ video_enc = NULL;
+ for(i=0;i<s->nb_streams;i++) {
+ enc = s->streams[i]->codec;
+ if (enc->codec_type != CODEC_TYPE_AUDIO)
+ video_enc = enc;
+ }
+
+ if (!video_enc) {
+ av_free(gif);
+ return -1;
+ } else {
+ width = video_enc->width;
+ height = video_enc->height;
+ loop_count = s->loop_output;
+// rate = video_enc->time_base.den;
+ }
+
+ if (video_enc->pix_fmt != PIX_FMT_RGB24) {
+ av_log(s, AV_LOG_ERROR, "ERROR: gif only handles the rgb24 pixel format. Use -pix_fmt rgb24.\n");
+ return AVERROR_IO;
+ }
+
+ gif_image_write_header(pb, width, height, loop_count, NULL);
+
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int gif_write_video(AVFormatContext *s,
+ AVCodecContext *enc, const uint8_t *buf, int size)
+{
+ ByteIOContext *pb = &s->pb;
+ GIFContext *gif = s->priv_data;
+ int jiffies;
+ int64_t delay;
+
+ /* graphic control extension block */
+ put_byte(pb, 0x21);
+ put_byte(pb, 0xf9);
+ put_byte(pb, 0x04); /* block size */
+ put_byte(pb, 0x04); /* flags */
+
+ /* 1 jiffy is 1/70 s */
+ /* the delay_time field indicates the number of jiffies - 1 */
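+ /* note: GIF89a defines delay_time in hundredths of a second; the 1/70 s
+ jiffy used here is only a rough approximation of that unit */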
+ delay = gif->file_time - gif->time;
+
+ /* XXX: should use delay, in order to be more accurate */
+ /* instead of using the same rounded value each time */
+ /* XXX: don't even remember if I really use it for now */
+ jiffies = (70*enc->time_base.num/enc->time_base.den) - 1;
+
+ put_le16(pb, jiffies);
+
+ put_byte(pb, 0x1f); /* transparent color index */
+ put_byte(pb, 0x00);
+
+ gif_image_write_image(pb, 0, 0, enc->width, enc->height,
+ buf, enc->width * 3, PIX_FMT_RGB24);
+
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int gif_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
+ if (codec->codec_type == CODEC_TYPE_AUDIO)
+ return 0; /* just ignore audio */
+ else
+ return gif_write_video(s, codec, pkt->data, pkt->size);
+}
+
+static int gif_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+
+ put_byte(pb, 0x3b);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+AVOutputFormat gif_muxer = {
+ "gif",
+ "GIF Animation",
+ "image/gif",
+ "gif",
+ sizeof(GIFContext),
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ gif_write_header,
+ gif_write_packet,
+ gif_write_trailer,
+};
diff --git a/contrib/ffmpeg/libavformat/gifdec.c b/contrib/ffmpeg/libavformat/gifdec.c
new file mode 100644
index 000000000..692ca6466
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gifdec.c
@@ -0,0 +1,593 @@
+/*
+ * GIF demuxer
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+//#define DEBUG
+
+#define MAXBITS 12
+#define SIZTABLE (1<<MAXBITS)
+
+#define GCE_DISPOSAL_NONE 0
+#define GCE_DISPOSAL_INPLACE 1
+#define GCE_DISPOSAL_BACKGROUND 2
+#define GCE_DISPOSAL_RESTORE 3
+
+typedef struct GifState {
+ int screen_width;
+ int screen_height;
+ int bits_per_pixel;
+ int background_color_index;
+ int transparent_color_index;
+ int color_resolution;
+ uint8_t *image_buf;
+ int image_linesize;
+ uint32_t *image_palette;
+ int pix_fmt;
+
+ /* after the frame is displayed, the disposal method is used */
+ int gce_disposal;
+ /* delay during which the frame is shown */
+ int gce_delay;
+
+ /* LZW compatible decoder */
+ ByteIOContext *f;
+ int eob_reached;
+ uint8_t *pbuf, *ebuf;
+ int bbits;
+ unsigned int bbuf;
+
+ int cursize; /* The current code size */
+ int curmask;
+ int codesize;
+ int clear_code;
+ int end_code;
+ int newcodes; /* First available code */
+ int top_slot; /* Highest code for current size */
+ int slot; /* Last read code */
+ int fc, oc;
+ uint8_t *sp;
+ uint8_t stack[SIZTABLE];
+ uint8_t suffix[SIZTABLE];
+ uint16_t prefix[SIZTABLE];
+
+ /* aux buffers */
+ uint8_t global_palette[256 * 3];
+ uint8_t local_palette[256 * 3];
+ uint8_t buf[256];
+} GifState;
+
+
+static const uint8_t gif87a_sig[6] = "GIF87a";
+static const uint8_t gif89a_sig[6] = "GIF89a";
+
+static const uint16_t mask[17] =
+{
+ 0x0000, 0x0001, 0x0003, 0x0007,
+ 0x000F, 0x001F, 0x003F, 0x007F,
+ 0x00FF, 0x01FF, 0x03FF, 0x07FF,
+ 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF
+};
+
+/* Probe for the GIF video or GIF image format. The current heuristic
+ assumes that a gif87a file is always a single image. For gif89a, we
+ consider it a video only if a GCE extension is present in the
+ first kilobyte. */
+static int gif_video_probe(AVProbeData * pd)
+{
+ const uint8_t *p, *p_end;
+ int bits_per_pixel, has_global_palette, ext_code, ext_len;
+ int gce_flags, gce_disposal;
+
+ if (pd->buf_size < 24 ||
+ memcmp(pd->buf, gif89a_sig, 6) != 0)
+ return 0;
+ p_end = pd->buf + pd->buf_size;
+ p = pd->buf + 6;
+ bits_per_pixel = (p[4] & 0x07) + 1;
+ has_global_palette = (p[4] & 0x80);
+ p += 7;
+ if (has_global_palette)
+ p += (1 << bits_per_pixel) * 3;
+ for(;;) {
+ if (p >= p_end)
+ return 0;
+ if (*p != '!')
+ break;
+ p++;
+ if (p >= p_end)
+ return 0;
+ ext_code = *p++;
+ if (p >= p_end)
+ return 0;
+ ext_len = *p++;
+ if (ext_code == 0xf9) {
+ if (p >= p_end)
+ return 0;
+ /* if GCE extension found with gce_disposal != 0: it is
+ likely to be an animation */
+ gce_flags = *p++;
+ gce_disposal = (gce_flags >> 2) & 0x7;
+ if (gce_disposal != 0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+ }
+ for(;;) {
+ if (ext_len == 0)
+ break;
+ p += ext_len;
+ if (p >= p_end)
+ return 0;
+ ext_len = *p++;
+ }
+ }
+ return 0;
+}
+
+static void GLZWDecodeInit(GifState * s, int csize)
+{
+ /* read buffer */
+ s->eob_reached = 0;
+ s->pbuf = s->buf;
+ s->ebuf = s->buf;
+ s->bbuf = 0;
+ s->bbits = 0;
+
+ /* decoder */
+ s->codesize = csize;
+ s->cursize = s->codesize + 1;
+ s->curmask = mask[s->cursize];
+ s->top_slot = 1 << s->cursize;
+ s->clear_code = 1 << s->codesize;
+ s->end_code = s->clear_code + 1;
+ s->slot = s->newcodes = s->clear_code + 2;
+ s->oc = s->fc = 0;
+ s->sp = s->stack;
+}
+
+/* XXX: optimize */
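+/* the LZW data is stored as sub-blocks of up to 255 bytes, each preceded
+ by a length byte (a zero length terminates the stream); the bit buffer
+ is refilled from the next sub-block whenever it runs short */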
+static inline int GetCode(GifState * s)
+{
+ int c, sizbuf;
+ uint8_t *ptr;
+
+ while (s->bbits < s->cursize) {
+ ptr = s->pbuf;
+ if (ptr >= s->ebuf) {
+ if (!s->eob_reached) {
+ sizbuf = get_byte(s->f);
+ s->ebuf = s->buf + sizbuf;
+ s->pbuf = s->buf;
+ if (sizbuf > 0) {
+ get_buffer(s->f, s->buf, sizbuf);
+ } else {
+ s->eob_reached = 1;
+ }
+ }
+ ptr = s->pbuf;
+ }
+ s->bbuf |= ptr[0] << s->bbits;
+ ptr++;
+ s->pbuf = ptr;
+ s->bbits += 8;
+ }
+ c = s->bbuf & s->curmask;
+ s->bbuf >>= s->cursize;
+ s->bbits -= s->cursize;
+ return c;
+}
+
+/* NOTE: the algorithm here is inspired by the LZW GIF decoder
+ written by Steven A. Bennett in 1987. */
+/* returns the number of bytes decoded */
+static int GLZWDecode(GifState * s, uint8_t * buf, int len)
+{
+ int l, c, code, oc, fc;
+ uint8_t *sp;
+
+ if (s->end_code < 0)
+ return 0;
+
+ l = len;
+ sp = s->sp;
+ oc = s->oc;
+ fc = s->fc;
+
+ while (sp > s->stack) {
+ *buf++ = *(--sp);
+ if ((--l) == 0)
+ goto the_end;
+ }
+
+ for (;;) {
+ c = GetCode(s);
+ if (c == s->end_code) {
+ s->end_code = -1;
+ break;
+ } else if (c == s->clear_code) {
+ s->cursize = s->codesize + 1;
+ s->curmask = mask[s->cursize];
+ s->slot = s->newcodes;
+ s->top_slot = 1 << s->cursize;
+ while ((c = GetCode(s)) == s->clear_code);
+ if (c == s->end_code) {
+ s->end_code = -1;
+ break;
+ }
+ /* test error */
+ if (c >= s->slot)
+ c = 0;
+ fc = oc = c;
+ *buf++ = c;
+ if ((--l) == 0)
+ break;
+ } else {
+ code = c;
+ if (code >= s->slot) {
+ *sp++ = fc;
+ code = oc;
+ }
+ while (code >= s->newcodes) {
+ *sp++ = s->suffix[code];
+ code = s->prefix[code];
+ }
+ *sp++ = code;
+ if (s->slot < s->top_slot) {
+ s->suffix[s->slot] = fc = code;
+ s->prefix[s->slot++] = oc;
+ oc = c;
+ }
+ if (s->slot >= s->top_slot) {
+ if (s->cursize < MAXBITS) {
+ s->top_slot <<= 1;
+ s->curmask = mask[++s->cursize];
+ }
+ }
+ while (sp > s->stack) {
+ *buf++ = *(--sp);
+ if ((--l) == 0)
+ goto the_end;
+ }
+ }
+ }
+ the_end:
+ s->sp = sp;
+ s->oc = oc;
+ s->fc = fc;
+ return len - l;
+}
+
+static int gif_read_image(GifState *s)
+{
+ ByteIOContext *f = s->f;
+ int left, top, width, height, bits_per_pixel, code_size, flags;
+ int is_interleaved, has_local_palette, y, x, pass, y1, linesize, n, i;
+ uint8_t *ptr, *line, *d, *spal, *palette, *sptr, *ptr1;
+
+ left = get_le16(f);
+ top = get_le16(f);
+ width = get_le16(f);
+ height = get_le16(f);
+ flags = get_byte(f);
+ is_interleaved = flags & 0x40;
+ has_local_palette = flags & 0x80;
+ bits_per_pixel = (flags & 0x07) + 1;
+#ifdef DEBUG
+ printf("gif: image x=%d y=%d w=%d h=%d\n", left, top, width, height);
+#endif
+
+ if (has_local_palette) {
+ get_buffer(f, s->local_palette, 3 * (1 << bits_per_pixel));
+ palette = s->local_palette;
+ } else {
+ palette = s->global_palette;
+ bits_per_pixel = s->bits_per_pixel;
+ }
+
+ /* verify that the whole image is inside the screen dimensions */
+ if (left + width > s->screen_width ||
+ top + height > s->screen_height)
+ return -EINVAL;
+
+ /* build the palette */
+ if (s->pix_fmt == PIX_FMT_RGB24) {
+ line = av_malloc(width);
+ if (!line)
+ return -ENOMEM;
+ } else {
+ n = (1 << bits_per_pixel);
+ spal = palette;
+ for(i = 0; i < n; i++) {
+ s->image_palette[i] = (0xff << 24) |
+ (spal[0] << 16) | (spal[1] << 8) | (spal[2]);
+ spal += 3;
+ }
+ for(; i < 256; i++)
+ s->image_palette[i] = (0xff << 24);
+ /* handle transparency */
+ if (s->transparent_color_index >= 0)
+ s->image_palette[s->transparent_color_index] = 0;
+ line = NULL;
+ }
+
+ /* now get the image data */
+ s->f = f;
+ code_size = get_byte(f);
+ GLZWDecodeInit(s, code_size);
+
+ /* read all the image */
+ linesize = s->image_linesize;
+ ptr1 = s->image_buf + top * linesize + (left * 3);
+ ptr = ptr1;
+ pass = 0;
+ y1 = 0;
+ for (y = 0; y < height; y++) {
+ if (s->pix_fmt == PIX_FMT_RGB24) {
+ /* transcode to RGB24 */
+ GLZWDecode(s, line, width);
+ d = ptr;
+ sptr = line;
+ for(x = 0; x < width; x++) {
+ spal = palette + sptr[0] * 3;
+ d[0] = spal[0];
+ d[1] = spal[1];
+ d[2] = spal[2];
+ d += 3;
+ sptr++;
+ }
+ } else {
+ GLZWDecode(s, ptr, width);
+ }
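+ /* GIF interlacing stores the rows in four passes: rows 0,8,16,... then
+ 4,12,20,..., then 2,6,10,..., then 1,3,5,...; 'pass' selects the
+ current pass and ptr is advanced by the matching row stride */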
+ if (is_interleaved) {
+ switch(pass) {
+ default:
+ case 0:
+ case 1:
+ y1 += 8;
+ ptr += linesize * 8;
+ if (y1 >= height) {
+ y1 = 4;
+ if (pass == 0)
+ ptr = ptr1 + linesize * 4;
+ else
+ ptr = ptr1 + linesize * 2;
+ pass++;
+ }
+ break;
+ case 2:
+ y1 += 4;
+ ptr += linesize * 4;
+ if (y1 >= height) {
+ y1 = 1;
+ ptr = ptr1 + linesize;
+ pass++;
+ }
+ break;
+ case 3:
+ y1 += 2;
+ ptr += linesize * 2;
+ break;
+ }
+ } else {
+ ptr += linesize;
+ }
+ }
+ av_free(line);
+
+ /* read the garbage data until end marker is found */
+ while (!s->eob_reached)
+ GetCode(s);
+ return 0;
+}
+
+static int gif_read_extension(GifState *s)
+{
+ ByteIOContext *f = s->f;
+ int ext_code, ext_len, i, gce_flags, gce_transparent_index;
+
+ /* extension */
+ ext_code = get_byte(f);
+ ext_len = get_byte(f);
+#ifdef DEBUG
+ printf("gif: ext_code=0x%x len=%d\n", ext_code, ext_len);
+#endif
+ switch(ext_code) {
+ case 0xf9:
+ if (ext_len != 4)
+ goto discard_ext;
+ s->transparent_color_index = -1;
+ gce_flags = get_byte(f);
+ s->gce_delay = get_le16(f);
+ gce_transparent_index = get_byte(f);
+ if (gce_flags & 0x01)
+ s->transparent_color_index = gce_transparent_index;
+ else
+ s->transparent_color_index = -1;
+ s->gce_disposal = (gce_flags >> 2) & 0x7;
+#ifdef DEBUG
+ printf("gif: gce_flags=%x delay=%d tcolor=%d disposal=%d\n",
+ gce_flags, s->gce_delay,
+ s->transparent_color_index, s->gce_disposal);
+#endif
+ ext_len = get_byte(f);
+ break;
+ }
+
+ /* NOTE: many extension blocks can come after */
+ discard_ext:
+ while (ext_len != 0) {
+ for (i = 0; i < ext_len; i++)
+ get_byte(f);
+ ext_len = get_byte(f);
+#ifdef DEBUG
+ printf("gif: ext_len1=%d\n", ext_len);
+#endif
+ }
+ return 0;
+}
+
+static int gif_read_header1(GifState *s)
+{
+ ByteIOContext *f = s->f;
+ uint8_t sig[6];
+ int ret, v, n;
+ int has_global_palette;
+
+ /* read gif signature */
+ ret = get_buffer(f, sig, 6);
+ if (ret != 6)
+ return -1;
+ if (memcmp(sig, gif87a_sig, 6) != 0 &&
+ memcmp(sig, gif89a_sig, 6) != 0)
+ return -1;
+
+ /* read screen header */
+ s->transparent_color_index = -1;
+ s->screen_width = get_le16(f);
+ s->screen_height = get_le16(f);
+ if( (unsigned)s->screen_width > 32767
+ || (unsigned)s->screen_height > 32767){
+ av_log(NULL, AV_LOG_ERROR, "picture size too large\n");
+ return -1;
+ }
+
+ v = get_byte(f);
+ s->color_resolution = ((v & 0x70) >> 4) + 1;
+ has_global_palette = (v & 0x80);
+ s->bits_per_pixel = (v & 0x07) + 1;
+ s->background_color_index = get_byte(f);
+ get_byte(f); /* ignored */
+#ifdef DEBUG
+ printf("gif: screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
+ s->screen_width, s->screen_height, s->bits_per_pixel,
+ has_global_palette);
+#endif
+ if (has_global_palette) {
+ n = 1 << s->bits_per_pixel;
+ get_buffer(f, s->global_palette, n * 3);
+ }
+ return 0;
+}
+
+static int gif_parse_next_image(GifState *s)
+{
+ ByteIOContext *f = s->f;
+ int ret, code;
+
+ for (;;) {
+ code = url_fgetc(f);
+#ifdef DEBUG
+ printf("gif: code=%02x '%c'\n", code, code);
+#endif
+ switch (code) {
+ case ',':
+ if (gif_read_image(s) < 0)
+ return AVERROR_IO;
+ ret = 0;
+ goto the_end;
+ case ';':
+ /* end of image */
+ ret = AVERROR_IO;
+ goto the_end;
+ case '!':
+ if (gif_read_extension(s) < 0)
+ return AVERROR_IO;
+ break;
+ case EOF:
+ default:
+ /* error or erroneous EOF */
+ ret = AVERROR_IO;
+ goto the_end;
+ }
+ }
+ the_end:
+ return ret;
+}
+
+static int gif_read_header(AVFormatContext * s1,
+ AVFormatParameters * ap)
+{
+ GifState *s = s1->priv_data;
+ ByteIOContext *f = &s1->pb;
+ AVStream *st;
+
+ s->f = f;
+ if (gif_read_header1(s) < 0)
+ return -1;
+
+ /* allocate image buffer */
+ s->image_linesize = s->screen_width * 3;
+ s->image_buf = av_malloc(s->screen_height * s->image_linesize);
+ if (!s->image_buf)
+ return -ENOMEM;
+ s->pix_fmt = PIX_FMT_RGB24;
+ /* now we are ready: build format streams */
+ st = av_new_stream(s1, 0);
+ if (!st)
+ return -1;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->time_base.den = 5;
+ st->codec->time_base.num = 1;
+ /* XXX: check if screen size is always valid */
+ st->codec->width = s->screen_width;
+ st->codec->height = s->screen_height;
+ st->codec->pix_fmt = PIX_FMT_RGB24;
+ return 0;
+}
+
+static int gif_read_packet(AVFormatContext * s1,
+ AVPacket * pkt)
+{
+ GifState *s = s1->priv_data;
+ int ret;
+
+ ret = gif_parse_next_image(s);
+ if (ret < 0)
+ return ret;
+
+ /* XXX: avoid copying */
+ if (av_new_packet(pkt, s->screen_width * s->screen_height * 3)) {
+ return AVERROR_IO;
+ }
+ pkt->stream_index = 0;
+ memcpy(pkt->data, s->image_buf, s->screen_width * s->screen_height * 3);
+ return 0;
+}
+
+static int gif_read_close(AVFormatContext *s1)
+{
+ GifState *s = s1->priv_data;
+ av_free(s->image_buf);
+ return 0;
+}
+
+AVInputFormat gif_demuxer =
+{
+ "gif",
+ "gif format",
+ sizeof(GifState),
+ gif_video_probe,
+ gif_read_header,
+ gif_read_packet,
+ gif_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/grab.c b/contrib/ffmpeg/libavformat/grab.c
new file mode 100644
index 000000000..4e85772e5
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/grab.c
@@ -0,0 +1,860 @@
+/*
+ * Linux video grab interface
+ * Copyright (c) 2000,2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#define _LINUX_TIME_H 1
+#include <linux/videodev.h>
+#include <time.h>
+
+typedef struct {
+ int fd;
+ int frame_format; /* see VIDEO_PALETTE_xxx */
+ int use_mmap;
+ int width, height;
+ int frame_rate;
+ int frame_rate_base;
+ int64_t time_frame;
+ int frame_size;
+ struct video_capability video_cap;
+ struct video_audio audio_saved;
+ uint8_t *video_buf;
+ struct video_mbuf gb_buffers;
+ struct video_mmap gb_buf;
+ int gb_frame;
+
+ /* ATI All In Wonder specific stuff */
+ /* XXX: remove and merge in libavcodec/imgconvert.c */
+ int aiw_enabled;
+ int deint;
+ int halfw;
+ uint8_t *src_mem;
+ uint8_t *lum_m4_mem;
+} VideoData;
+
+static int aiw_init(VideoData *s);
+static int aiw_read_picture(VideoData *s, uint8_t *data);
+static int aiw_close(VideoData *s);
+
+static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ AVStream *st;
+ int width, height;
+ int video_fd, frame_size;
+ int ret, frame_rate, frame_rate_base;
+ int desired_palette, desired_depth;
+ struct video_tuner tuner;
+ struct video_audio audio;
+ struct video_picture pict;
+ const char *video_device;
+ int j;
+
+ if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
+ av_log(s1, AV_LOG_ERROR, "Bad capture size (%dx%d) or wrong time base (%d)\n",
+ ap->width, ap->height, ap->time_base.den);
+
+ return -1;
+ }
+
+ width = ap->width;
+ height = ap->height;
+ frame_rate = ap->time_base.den;
+ frame_rate_base = ap->time_base.num;
+
+ if((unsigned)width > 32767 || (unsigned)height > 32767) {
+ av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
+ width, height);
+
+ return -1;
+ }
+
+ st = av_new_stream(s1, 0);
+ if (!st)
+ return -ENOMEM;
+ av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ s->width = width;
+ s->height = height;
+ s->frame_rate = frame_rate;
+ s->frame_rate_base = frame_rate_base;
+
+ video_device = ap->device;
+ if (!video_device)
+ video_device = "/dev/video";
+ video_fd = open(video_device, O_RDWR);
+ if (video_fd < 0) {
+ perror(video_device);
+ goto fail;
+ }
+
+ if (ioctl(video_fd,VIDIOCGCAP, &s->video_cap) < 0) {
+ perror("VIDIOCGCAP");
+ goto fail;
+ }
+
+ if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
+ av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
+ goto fail;
+ }
+
+ desired_palette = -1;
+ desired_depth = -1;
+ if (ap->pix_fmt == PIX_FMT_YUV420P) {
+ desired_palette = VIDEO_PALETTE_YUV420P;
+ desired_depth = 12;
+ } else if (ap->pix_fmt == PIX_FMT_YUV422) {
+ desired_palette = VIDEO_PALETTE_YUV422;
+ desired_depth = 16;
+ } else if (ap->pix_fmt == PIX_FMT_BGR24) {
+ desired_palette = VIDEO_PALETTE_RGB24;
+ desired_depth = 24;
+ }
+
+ /* set tv standard */
+ if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
+ if (!strcasecmp(ap->standard, "pal"))
+ tuner.mode = VIDEO_MODE_PAL;
+ else if (!strcasecmp(ap->standard, "secam"))
+ tuner.mode = VIDEO_MODE_SECAM;
+ else
+ tuner.mode = VIDEO_MODE_NTSC;
+ ioctl(video_fd, VIDIOCSTUNER, &tuner);
+ }
+
+ /* unmute audio */
+ audio.audio = 0;
+ ioctl(video_fd, VIDIOCGAUDIO, &audio);
+ memcpy(&s->audio_saved, &audio, sizeof(audio));
+ audio.flags &= ~VIDEO_AUDIO_MUTE;
+ ioctl(video_fd, VIDIOCSAUDIO, &audio);
+
+ ioctl(video_fd, VIDIOCGPICT, &pict);
+#if 0
+ printf("v4l: colour=%d hue=%d brightness=%d contrast=%d whiteness=%d\n",
+ pict.colour,
+ pict.hue,
+ pict.brightness,
+ pict.contrast,
+ pict.whiteness);
+#endif
+ /* try to choose a suitable video format */
+ pict.palette = desired_palette;
+ pict.depth= desired_depth;
+ if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
+ pict.palette=VIDEO_PALETTE_YUV420P;
+ pict.depth=12;
+ ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+ if (ret < 0) {
+ pict.palette=VIDEO_PALETTE_YUV422;
+ pict.depth=16;
+ ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+ if (ret < 0) {
+ pict.palette=VIDEO_PALETTE_RGB24;
+ pict.depth=24;
+ ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+ if (ret < 0) {
+ pict.palette=VIDEO_PALETTE_GREY;
+ pict.depth=8;
+ ret = ioctl(video_fd, VIDIOCSPICT, &pict);
+ if (ret < 0)
+ goto fail1;
+ }
+ }
+ }
+ }
+
+ ret = ioctl(video_fd,VIDIOCGMBUF,&s->gb_buffers);
+ if (ret < 0) {
+ /* try to use read based access */
+ struct video_window win;
+ int val;
+
+ win.x = 0;
+ win.y = 0;
+ win.width = width;
+ win.height = height;
+ win.chromakey = -1;
+ win.flags = 0;
+
+ ioctl(video_fd, VIDIOCSWIN, &win);
+
+ s->frame_format = pict.palette;
+
+ val = 1;
+ ioctl(video_fd, VIDIOCCAPTURE, &val);
+
+ s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
+ s->use_mmap = 0;
+
+ /* ATI All In Wonder automatic activation */
+ if (!strcmp(s->video_cap.name, "Km")) {
+ if (aiw_init(s) < 0)
+ goto fail;
+ s->aiw_enabled = 1;
+ /* force 420P format because conversion from YUV422 to YUV420P
+ is done in this driver (ugly) */
+ s->frame_format = VIDEO_PALETTE_YUV420P;
+ }
+ } else {
+ s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,0);
+ if ((unsigned char*)-1 == s->video_buf) {
+ perror("mmap");
+ goto fail;
+ }
+ s->gb_frame = 0;
+ s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
+
+ /* start to grab the first frame */
+ s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
+ s->gb_buf.height = height;
+ s->gb_buf.width = width;
+ s->gb_buf.format = pict.palette;
+
+ ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
+ if (ret < 0) {
+ if (errno != EAGAIN) {
+ fail1:
+ av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
+ } else {
+ av_log(s1, AV_LOG_ERROR,"Fatal: grab device does not receive any video signal\n");
+ }
+ goto fail;
+ }
+ for (j = 1; j < s->gb_buffers.frames; j++) {
+ s->gb_buf.frame = j;
+ ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
+ }
+ s->frame_format = s->gb_buf.format;
+ s->use_mmap = 1;
+ }
+
+ switch(s->frame_format) {
+ case VIDEO_PALETTE_YUV420P:
+ frame_size = (width * height * 3) / 2;
+ st->codec->pix_fmt = PIX_FMT_YUV420P;
+ break;
+ case VIDEO_PALETTE_YUV422:
+ frame_size = width * height * 2;
+ st->codec->pix_fmt = PIX_FMT_YUV422;
+ break;
+ case VIDEO_PALETTE_RGB24:
+ frame_size = width * height * 3;
+ st->codec->pix_fmt = PIX_FMT_BGR24; /* NOTE: v4l uses BGR24, not RGB24 ! */
+ break;
+ case VIDEO_PALETTE_GREY:
+ frame_size = width * height * 1;
+ st->codec->pix_fmt = PIX_FMT_GRAY8;
+ break;
+ default:
+ goto fail;
+ }
+ s->fd = video_fd;
+ s->frame_size = frame_size;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = width;
+ st->codec->height = height;
+ st->codec->time_base.den = frame_rate;
+ st->codec->time_base.num = frame_rate_base;
+ st->codec->bit_rate = frame_size * 1/av_q2d(st->codec->time_base) * 8;
+
+ return 0;
+ fail:
+ if (video_fd >= 0)
+ close(video_fd);
+ av_free(st);
+ return AVERROR_IO;
+}
+
+static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
+{
+ uint8_t *ptr;
+
+ while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
+ (errno == EAGAIN || errno == EINTR));
+
+ ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
+ memcpy(buf, ptr, s->frame_size);
+
+ /* Setup to capture the next frame */
+ s->gb_buf.frame = s->gb_frame;
+ if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
+ if (errno == EAGAIN)
+ av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
+ else
+ perror("VIDIOCMCAPTURE");
+ return AVERROR_IO;
+ }
+
+ /* This is now the grabbing frame */
+ s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;
+
+ return s->frame_size;
+}
+
+static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+ int64_t curtime, delay;
+ struct timespec ts;
+
+ /* Calculate the time of the next frame */
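+ /* time_frame is kept pre-multiplied by frame_rate/frame_rate_base, so
+ adding 1000000 advances it by exactly one frame period once it is
+ converted back to plain microseconds in the loop below */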
+ s->time_frame += int64_t_C(1000000);
+
+ /* wait based on the frame rate */
+ for(;;) {
+ curtime = av_gettime();
+ delay = s->time_frame * s->frame_rate_base / s->frame_rate - curtime;
+ if (delay <= 0) {
+ if (delay < int64_t_C(-1000000) * s->frame_rate_base / s->frame_rate) {
+ /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
+ s->time_frame += int64_t_C(1000000);
+ }
+ break;
+ }
+ ts.tv_sec = delay / 1000000;
+ ts.tv_nsec = (delay % 1000000) * 1000;
+ nanosleep(&ts, NULL);
+ }
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return AVERROR_IO;
+
+ pkt->pts = curtime;
+
+ /* read one frame */
+ if (s->aiw_enabled) {
+ return aiw_read_picture(s, pkt->data);
+ } else if (s->use_mmap) {
+ return v4l_mm_read_picture(s, pkt->data);
+ } else {
+ if (read(s->fd, pkt->data, pkt->size) != pkt->size)
+ return AVERROR_IO;
+ return s->frame_size;
+ }
+}
+
+static int grab_read_close(AVFormatContext *s1)
+{
+ VideoData *s = s1->priv_data;
+
+ if (s->aiw_enabled)
+ aiw_close(s);
+
+ if (s->use_mmap)
+ munmap(s->video_buf, s->gb_buffers.size);
+
+ /* mute audio. we must force it because the BTTV driver does not
+ return its state correctly */
+ s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
+ ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);
+
+ close(s->fd);
+ return 0;
+}
+
+AVInputFormat video_grab_device_demuxer = {
+ "video4linux",
+ "video grab",
+ sizeof(VideoData),
+ NULL,
+ grab_read_header,
+ grab_read_packet,
+ grab_read_close,
+ .flags = AVFMT_NOFILE,
+};
+
+/* All in Wonder specific stuff */
+/* XXX: remove and merge in libavcodec/imgconvert.c */
+
+static int aiw_init(VideoData *s)
+{
+ int width, height;
+
+ width = s->width;
+ height = s->height;
+
+ if ((width == s->video_cap.maxwidth && height == s->video_cap.maxheight) ||
+ (width == s->video_cap.maxwidth && height == s->video_cap.maxheight*2) ||
+ (width == s->video_cap.maxwidth/2 && height == s->video_cap.maxheight)) {
+
+ s->deint=0;
+ s->halfw=0;
+ if (height == s->video_cap.maxheight*2) s->deint=1;
+ if (width == s->video_cap.maxwidth/2) s->halfw=1;
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "\nIncorrect Grab Size Supplied - Supported Sizes Are:\n");
+ av_log(NULL, AV_LOG_ERROR, " %dx%d %dx%d %dx%d\n\n",
+ s->video_cap.maxwidth,s->video_cap.maxheight,
+ s->video_cap.maxwidth,s->video_cap.maxheight*2,
+ s->video_cap.maxwidth/2,s->video_cap.maxheight);
+ goto fail;
+ }
+
+ if (s->halfw == 0) {
+ s->src_mem = av_malloc(s->width*2);
+ } else {
+ s->src_mem = av_malloc(s->width*4);
+ }
+ if (!s->src_mem) goto fail;
+
+ s->lum_m4_mem = av_malloc(s->width);
+ if (!s->lum_m4_mem)
+ goto fail;
+ return 0;
+ fail:
+ av_freep(&s->src_mem);
+ av_freep(&s->lum_m4_mem);
+ return -1;
+}
+
+#ifdef HAVE_MMX
+#include "libavcodec/i386/mmx.h"
+
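+/* the LINE_* macros below each convert one run of 32 packed YUYV (YUV422)
+ bytes into planar samples: LINE_WITH_UV keeps the chroma, LINE_NO_UV keeps
+ only the luma, and the *_AVG variants average horizontally adjacent pixels
+ to halve the width */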
+#define LINE_WITH_UV \
+ movq_m2r(ptr[0],mm0); \
+ movq_m2r(ptr[8],mm1); \
+ movq_r2r(mm0, mm4); \
+ punpcklbw_r2r(mm1,mm0); \
+ punpckhbw_r2r(mm1,mm4); \
+ movq_r2r(mm0,mm5); \
+ punpcklbw_r2r(mm4,mm0); \
+ punpckhbw_r2r(mm4,mm5); \
+ movq_r2r(mm0,mm1); \
+ punpcklbw_r2r(mm5,mm1); \
+ movq_r2m(mm1,lum[0]); \
+ movq_m2r(ptr[16],mm2); \
+ movq_m2r(ptr[24],mm1); \
+ movq_r2r(mm2,mm4); \
+ punpcklbw_r2r(mm1,mm2); \
+ punpckhbw_r2r(mm1,mm4); \
+ movq_r2r(mm2,mm3); \
+ punpcklbw_r2r(mm4,mm2); \
+ punpckhbw_r2r(mm4,mm3); \
+ movq_r2r(mm2,mm1); \
+ punpcklbw_r2r(mm3,mm1); \
+ movq_r2m(mm1,lum[8]); \
+ punpckhdq_r2r(mm2,mm0); \
+ punpckhdq_r2r(mm3,mm5); \
+ movq_r2m(mm0,cb[0]); \
+ movq_r2m(mm5,cr[0]);
+
+#define LINE_NO_UV \
+ movq_m2r(ptr[0],mm0);\
+ movq_m2r(ptr[8],mm1);\
+ movq_r2r(mm0, mm4);\
+ punpcklbw_r2r(mm1,mm0); \
+ punpckhbw_r2r(mm1,mm4);\
+ movq_r2r(mm0,mm5);\
+ punpcklbw_r2r(mm4,mm0);\
+ punpckhbw_r2r(mm4,mm5);\
+ movq_r2r(mm0,mm1);\
+ punpcklbw_r2r(mm5,mm1);\
+ movq_r2m(mm1,lum[0]);\
+ movq_m2r(ptr[16],mm2);\
+ movq_m2r(ptr[24],mm1);\
+ movq_r2r(mm2,mm4);\
+ punpcklbw_r2r(mm1,mm2);\
+ punpckhbw_r2r(mm1,mm4);\
+ movq_r2r(mm2,mm3);\
+ punpcklbw_r2r(mm4,mm2);\
+ punpckhbw_r2r(mm4,mm3);\
+ movq_r2r(mm2,mm1);\
+ punpcklbw_r2r(mm3,mm1);\
+ movq_r2m(mm1,lum[8]);
+
+#define LINE_WITHUV_AVG \
+ movq_m2r(ptr[0], mm0);\
+ movq_m2r(ptr[8], mm1);\
+ movq_r2r(mm0, mm4);\
+ punpcklbw_r2r(mm1,mm0);\
+ punpckhbw_r2r(mm1,mm4);\
+ movq_r2r(mm0,mm5);\
+ punpcklbw_r2r(mm4,mm0);\
+ punpckhbw_r2r(mm4,mm5);\
+ movq_r2r(mm0,mm1);\
+ movq_r2r(mm5,mm2);\
+ punpcklbw_r2r(mm7,mm1);\
+ punpcklbw_r2r(mm7,mm2);\
+ paddw_r2r(mm6,mm1);\
+ paddw_r2r(mm2,mm1);\
+ psraw_i2r(1,mm1);\
+ packuswb_r2r(mm7,mm1);\
+ movd_r2m(mm1,lum[0]);\
+ movq_m2r(ptr[16],mm2);\
+ movq_m2r(ptr[24],mm1);\
+ movq_r2r(mm2,mm4);\
+ punpcklbw_r2r(mm1,mm2);\
+ punpckhbw_r2r(mm1,mm4);\
+ movq_r2r(mm2,mm3);\
+ punpcklbw_r2r(mm4,mm2);\
+ punpckhbw_r2r(mm4,mm3);\
+ movq_r2r(mm2,mm1);\
+ movq_r2r(mm3,mm4);\
+ punpcklbw_r2r(mm7,mm1);\
+ punpcklbw_r2r(mm7,mm4);\
+ paddw_r2r(mm6,mm1);\
+ paddw_r2r(mm4,mm1);\
+ psraw_i2r(1,mm1);\
+ packuswb_r2r(mm7,mm1);\
+ movd_r2m(mm1,lum[4]);\
+ punpckhbw_r2r(mm7,mm0);\
+ punpckhbw_r2r(mm7,mm2);\
+ paddw_r2r(mm6,mm0);\
+ paddw_r2r(mm2,mm0);\
+ psraw_i2r(1,mm0);\
+ packuswb_r2r(mm7,mm0);\
+ punpckhbw_r2r(mm7,mm5);\
+ punpckhbw_r2r(mm7,mm3);\
+ paddw_r2r(mm6,mm5);\
+ paddw_r2r(mm3,mm5);\
+ psraw_i2r(1,mm5);\
+ packuswb_r2r(mm7,mm5);\
+ movd_r2m(mm0,cb[0]);\
+ movd_r2m(mm5,cr[0]);
+
+#define LINE_NOUV_AVG \
+ movq_m2r(ptr[0],mm0);\
+ movq_m2r(ptr[8],mm1);\
+ pand_r2r(mm5,mm0);\
+ pand_r2r(mm5,mm1);\
+ pmaddwd_r2r(mm6,mm0);\
+ pmaddwd_r2r(mm6,mm1);\
+ packssdw_r2r(mm1,mm0);\
+ paddw_r2r(mm6,mm0);\
+ psraw_i2r(1,mm0);\
+ movq_m2r(ptr[16],mm2);\
+ movq_m2r(ptr[24],mm3);\
+ pand_r2r(mm5,mm2);\
+ pand_r2r(mm5,mm3);\
+ pmaddwd_r2r(mm6,mm2);\
+ pmaddwd_r2r(mm6,mm3);\
+ packssdw_r2r(mm3,mm2);\
+ paddw_r2r(mm6,mm2);\
+ psraw_i2r(1,mm2);\
+ packuswb_r2r(mm2,mm0);\
+ movq_r2m(mm0,lum[0]);
+
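+/* DEINT_LINE_LUM rebuilds a missing field line from the neighbouring luma
+ lines with a (-1, 4, 2, 4, -1)/8 vertical filter (rounded and clipped),
+ writing the result into lum_m2 and saving the old lum_m2 into lum_m4 */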
+#define DEINT_LINE_LUM(ptroff) \
+ movd_m2r(lum_m4[(ptroff)],mm0);\
+ movd_m2r(lum_m3[(ptroff)],mm1);\
+ movd_m2r(lum_m2[(ptroff)],mm2);\
+ movd_m2r(lum_m1[(ptroff)],mm3);\
+ movd_m2r(lum[(ptroff)],mm4);\
+ punpcklbw_r2r(mm7,mm0);\
+ movd_r2m(mm2,lum_m4[(ptroff)]);\
+ punpcklbw_r2r(mm7,mm1);\
+ punpcklbw_r2r(mm7,mm2);\
+ punpcklbw_r2r(mm7,mm3);\
+ punpcklbw_r2r(mm7,mm4);\
+ psllw_i2r(2,mm1);\
+ psllw_i2r(1,mm2);\
+ paddw_r2r(mm6,mm1);\
+ psllw_i2r(2,mm3);\
+ paddw_r2r(mm2,mm1);\
+ paddw_r2r(mm4,mm0);\
+ paddw_r2r(mm3,mm1);\
+ psubusw_r2r(mm0,mm1);\
+ psrlw_i2r(3,mm1);\
+ packuswb_r2r(mm7,mm1);\
+ movd_r2m(mm1,lum_m2[(ptroff)]);
+
+#else
+#include "libavcodec/dsputil.h"
+
+#define LINE_WITH_UV \
+ lum[0]=ptr[0];lum[1]=ptr[2];lum[2]=ptr[4];lum[3]=ptr[6];\
+ cb[0]=ptr[1];cb[1]=ptr[5];\
+ cr[0]=ptr[3];cr[1]=ptr[7];\
+ lum[4]=ptr[8];lum[5]=ptr[10];lum[6]=ptr[12];lum[7]=ptr[14];\
+ cb[2]=ptr[9];cb[3]=ptr[13];\
+ cr[2]=ptr[11];cr[3]=ptr[15];\
+ lum[8]=ptr[16];lum[9]=ptr[18];lum[10]=ptr[20];lum[11]=ptr[22];\
+ cb[4]=ptr[17];cb[5]=ptr[21];\
+ cr[4]=ptr[19];cr[5]=ptr[23];\
+ lum[12]=ptr[24];lum[13]=ptr[26];lum[14]=ptr[28];lum[15]=ptr[30];\
+ cb[6]=ptr[25];cb[7]=ptr[29];\
+ cr[6]=ptr[27];cr[7]=ptr[31];
+
+#define LINE_NO_UV \
+ lum[0]=ptr[0];lum[1]=ptr[2];lum[2]=ptr[4];lum[3]=ptr[6];\
+ lum[4]=ptr[8];lum[5]=ptr[10];lum[6]=ptr[12];lum[7]=ptr[14];\
+ lum[8]=ptr[16];lum[9]=ptr[18];lum[10]=ptr[20];lum[11]=ptr[22];\
+ lum[12]=ptr[24];lum[13]=ptr[26];lum[14]=ptr[28];lum[15]=ptr[30];
+
+#define LINE_WITHUV_AVG \
+ sum=(ptr[0]+ptr[2]+1) >> 1;lum[0]=sum; \
+ sum=(ptr[4]+ptr[6]+1) >> 1;lum[1]=sum; \
+ sum=(ptr[1]+ptr[5]+1) >> 1;cb[0]=sum; \
+ sum=(ptr[3]+ptr[7]+1) >> 1;cr[0]=sum; \
+ sum=(ptr[8]+ptr[10]+1) >> 1;lum[2]=sum; \
+ sum=(ptr[12]+ptr[14]+1) >> 1;lum[3]=sum; \
+ sum=(ptr[9]+ptr[13]+1) >> 1;cb[1]=sum; \
+ sum=(ptr[11]+ptr[15]+1) >> 1;cr[1]=sum; \
+ sum=(ptr[16]+ptr[18]+1) >> 1;lum[4]=sum; \
+ sum=(ptr[20]+ptr[22]+1) >> 1;lum[5]=sum; \
+ sum=(ptr[17]+ptr[21]+1) >> 1;cb[2]=sum; \
+ sum=(ptr[19]+ptr[23]+1) >> 1;cr[2]=sum; \
+ sum=(ptr[24]+ptr[26]+1) >> 1;lum[6]=sum; \
+ sum=(ptr[28]+ptr[30]+1) >> 1;lum[7]=sum; \
+ sum=(ptr[25]+ptr[29]+1) >> 1;cb[3]=sum; \
+ sum=(ptr[27]+ptr[31]+1) >> 1;cr[3]=sum;
+
+#define LINE_NOUV_AVG \
+ sum=(ptr[0]+ptr[2]+1) >> 1;lum[0]=sum; \
+ sum=(ptr[4]+ptr[6]+1) >> 1;lum[1]=sum; \
+ sum=(ptr[8]+ptr[10]+1) >> 1;lum[2]=sum; \
+ sum=(ptr[12]+ptr[14]+1) >> 1;lum[3]=sum; \
+ sum=(ptr[16]+ptr[18]+1) >> 1;lum[4]=sum; \
+ sum=(ptr[20]+ptr[22]+1) >> 1;lum[5]=sum; \
+ sum=(ptr[24]+ptr[26]+1) >> 1;lum[6]=sum; \
+ sum=(ptr[28]+ptr[30]+1) >> 1;lum[7]=sum;
+
+#define DEINT_LINE_LUM(ptroff) \
+ sum=(-lum_m4[(ptroff)]+(lum_m3[(ptroff)]<<2)+(lum_m2[(ptroff)]<<1)+(lum_m1[(ptroff)]<<2)-lum[(ptroff)]); \
+ lum_m4[(ptroff)]=lum_m2[(ptroff)];\
+ lum_m2[(ptroff)]=cm[(sum+4)>>3];\
+ sum=(-lum_m4[(ptroff)+1]+(lum_m3[(ptroff)+1]<<2)+(lum_m2[(ptroff)+1]<<1)+(lum_m1[(ptroff)+1]<<2)-lum[(ptroff)+1]); \
+ lum_m4[(ptroff)+1]=lum_m2[(ptroff)+1];\
+ lum_m2[(ptroff)+1]=cm[(sum+4)>>3];\
+ sum=(-lum_m4[(ptroff)+2]+(lum_m3[(ptroff)+2]<<2)+(lum_m2[(ptroff)+2]<<1)+(lum_m1[(ptroff)+2]<<2)-lum[(ptroff)+2]); \
+ lum_m4[(ptroff)+2]=lum_m2[(ptroff)+2];\
+ lum_m2[(ptroff)+2]=cm[(sum+4)>>3];\
+ sum=(-lum_m4[(ptroff)+3]+(lum_m3[(ptroff)+3]<<2)+(lum_m2[(ptroff)+3]<<1)+(lum_m1[(ptroff)+3]<<2)-lum[(ptroff)+3]); \
+ lum_m4[(ptroff)+3]=lum_m2[(ptroff)+3];\
+ lum_m2[(ptroff)+3]=cm[(sum+4)>>3];
+
+#endif
+
+
+/* Read two fields separately. */
+static int aiw_read_picture(VideoData *s, uint8_t *data)
+{
+ uint8_t *ptr, *lum, *cb, *cr;
+ int h;
+#ifndef HAVE_MMX
+ int sum;
+#endif
+ uint8_t* src = s->src_mem;
+ uint8_t *ptrend = &src[s->width*2];
+ lum=data;
+ cb=&lum[s->width*s->height];
+ cr=&cb[(s->width*s->height)/4];
+ if (s->deint == 0 && s->halfw == 0) {
+ while (read(s->fd,src,s->width*2) < 0) {
+ usleep(100);
+ }
+ for (h = 0; h < s->height-2; h+=2) {
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ read(s->fd,src,s->width*2);
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16) {
+ LINE_NO_UV
+ }
+ read(s->fd,src,s->width*2);
+ }
+ /*
+ * Do last two lines
+ */
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ read(s->fd,src,s->width*2);
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16) {
+ LINE_NO_UV
+ }
+ /* drop second field */
+ while (read(s->fd,src,s->width*2) < 0) {
+ usleep(100);
+ }
+ for (h = 0; h < s->height - 1; h++) {
+ read(s->fd,src,s->width*2);
+ }
+ } else if (s->halfw == 1) {
+#ifdef HAVE_MMX
+ mmx_t rounder;
+ mmx_t masker;
+ rounder.uw[0]=1;
+ rounder.uw[1]=1;
+ rounder.uw[2]=1;
+ rounder.uw[3]=1;
+ masker.ub[0]=0xff;
+ masker.ub[1]=0;
+ masker.ub[2]=0xff;
+ masker.ub[3]=0;
+ masker.ub[4]=0xff;
+ masker.ub[5]=0;
+ masker.ub[6]=0xff;
+ masker.ub[7]=0;
+ pxor_r2r(mm7,mm7);
+ movq_m2r(rounder,mm6);
+#endif
+ while (read(s->fd,src,s->width*4) < 0) {
+ usleep(100);
+ }
+ ptrend = &src[s->width*4];
+ for (h = 0; h < s->height-2; h+=2) {
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8, cb+=4, cr+=4) {
+ LINE_WITHUV_AVG
+ }
+ read(s->fd,src,s->width*4);
+#ifdef HAVE_MMX
+ movq_m2r(masker,mm5);
+#endif
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8) {
+ LINE_NOUV_AVG
+ }
+ read(s->fd,src,s->width*4);
+ }
+ /*
+ * Do last two lines
+ */
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8, cb+=4, cr+=4) {
+ LINE_WITHUV_AVG
+ }
+ read(s->fd,src,s->width*4);
+#ifdef HAVE_MMX
+ movq_m2r(masker,mm5);
+#endif
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=8) {
+ LINE_NOUV_AVG
+ }
+ /* drop second field */
+ while (read(s->fd,src,s->width*4) < 0) {
+ usleep(100);
+ }
+ for (h = 0; h < s->height - 1; h++) {
+ read(s->fd,src,s->width*4);
+ }
+ } else {
+ uint8_t *lum_m1, *lum_m2, *lum_m3, *lum_m4;
+#ifdef HAVE_MMX
+ mmx_t rounder;
+ rounder.uw[0]=4;
+ rounder.uw[1]=4;
+ rounder.uw[2]=4;
+ rounder.uw[3]=4;
+ movq_m2r(rounder,mm6);
+ pxor_r2r(mm7,mm7);
+#else
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+#endif
+
+ /* read two fields and deinterlace them */
+ while (read(s->fd,src,s->width*2) < 0) {
+ usleep(100);
+ }
+ for (h = 0; h < (s->height/2)-2; h+=2) {
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ read(s->fd,src,s->width*2);
+ /* skip a luminance line - will be filled in later */
+ lum += s->width;
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ /* skip a luminance line - will be filled in later */
+ lum += s->width;
+ read(s->fd,src,s->width*2);
+ }
+ /*
+ * Do last two lines
+ */
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ /* skip a luminance line - will be filled in later */
+ lum += s->width;
+ read(s->fd,src,s->width*2);
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, cb+=8, cr+=8) {
+ LINE_WITH_UV
+ }
+ /*
+ *
+ * SECOND FIELD
+ *
+ */
+ lum=&data[s->width];
+ while (read(s->fd,src,s->width*2) < 0) {
+ usleep(10);
+ }
+ /* First (and last) two lines not interlaced */
+ for (h = 0; h < 2; h++) {
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16) {
+ LINE_NO_UV
+ }
+ read(s->fd,src,s->width*2);
+ /* skip a luminance line */
+ lum += s->width;
+ }
+ lum_m1=&lum[-s->width];
+ lum_m2=&lum_m1[-s->width];
+ lum_m3=&lum_m2[-s->width];
+ memmove(s->lum_m4_mem,&lum_m3[-s->width],s->width);
+ for (; h < (s->height/2)-1; h++) {
+ lum_m4=s->lum_m4_mem;
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16,lum_m1+=16,lum_m2+=16,lum_m3+=16,lum_m4+=16) {
+ LINE_NO_UV
+
+ DEINT_LINE_LUM(0)
+ DEINT_LINE_LUM(4)
+ DEINT_LINE_LUM(8)
+ DEINT_LINE_LUM(12)
+ }
+ read(s->fd,src,s->width*2);
+ /* skip a luminance line */
+ lum += s->width;
+ lum_m1 += s->width;
+ lum_m2 += s->width;
+ lum_m3 += s->width;
+ // lum_m4 += s->width;
+ }
+ /*
+ * Do last line
+ */
+ lum_m4=s->lum_m4_mem;
+ for (ptr = &src[0]; ptr < ptrend; ptr+=32, lum+=16, lum_m1+=16, lum_m2+=16, lum_m3+=16, lum_m4+=16) {
+ LINE_NO_UV
+
+ DEINT_LINE_LUM(0)
+ DEINT_LINE_LUM(4)
+ DEINT_LINE_LUM(8)
+ DEINT_LINE_LUM(12)
+ }
+ }
+#ifdef HAVE_MMX
+ emms();
+#endif
+ return s->frame_size;
+}
+
+static int aiw_close(VideoData *s)
+{
+ av_freep(&s->lum_m4_mem);
+ av_freep(&s->src_mem);
+ return 0;
+}
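
The capture loop above fills one contiguous planar YUV 4:2:0 buffer; the Cr pointer is derived from the Cb pointer a quarter-frame later (cr = &cb[(width*height)/4]). As a rough, hypothetical illustration of that plane arithmetic (not part of the patch), assuming the standard Y/Cb/Cr ordering:

#include <stdint.h>

/* Hypothetical helper: derive the three plane pointers of a packed
 * YUV 4:2:0 buffer, mirroring the lum/cb/cr arithmetic used above. */
static void yuv420_planes(uint8_t *data, int width, int height,
                          uint8_t **lum, uint8_t **cb, uint8_t **cr)
{
    *lum = data;                          /* full-resolution Y plane          */
    *cb  = data + width * height;         /* Cb, width/2 x height/2 samples   */
    *cr  = *cb + (width * height) / 4;    /* Cr follows Cb, same quarter size */
}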
diff --git a/contrib/ffmpeg/libavformat/grab_bktr.c b/contrib/ffmpeg/libavformat/grab_bktr.c
new file mode 100644
index 000000000..214599490
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/grab_bktr.c
@@ -0,0 +1,330 @@
+/*
+ * *BSD video grab interface
+ * Copyright (c) 2002 Steve O'Hara-Smith
+ * based on
+ * Linux video grab interface
+ * Copyright (c) 2000,2001 Gerard Lantau.
+ * and
+ * simple_grab.c Copyright (c) 1999 Roger Hardiman
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#if defined(__FreeBSD__)
+# if __FreeBSD__ >= 502100
+# include <dev/bktr/ioctl_meteor.h>
+# include <dev/bktr/ioctl_bt848.h>
+# else
+# include <machine/ioctl_meteor.h>
+# include <machine/ioctl_bt848.h>
+# endif
+#elif defined(__FreeBSD_kernel__)
+# include <dev/bktr/ioctl_meteor.h>
+# include <dev/bktr/ioctl_bt848.h>
+#elif defined(__DragonFly__)
+# include <dev/video/meteor/ioctl_meteor.h>
+# include <dev/video/bktr/ioctl_bt848.h>
+#else
+# include <dev/ic/bt8xx.h>
+#endif
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <signal.h>
+
+typedef struct {
+ int video_fd;
+ int tuner_fd;
+ int width, height;
+ int frame_rate;
+ int frame_rate_base;
+ u_int64_t per_frame;
+} VideoData;
+
+
+#define PAL 1
+#define PALBDGHI 1
+#define NTSC 2
+#define NTSCM 2
+#define SECAM 3
+#define PALN 4
+#define PALM 5
+#define NTSCJ 6
+
+/* PAL is 768 x 576. NTSC is 640 x 480 */
+#define PAL_HEIGHT 576
+#define SECAM_HEIGHT 576
+#define NTSC_HEIGHT 480
+
+#ifndef VIDEO_FORMAT
+#define VIDEO_FORMAT NTSC
+#endif
+
+static int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
+ METEOR_DEV3, METEOR_DEV_SVIDEO };
+
+uint8_t *video_buf;
+size_t video_buf_size;
+u_int64_t last_frame_time;
+volatile sig_atomic_t nsignals;
+
+
+static void catchsignal(int signal)
+{
+ nsignals++;
+ return;
+}
+
+static int bktr_init(const char *video_device, int width, int height,
+ int format, int *video_fd, int *tuner_fd, int idev, double frequency)
+{
+ struct meteor_geomet geo;
+ int h_max;
+ long ioctl_frequency;
+ char *arg;
+ int c;
+ struct sigaction act, old;
+
+ if (idev < 0 || idev > 4)
+ {
+ arg = getenv ("BKTR_DEV");
+ if (arg)
+ idev = atoi (arg);
+ if (idev < 0 || idev > 4)
+ idev = 1;
+ }
+
+ if (format < 1 || format > 6)
+ {
+ arg = getenv ("BKTR_FORMAT");
+ if (arg)
+ format = atoi (arg);
+ if (format < 1 || format > 6)
+ format = VIDEO_FORMAT;
+ }
+
+ if (frequency <= 0)
+ {
+ arg = getenv ("BKTR_FREQUENCY");
+ if (arg)
+ frequency = atof (arg);
+ if (frequency <= 0)
+ frequency = 0.0;
+ }
+
+ memset(&act, 0, sizeof(act));
+ sigemptyset(&act.sa_mask);
+ act.sa_handler = catchsignal;
+ sigaction(SIGUSR1, &act, &old);
+
+ *tuner_fd = open("/dev/tuner0", O_RDONLY);
+ if (*tuner_fd < 0)
+ perror("Warning: Tuner not opened, continuing");
+
+ *video_fd = open(video_device, O_RDONLY);
+ if (*video_fd < 0) {
+ perror(video_device);
+ return -1;
+ }
+
+ geo.rows = height;
+ geo.columns = width;
+ geo.frames = 1;
+ geo.oformat = METEOR_GEO_YUV_422 | METEOR_GEO_YUV_12;
+
+ switch (format) {
+ case PAL: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
+ case PALN: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALN; break;
+ case PALM: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALM; break;
+ case SECAM: h_max = SECAM_HEIGHT; c = BT848_IFORM_F_SECAM; break;
+ case NTSC: h_max = NTSC_HEIGHT; c = BT848_IFORM_F_NTSCM; break;
+ case NTSCJ: h_max = NTSC_HEIGHT; c = BT848_IFORM_F_NTSCJ; break;
+ default: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
+ }
+
+ if (height <= h_max / 2)
+ geo.oformat |= METEOR_GEO_EVEN_ONLY;
+
+ if (ioctl(*video_fd, METEORSETGEO, &geo) < 0) {
+ perror("METEORSETGEO");
+ return -1;
+ }
+
+ if (ioctl(*video_fd, BT848SFMT, &c) < 0) {
+ perror("BT848SFMT");
+ return -1;
+ }
+
+ c = bktr_dev[idev];
+ if (ioctl(*video_fd, METEORSINPUT, &c) < 0) {
+ perror("METEORSINPUT");
+ return -1;
+ }
+
+ video_buf_size = width * height * 12 / 8;
+
+ video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
+ PROT_READ, MAP_SHARED, *video_fd, (off_t)0);
+ if (video_buf == MAP_FAILED) {
+ perror("mmap");
+ return -1;
+ }
+
+ if (frequency != 0.0) {
+ ioctl_frequency = (unsigned long)(frequency*16);
+ if (ioctl(*tuner_fd, TVTUNER_SETFREQ, &ioctl_frequency) < 0)
+ perror("TVTUNER_SETFREQ");
+ }
+
+ c = AUDIO_UNMUTE;
+ if (ioctl(*tuner_fd, BT848_SAUDIO, &c) < 0)
+ perror("TVTUNER_SAUDIO");
+
+ c = METEOR_CAP_CONTINOUS;
+ ioctl(*video_fd, METEORCAPTUR, &c);
+
+ c = SIGUSR1;
+ ioctl(*video_fd, METEORSSIGNAL, &c);
+
+ return 0;
+}
+
+static void bktr_getframe(u_int64_t per_frame)
+{
+ u_int64_t curtime;
+
+ curtime = av_gettime();
+ if (!last_frame_time
+ || ((last_frame_time + per_frame) > curtime)) {
+ if (!usleep(last_frame_time + per_frame + per_frame / 8 - curtime)) {
+ if (!nsignals)
+ av_log(NULL, AV_LOG_INFO,
+ "SLEPT NO signals - %d microseconds late\n",
+ (int)(av_gettime() - last_frame_time - per_frame));
+ }
+ }
+ nsignals = 0;
+ last_frame_time = curtime;
+}
+
+
+/* note: we support only one picture read at a time */
+static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+
+ if (av_new_packet(pkt, video_buf_size) < 0)
+ return -EIO;
+
+ bktr_getframe(s->per_frame);
+
+ pkt->pts = av_gettime();
+ memcpy(pkt->data, video_buf, video_buf_size);
+
+ return video_buf_size;
+}
+
+static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ AVStream *st;
+ int width, height;
+ int frame_rate;
+ int frame_rate_base;
+ int format = -1;
+ const char *video_device;
+
+ if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0)
+ return -1;
+
+ width = ap->width;
+ height = ap->height;
+ frame_rate = ap->time_base.den;
+ frame_rate_base = ap->time_base.num;
+
+ video_device = ap->device;
+ if (!video_device)
+ video_device = "/dev/bktr0";
+
+ st = av_new_stream(s1, 0);
+ if (!st)
+ return -ENOMEM;
+ av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */
+
+ s->width = width;
+ s->height = height;
+ s->frame_rate = frame_rate;
+ s->frame_rate_base = frame_rate_base;
+ s->per_frame = ((u_int64_t)1000000 * s->frame_rate_base) / s->frame_rate;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->pix_fmt = PIX_FMT_YUV420P;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = width;
+ st->codec->height = height;
+ st->codec->time_base.den = frame_rate;
+ st->codec->time_base.num = frame_rate_base;
+
+ if (ap->standard) {
+ if (!strcasecmp(ap->standard, "pal"))
+ format = PAL;
+ else if (!strcasecmp(ap->standard, "secam"))
+ format = SECAM;
+ else if (!strcasecmp(ap->standard, "ntsc"))
+ format = NTSC;
+ }
+
+ if (bktr_init(video_device, width, height, format,
+ &(s->video_fd), &(s->tuner_fd), -1, 0.0) < 0)
+ return -EIO;
+
+ nsignals = 0;
+ last_frame_time = 0;
+
+ return 0;
+}
+
+static int grab_read_close(AVFormatContext *s1)
+{
+ VideoData *s = s1->priv_data;
+ int c;
+
+ c = METEOR_CAP_STOP_CONT;
+ ioctl(s->video_fd, METEORCAPTUR, &c);
+ close(s->video_fd);
+
+ c = AUDIO_MUTE;
+ ioctl(s->tuner_fd, BT848_SAUDIO, &c);
+ close(s->tuner_fd);
+
+ munmap((caddr_t)video_buf, video_buf_size);
+
+ return 0;
+}
+
+AVInputFormat video_grab_device_demuxer = {
+ "bktr",
+ "video grab",
+ sizeof(VideoData),
+ NULL,
+ grab_read_header,
+ grab_read_packet,
+ grab_read_close,
+ .flags = AVFMT_NOFILE,
+};
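
grab_read_header above converts the stream time base into a per-frame interval in microseconds (per_frame = 1000000 * frame_rate_base / frame_rate), which bktr_getframe then uses to pace reads. A tiny stand-alone sketch of that arithmetic for an NTSC-style 30000/1001 rate (hypothetical, not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int frame_rate      = 30000;  /* ap->time_base.den */
    int frame_rate_base = 1001;   /* ap->time_base.num */
    /* Same computation as s->per_frame in grab_read_header above. */
    uint64_t per_frame = ((uint64_t)1000000 * frame_rate_base) / frame_rate;
    printf("per_frame = %llu us\n", (unsigned long long)per_frame); /* 33366 us */
    return 0;
}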
diff --git a/contrib/ffmpeg/libavformat/gxf.c b/contrib/ffmpeg/libavformat/gxf.c
new file mode 100644
index 000000000..897cdade0
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gxf.c
@@ -0,0 +1,525 @@
+/*
+ * GXF demuxer.
+ * Copyright (c) 2006 Reimar Doeffinger.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "common.h"
+
+typedef enum {
+ PKT_MAP = 0xbc,
+ PKT_MEDIA = 0xbf,
+ PKT_EOS = 0xfb,
+ PKT_FLT = 0xfc,
+ PKT_UMF = 0xfd
+} pkt_type_t;
+
+typedef enum {
+ MAT_NAME = 0x40,
+ MAT_FIRST_FIELD = 0x41,
+ MAT_LAST_FIELD = 0x42,
+ MAT_MARK_IN = 0x43,
+ MAT_MARK_OUT = 0x44,
+ MAT_SIZE = 0x45
+} mat_tag_t;
+
+typedef enum {
+ TRACK_NAME = 0x4c,
+ TRACK_AUX = 0x4d,
+ TRACK_VER = 0x4e,
+ TRACK_MPG_AUX = 0x4f,
+ TRACK_FPS = 0x50,
+ TRACK_LINES = 0x51,
+ TRACK_FPF = 0x52
+} track_tag_t;
+
+typedef struct {
+ int64_t first_field;
+ int64_t last_field;
+ AVRational frames_per_second;
+ int32_t fields_per_frame;
+} st_info_t;
+
+/**
+ * \brief parses a packet header, extracting type and length
+ * \param pb ByteIOContext to read header from
+ * \param type detected packet type is stored here
+ * \param length detected packet length, excluding the header, is stored here
+ * \return 0 if header not found or contains invalid data, 1 otherwise
+ */
+static int parse_packet_header(ByteIOContext *pb, pkt_type_t *type, int *length) {
+ if (get_be32(pb))
+ return 0;
+ if (get_byte(pb) != 1)
+ return 0;
+ *type = get_byte(pb);
+ *length = get_be32(pb);
+ if ((*length >> 24) || *length < 16)
+ return 0;
+ *length -= 16;
+ if (get_be32(pb))
+ return 0;
+ if (get_byte(pb) != 0xe1)
+ return 0;
+ if (get_byte(pb) != 0xe2)
+ return 0;
+ return 1;
+}
+
+/**
+ * \brief check if file starts with a PKT_MAP header
+ */
+static int gxf_probe(AVProbeData *p) {
+ static const uint8_t startcode[] = {0, 0, 0, 0, 1, 0xbc}; // start with map packet
+ static const uint8_t endcode[] = {0, 0, 0, 0, 0xe1, 0xe2};
+ if (p->buf_size < 16)
+ return 0;
+ if (!memcmp(p->buf, startcode, sizeof(startcode)) &&
+ !memcmp(&p->buf[16 - sizeof(endcode)], endcode, sizeof(endcode)))
+ return AVPROBE_SCORE_MAX;
+ return 0;
+}
+
+/**
+ * \brief gets the stream index for the track with the specified id, creates new
+ * stream if not found
+ * \param id id of the stream to find or add
+ * \param format stream format identifier
+ */
+static int get_sindex(AVFormatContext *s, int id, int format) {
+ int i;
+ AVStream *st = NULL;
+ for (i = 0; i < s->nb_streams; i++) {
+ if (s->streams[i]->id == id)
+ return i;
+ }
+ st = av_new_stream(s, id);
+ switch (format) {
+ case 3:
+ case 4:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MJPEG;
+ break;
+ case 13:
+ case 15:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_DVVIDEO;
+ break;
+ case 14:
+ case 16:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_DVVIDEO;
+ break;
+ case 11:
+ case 12:
+ case 20:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MPEG2VIDEO;
+ st->need_parsing = 2; // get keyframe flag etc.
+ break;
+ case 22:
+ case 23:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MPEG1VIDEO;
+ st->need_parsing = 2; // get keyframe flag etc.
+ break;
+ case 9:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S24LE;
+ st->codec->channels = 1;
+ st->codec->sample_rate = 48000;
+ st->codec->bit_rate = 3 * 1 * 48000 * 8;
+ st->codec->block_align = 3 * 1;
+ st->codec->bits_per_sample = 24;
+ break;
+ case 10:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ st->codec->channels = 1;
+ st->codec->sample_rate = 48000;
+ st->codec->bit_rate = 2 * 1 * 48000 * 8;
+ st->codec->block_align = 2 * 1;
+ st->codec->bits_per_sample = 16;
+ break;
+ case 17:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_AC3;
+ st->codec->channels = 2;
+ st->codec->sample_rate = 48000;
+ break;
+ // timecode tracks:
+ case 7:
+ case 8:
+ case 24:
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ st->codec->codec_id = CODEC_ID_NONE;
+ break;
+ default:
+ st->codec->codec_type = CODEC_TYPE_UNKNOWN;
+ st->codec->codec_id = CODEC_ID_NONE;
+ break;
+ }
+ return s->nb_streams - 1;
+}
+
+/**
+ * \brief filters out interesting tags from material information.
+ * \param len length of tag section, will be adjusted to contain remaining bytes
+ * \param si struct to store collected information into
+ */
+static void gxf_material_tags(ByteIOContext *pb, int *len, st_info_t *si) {
+ si->first_field = AV_NOPTS_VALUE;
+ si->last_field = AV_NOPTS_VALUE;
+ while (*len >= 2) {
+ mat_tag_t tag = get_byte(pb);
+ int tlen = get_byte(pb);
+ *len -= 2;
+ if (tlen > *len)
+ return;
+ *len -= tlen;
+ if (tlen == 4) {
+ uint32_t value = get_be32(pb);
+ if (tag == MAT_FIRST_FIELD)
+ si->first_field = value;
+ else if (tag == MAT_LAST_FIELD)
+ si->last_field = value;
+ } else
+ url_fskip(pb, tlen);
+ }
+}
+
+/**
+ * \brief convert fps tag value to AVRational fps
+ * \param fps fps value from tag
+ * \return fps as AVRational, or 0 / 0 if unknown
+ */
+static AVRational fps_tag2avr(int32_t fps) {
+ extern const AVRational ff_frame_rate_tab[];
+ if (fps < 1 || fps > 9) fps = 9;
+ return ff_frame_rate_tab[9 - fps]; // values have opposite order
+}
+
+/**
+ * \brief convert UMF attributes flags to AVRational fps
+ * \param fps fps value from flags
+ * \return fps as AVRational, or 0 / 0 if unknown
+ */
+static AVRational fps_umf2avr(uint32_t flags) {
+ static const AVRational map[] = {{50, 1}, {60000, 1001}, {24, 1},
+ {25, 1}, {30000, 1001}};
+ int idx = av_log2((flags & 0x7c0) >> 6);
+ return map[idx];
+}
+
+/**
+ * \brief filters out interesting tags from track information.
+ * \param len length of tag section, will be adjusted to contain remaining bytes
+ * \param si struct to store collected information into
+ */
+static void gxf_track_tags(ByteIOContext *pb, int *len, st_info_t *si) {
+ si->frames_per_second = (AVRational){0, 0};
+ si->fields_per_frame = 0;
+ while (*len >= 2) {
+ track_tag_t tag = get_byte(pb);
+ int tlen = get_byte(pb);
+ *len -= 2;
+ if (tlen > *len)
+ return;
+ *len -= tlen;
+ if (tlen == 4) {
+ uint32_t value = get_be32(pb);
+ if (tag == TRACK_FPS)
+ si->frames_per_second = fps_tag2avr(value);
+ else if (tag == TRACK_FPF && (value == 1 || value == 2))
+ si->fields_per_frame = value;
+ } else
+ url_fskip(pb, tlen);
+ }
+}
+
+/**
+ * \brief read index from FLT packet into stream 0 av_index
+ */
+static void gxf_read_index(AVFormatContext *s, int pkt_len) {
+ ByteIOContext *pb = &s->pb;
+ AVStream *st = s->streams[0];
+ uint32_t fields_per_map = get_le32(pb);
+ uint32_t map_cnt = get_le32(pb);
+ int i;
+ pkt_len -= 8;
+ if (map_cnt > 1000) {
+ av_log(s, AV_LOG_ERROR, "GXF: too many index entries %u (%x)\n", map_cnt, map_cnt);
+ map_cnt = 1000;
+ }
+ if (pkt_len < 4 * map_cnt) {
+ av_log(s, AV_LOG_ERROR, "GXF: invalid index length\n");
+ url_fskip(pb, pkt_len);
+ return;
+ }
+ pkt_len -= 4 * map_cnt;
+ av_add_index_entry(st, 0, 0, 0, 0, 0);
+ for (i = 0; i < map_cnt; i++)
+ av_add_index_entry(st, (uint64_t)get_le32(pb) * 1024,
+ i * (uint64_t)fields_per_map + 1, 0, 0, 0);
+ url_fskip(pb, pkt_len);
+}
+
+static int gxf_header(AVFormatContext *s, AVFormatParameters *ap) {
+ ByteIOContext *pb = &s->pb;
+ pkt_type_t pkt_type;
+ int map_len;
+ int len;
+ AVRational main_timebase = {0, 0};
+ st_info_t si;
+ int i;
+ if (!parse_packet_header(pb, &pkt_type, &map_len) || pkt_type != PKT_MAP) {
+ av_log(s, AV_LOG_ERROR, "GXF: map packet not found\n");
+ return 0;
+ }
+ map_len -= 2;
+ if (get_byte(pb) != 0x0e0 || get_byte(pb) != 0xff) {
+ av_log(s, AV_LOG_ERROR, "GXF: unknown version or invalid map preamble\n");
+ return 0;
+ }
+ map_len -= 2;
+ len = get_be16(pb); // length of material data section
+ if (len > map_len) {
+ av_log(s, AV_LOG_ERROR, "GXF: material data longer than map data\n");
+ return 0;
+ }
+ map_len -= len;
+ gxf_material_tags(pb, &len, &si);
+ url_fskip(pb, len);
+ map_len -= 2;
+ len = get_be16(pb); // length of track description
+ if (len > map_len) {
+ av_log(s, AV_LOG_ERROR, "GXF: track description longer than map data\n");
+ return 0;
+ }
+ map_len -= len;
+ while (len > 0) {
+ int track_type, track_id, track_len;
+ AVStream *st;
+ int idx;
+ len -= 4;
+ track_type = get_byte(pb);
+ track_id = get_byte(pb);
+ track_len = get_be16(pb);
+ len -= track_len;
+ gxf_track_tags(pb, &track_len, &si);
+ url_fskip(pb, track_len);
+ if (!(track_type & 0x80)) {
+ av_log(s, AV_LOG_ERROR, "GXF: invalid track type %x\n", track_type);
+ continue;
+ }
+ track_type &= 0x7f;
+ if ((track_id & 0xc0) != 0xc0) {
+ av_log(s, AV_LOG_ERROR, "GXF: invalid track id %x\n", track_id);
+ continue;
+ }
+ track_id &= 0x3f;
+ idx = get_sindex(s, track_id, track_type);
+ if (idx < 0) continue;
+ st = s->streams[idx];
+ if (!main_timebase.num || !main_timebase.den) {
+ main_timebase.num = si.frames_per_second.den;
+ main_timebase.den = si.frames_per_second.num * si.fields_per_frame;
+ }
+ st->start_time = si.first_field;
+ if (si.first_field != AV_NOPTS_VALUE && si.last_field != AV_NOPTS_VALUE)
+ st->duration = si.last_field - si.first_field;
+ }
+ if (len < 0)
+ av_log(s, AV_LOG_ERROR, "GXF: invalid track description length specified\n");
+ if (map_len)
+ url_fskip(pb, map_len);
+ if (!parse_packet_header(pb, &pkt_type, &len)) {
+ av_log(s, AV_LOG_ERROR, "GXF: sync lost in header\n");
+ return -1;
+ }
+ if (pkt_type == PKT_FLT) {
+ gxf_read_index(s, len);
+ if (!parse_packet_header(pb, &pkt_type, &len)) {
+ av_log(s, AV_LOG_ERROR, "GXF: sync lost in header\n");
+ return -1;
+ }
+ }
+ if (pkt_type == PKT_UMF) {
+ if (len >= 9) {
+ AVRational fps;
+ len -= 9;
+ url_fskip(pb, 5);
+ fps = fps_umf2avr(get_le32(pb));
+ if (!main_timebase.num || !main_timebase.den) {
+ // this may not always be correct, but simply the best we can get
+ main_timebase.num = fps.den;
+ main_timebase.den = fps.num;
+ }
+ } else
+ av_log(s, AV_LOG_INFO, "GXF: UMF packet too short\n");
+ } else
+ av_log(s, AV_LOG_INFO, "GXF: UMF packet missing\n");
+ url_fskip(pb, len);
+ for (i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ if (main_timebase.num && main_timebase.den)
+ st->time_base = main_timebase;
+ else {
+ st->start_time = st->duration = AV_NOPTS_VALUE;
+ }
+ }
+ return 0;
+}
+
+#define READ_ONE() \
+ { \
+ if (!max_interval-- || url_feof(pb)) \
+ goto out; \
+ tmp = tmp << 8 | get_byte(pb); \
+ }
+
+/**
+ * \brief resync the stream on the next media packet with specified properties
+ * \param max_interval how many bytes to search for matching packet at most
+ * \param track track id the media packet must belong to, -1 for any
+ * \param timestamp minimum timestamp (== field number) the packet must have, -1 for any
+ * \return timestamp of packet found
+ */
+static int64_t gxf_resync_media(AVFormatContext *s, uint64_t max_interval, int track, int timestamp) {
+ uint32_t tmp;
+ uint64_t last_pos;
+ uint64_t last_found_pos = 0;
+ int cur_track;
+ int64_t cur_timestamp = AV_NOPTS_VALUE;
+ int len;
+ ByteIOContext *pb = &s->pb;
+ pkt_type_t type;
+ tmp = get_be32(pb);
+start:
+ while (tmp)
+ READ_ONE();
+ READ_ONE();
+ if (tmp != 1)
+ goto start;
+ last_pos = url_ftell(pb);
+ url_fseek(pb, -5, SEEK_CUR);
+ if (!parse_packet_header(pb, &type, &len) || type != PKT_MEDIA) {
+ url_fseek(pb, last_pos, SEEK_SET);
+ goto start;
+ }
+ get_byte(pb);
+ cur_track = get_byte(pb);
+ cur_timestamp = get_be32(pb);
+ last_found_pos = url_ftell(pb) - 16 - 6;
+ if ((track >= 0 && track != cur_track) || (timestamp >= 0 && timestamp > cur_timestamp)) {
+ url_fseek(pb, last_pos, SEEK_SET);
+ goto start;
+ }
+out:
+ if (last_found_pos)
+ url_fseek(pb, last_found_pos, SEEK_SET);
+ return cur_timestamp;
+}
+
+static int gxf_packet(AVFormatContext *s, AVPacket *pkt) {
+ ByteIOContext *pb = &s->pb;
+ pkt_type_t pkt_type;
+ int pkt_len;
+ while (!url_feof(pb)) {
+ int track_type, track_id, ret;
+ int field_nr;
+ if (!parse_packet_header(pb, &pkt_type, &pkt_len)) {
+ if (!url_feof(pb))
+ av_log(s, AV_LOG_ERROR, "GXF: sync lost\n");
+ return -1;
+ }
+ if (pkt_type == PKT_FLT) {
+ gxf_read_index(s, pkt_len);
+ continue;
+ }
+ if (pkt_type != PKT_MEDIA) {
+ url_fskip(pb, pkt_len);
+ continue;
+ }
+ if (pkt_len < 16) {
+ av_log(s, AV_LOG_ERROR, "GXF: invalid media packet length\n");
+ continue;
+ }
+ pkt_len -= 16;
+ track_type = get_byte(pb);
+ track_id = get_byte(pb);
+ field_nr = get_be32(pb);
+ get_be32(pb); // field information
+ get_be32(pb); // "timeline" field number
+ get_byte(pb); // flags
+ get_byte(pb); // reserved
+ // NOTE: there is also data length information in the
+ // field information, it might be better to take this into account
+ // as well.
+ ret = av_get_packet(pb, pkt, pkt_len);
+ pkt->stream_index = get_sindex(s, track_id, track_type);
+ pkt->dts = field_nr;
+ return ret;
+ }
+ return AVERROR_IO;
+}
+
+static int gxf_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) {
+ uint64_t pos;
+ uint64_t maxlen = 100 * 1024 * 1024;
+ AVStream *st = s->streams[0];
+ int64_t start_time = s->streams[stream_index]->start_time;
+ int64_t found;
+ int idx;
+ if (timestamp < start_time) timestamp = start_time;
+ idx = av_index_search_timestamp(st, timestamp - start_time,
+ AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
+ if (idx < 0)
+ return -1;
+ pos = st->index_entries[idx].pos;
+ if (idx < st->nb_index_entries - 2)
+ maxlen = st->index_entries[idx + 2].pos - pos;
+ maxlen = FFMAX(maxlen, 200 * 1024);
+ url_fseek(&s->pb, pos, SEEK_SET);
+ found = gxf_resync_media(s, maxlen, -1, timestamp);
+ if (FFABS(found - timestamp) > 4)
+ return -1;
+ return 0;
+}
+
+static int64_t gxf_read_timestamp(AVFormatContext *s, int stream_index,
+ int64_t *pos, int64_t pos_limit) {
+ ByteIOContext *pb = &s->pb;
+ int64_t res;
+ url_fseek(pb, *pos, SEEK_SET);
+ res = gxf_resync_media(s, pos_limit - *pos, -1, -1);
+ *pos = url_ftell(pb);
+ return res;
+}
+
+AVInputFormat gxf_demuxer = {
+ "gxf",
+ "GXF format",
+ 0,
+ gxf_probe,
+ gxf_header,
+ gxf_packet,
+ NULL,
+ gxf_seek,
+ gxf_read_timestamp,
+};
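
parse_packet_header above expects every GXF packet to begin with a 16-byte header: a 32-bit zero leader, 0x01, the packet type, a 32-bit big-endian length that includes the header itself (so it must be at least 16 and below 2^24), four reserved zero bytes, and the 0xE1/0xE2 trailer; gxf_probe matches the same bytes. A minimal sketch of that layout against a plain byte buffer rather than a ByteIOContext (hypothetical helper, not part of the patch):

#include <stdint.h>
#include <string.h>

/* Hypothetical: build the 16-byte GXF packet header described above. */
static void gxf_pack_header(uint8_t out[16], uint8_t type, uint32_t total_len)
{
    memset(out, 0, 16);
    out[4]  = 0x01;            /* leader terminator           */
    out[5]  = type;            /* e.g. 0xbc for a MAP packet  */
    out[6]  = total_len >> 24; /* big-endian packet length,   */
    out[7]  = total_len >> 16; /* header included             */
    out[8]  = total_len >> 8;
    out[9]  = total_len;
    out[14] = 0xe1;            /* trailer bytes checked by    */
    out[15] = 0xe2;            /* parse_packet_header         */
}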
diff --git a/contrib/ffmpeg/libavformat/gxf.h b/contrib/ffmpeg/libavformat/gxf.h
new file mode 100644
index 000000000..0e2a31ca4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gxf.h
@@ -0,0 +1,34 @@
+/*
+ * GXF demuxer
+ * copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FFMPEG_GXF_H
+#define FFMPEG_GXF_H
+
+/* gxf.c */
+typedef enum {
+ PKT_MAP = 0xbc,
+ PKT_MEDIA = 0xbf,
+ PKT_EOS = 0xfb,
+ PKT_FLT = 0xfc,
+ PKT_UMF = 0xfd
+} pkt_type_t;
+
+#endif /* FFMPEG_GXF_H */
diff --git a/contrib/ffmpeg/libavformat/gxfenc.c b/contrib/ffmpeg/libavformat/gxfenc.c
new file mode 100644
index 000000000..fef5ec104
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/gxfenc.c
@@ -0,0 +1,829 @@
+/*
+ * GXF muxer.
+ * Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "gxf.h"
+#include "riff.h"
+#include "fifo.h"
+
+#define GXF_AUDIO_PACKET_SIZE 65536
+
+typedef struct GXFStreamContext {
+ AVCodecContext *codec;
+ AVFifoBuffer audio_buffer;
+ uint32_t track_type;
+ uint32_t sample_size;
+ uint32_t sample_rate;
+ uint16_t media_type;
+ uint16_t media_info;
+ uint8_t index;
+ int frame_rate_index;
+ int lines_index;
+ int fields;
+ int iframes;
+ int pframes;
+ int bframes;
+ int p_per_gop;
+ int b_per_gop;
+ int first_gop_closed;
+ int64_t current_dts;
+ int dts_delay;
+} GXFStreamContext;
+
+typedef struct GXFContext {
+ uint32_t nb_frames;
+ uint32_t material_flags;
+ uint16_t audio_tracks;
+ uint16_t mpeg_tracks;
+ int64_t creation_time;
+ uint32_t umf_start_offset;
+ uint32_t umf_track_offset;
+ uint32_t umf_media_offset;
+ uint32_t umf_user_data_offset;
+ uint32_t umf_user_data_size;
+ uint32_t umf_length;
+ uint16_t umf_track_size;
+ uint16_t umf_media_size;
+ int audio_written;
+ int sample_rate;
+ int flags;
+ AVFormatContext *fc;
+ GXFStreamContext streams[48];
+} GXFContext;
+
+typedef struct GXF_Lines {
+ int height;
+ int index;
+} GXF_Lines;
+
+
+/* FIXME check if it is relevant */
+static const GXF_Lines gxf_lines_tab[] = {
+ { 480, 1 }, /* NTSC */
+ { 512, 1 }, /* NTSC + VBI */
+ { 576, 2 }, /* PAL */
+ { 608, 2 }, /* PAL + VBI */
+ { 1080, 4 },
+ { 720, 6 },
+};
+
+static const CodecTag gxf_media_types[] = {
+ { CODEC_ID_MJPEG , 3 }, /* NTSC */
+ { CODEC_ID_MJPEG , 4 }, /* PAL */
+ { CODEC_ID_PCM_S24LE , 9 },
+ { CODEC_ID_PCM_S16LE , 10 },
+ { CODEC_ID_MPEG2VIDEO, 11 }, /* NTSC */
+ { CODEC_ID_MPEG2VIDEO, 12 }, /* PAL */
+ { CODEC_ID_DVVIDEO , 13 }, /* NTSC */
+ { CODEC_ID_DVVIDEO , 14 }, /* PAL */
+ { CODEC_ID_DVVIDEO , 15 }, /* 50M NTSC */
+ { CODEC_ID_DVVIDEO , 16 }, /* 50M PAL */
+ { CODEC_ID_AC3 , 17 },
+ //{ CODEC_ID_NONE, , 18 }, /* Non compressed 24 bit audio */
+ { CODEC_ID_MPEG2VIDEO, 20 }, /* MPEG HD */
+ { CODEC_ID_MPEG1VIDEO, 22 }, /* NTSC */
+ { CODEC_ID_MPEG1VIDEO, 23 }, /* PAL */
+ { 0, 0 },
+};
+
+#define SERVER_PATH "/space/"
+#define ES_NAME_PATTERN "ES."
+
+static int gxf_find_lines_index(GXFStreamContext *ctx)
+{
+ int i;
+
+ for (i = 0; i < 6; ++i) {
+ if (ctx->codec->height == gxf_lines_tab[i].height) {
+ ctx->lines_index = gxf_lines_tab[i].index;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static void gxf_write_padding(ByteIOContext *pb, offset_t to_pad)
+{
+ for (; to_pad > 0; to_pad--) {
+ put_byte(pb, 0);
+ }
+}
+
+static offset_t updatePacketSize(ByteIOContext *pb, offset_t pos)
+{
+ offset_t curpos;
+ int size;
+
+ size = url_ftell(pb) - pos;
+ if (size % 4) {
+ gxf_write_padding(pb, 4 - size % 4);
+ size = url_ftell(pb) - pos;
+ }
+ curpos = url_ftell(pb);
+ url_fseek(pb, pos + 6, SEEK_SET);
+ put_be32(pb, size);
+ url_fseek(pb, curpos, SEEK_SET);
+ return curpos - pos;
+}
+
+static offset_t updateSize(ByteIOContext *pb, offset_t pos)
+{
+ offset_t curpos;
+
+ curpos = url_ftell(pb);
+ url_fseek(pb, pos, SEEK_SET);
+ put_be16(pb, curpos - pos - 2);
+ url_fseek(pb, curpos, SEEK_SET);
+ return curpos - pos;
+}
+
+static void gxf_write_packet_header(ByteIOContext *pb, pkt_type_t type)
+{
+ put_be32(pb, 0); /* packet leader for synchro */
+ put_byte(pb, 1);
+ put_byte(pb, type); /* map packet */
+ put_be32(pb, 0); /* size */
+ put_be32(pb, 0); /* reserved */
+ put_byte(pb, 0xE1); /* trailer 1 */
+ put_byte(pb, 0xE2); /* trailer 2 */
+}
+
+static int gxf_write_mpeg_auxiliary(ByteIOContext *pb, GXFStreamContext *ctx)
+{
+ char buffer[1024];
+ int size;
+
+ if (ctx->iframes) {
+ ctx->p_per_gop = ctx->pframes / ctx->iframes;
+ if (ctx->pframes % ctx->iframes)
+ ctx->p_per_gop++;
+ if (ctx->pframes)
+ ctx->b_per_gop = ctx->bframes / ctx->pframes;
+ if (ctx->p_per_gop > 9)
+ ctx->p_per_gop = 9; /* ensure value won't take more than one char */
+ if (ctx->b_per_gop > 9)
+ ctx->b_per_gop = 9; /* ensure value won't take more than one char */
+ }
+ size = snprintf(buffer, 1024, "Ver 1\nBr %.6f\nIpg 1\nPpi %d\nBpiop %d\n"
+ "Pix 0\nCf %d\nCg %d\nSl 7\nnl16 %d\nVi 1\nf1 1\n",
+ (float)ctx->codec->bit_rate, ctx->p_per_gop, ctx->b_per_gop,
+ ctx->codec->pix_fmt == PIX_FMT_YUV422P ? 2 : 1, ctx->first_gop_closed == 1,
+ ctx->codec->height / 16);
+ put_byte(pb, 0x4F);
+ put_byte(pb, size + 1);
+ put_buffer(pb, (uint8_t *)buffer, size + 1);
+ return size + 3;
+}
+
+static int gxf_write_timecode_auxiliary(ByteIOContext *pb, GXFStreamContext *ctx)
+{
+ /* FIXME implement that */
+ put_byte(pb, 0); /* fields */
+ put_byte(pb, 0); /* seconds */
+ put_byte(pb, 0); /* minutes */
+ put_byte(pb, 0); /* flags + hours */
+ /* reserved */
+ put_be32(pb, 0);
+ return 8;
+}
+
+static int gxf_write_track_description(ByteIOContext *pb, GXFStreamContext *stream)
+{
+ offset_t pos;
+
+ /* track description section */
+ put_byte(pb, stream->media_type + 0x80);
+ put_byte(pb, stream->index + 0xC0);
+
+ pos = url_ftell(pb);
+ put_be16(pb, 0); /* size */
+
+ /* media file name */
+ put_byte(pb, 0x4C);
+ put_byte(pb, strlen(ES_NAME_PATTERN) + 3);
+ put_tag(pb, ES_NAME_PATTERN);
+ put_be16(pb, stream->media_info);
+ put_byte(pb, 0);
+
+ if (stream->codec->codec_id != CODEC_ID_MPEG2VIDEO) {
+ /* auxiliary information */
+ put_byte(pb, 0x4D);
+ put_byte(pb, 8);
+ if (stream->codec->codec_id == CODEC_ID_NONE)
+ gxf_write_timecode_auxiliary(pb, stream);
+ else
+ put_le64(pb, 0);
+ }
+
+ /* file system version */
+ put_byte(pb, 0x4E);
+ put_byte(pb, 4);
+ put_be32(pb, 0);
+
+ if (stream->codec->codec_id == CODEC_ID_MPEG2VIDEO)
+ gxf_write_mpeg_auxiliary(pb, stream);
+
+ /* frame rate */
+ put_byte(pb, 0x50);
+ put_byte(pb, 4);
+ put_be32(pb, stream->frame_rate_index);
+
+ /* lines per frame */
+ put_byte(pb, 0x51);
+ put_byte(pb, 4);
+ put_be32(pb, stream->lines_index);
+
+ /* fields per frame */
+ put_byte(pb, 0x52);
+ put_byte(pb, 4);
+ put_be32(pb, stream->fields);
+
+ return updateSize(pb, pos);
+}
+
+static int gxf_write_material_data_section(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos;
+ const char *filename = strrchr(ctx->fc->filename, '/');
+
+ pos = url_ftell(pb);
+ put_be16(pb, 0); /* size */
+
+ /* name */
+ if (filename)
+ filename++;
+ else
+ filename = ctx->fc->filename;
+ put_byte(pb, 0x40);
+ put_byte(pb, strlen(SERVER_PATH) + strlen(filename) + 1);
+ put_tag(pb, SERVER_PATH);
+ put_tag(pb, filename);
+ put_byte(pb, 0);
+
+ /* first field */
+ put_byte(pb, 0x41);
+ put_byte(pb, 4);
+ put_be32(pb, 0);
+
+ /* last field */
+ put_byte(pb, 0x42);
+ put_byte(pb, 4);
+ put_be32(pb, ctx->nb_frames);
+
+ /* reserved */
+ put_byte(pb, 0x43);
+ put_byte(pb, 4);
+ put_be32(pb, 0);
+
+ put_byte(pb, 0x44);
+ put_byte(pb, 4);
+ put_be32(pb, ctx->nb_frames);
+
+ /* estimated size */
+ put_byte(pb, 0x45);
+ put_byte(pb, 4);
+ put_be32(pb, url_fsize(pb) / 1024);
+
+ return updateSize(pb, pos);
+}
+
+static int gxf_write_track_description_section(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos;
+ int i;
+
+ pos = url_ftell(pb);
+ put_be16(pb, 0); /* size */
+ for (i = 0; i < ctx->fc->nb_streams; ++i)
+ gxf_write_track_description(pb, &ctx->streams[i]);
+ return updateSize(pb, pos);
+}
+
+static int gxf_write_map_packet(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+
+ gxf_write_packet_header(pb, PKT_MAP);
+
+ /* preamble */
+ put_byte(pb, 0xE0); /* version */
+ put_byte(pb, 0xFF); /* reserved */
+
+ gxf_write_material_data_section(pb, ctx);
+ gxf_write_track_description_section(pb, ctx);
+
+ return updatePacketSize(pb, pos);
+}
+
+#if 0
+static int gxf_write_flt_packet(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+ int i;
+
+ gxf_write_packet_header(pb, PKT_FLT);
+
+ put_le32(pb, 1000); /* number of fields */
+ put_le32(pb, 0); /* number of active flt entries */
+
+ for (i = 0; i < 1000; ++i) {
+ put_le32(pb, 0);
+ }
+ return updatePacketSize(pb, pos);
+}
+#endif
+
+static int gxf_write_umf_material_description(ByteIOContext *pb, GXFContext *ctx)
+{
+ put_le32(pb, ctx->flags);
+ put_le32(pb, ctx->nb_frames); /* length of the longest track */
+ put_le32(pb, ctx->nb_frames); /* length of the shortest track */
+ put_le32(pb, 0); /* mark in */
+ put_le32(pb, ctx->nb_frames); /* mark out */
+ put_le32(pb, 0); /* timecode mark in */
+ put_le32(pb, ctx->nb_frames); /* timecode mark out */
+ put_le64(pb, ctx->fc->timestamp); /* modification time */
+ put_le64(pb, ctx->fc->timestamp); /* creation time */
+ put_le16(pb, 0); /* reserved */
+ put_le16(pb, 0); /* reserved */
+ put_le16(pb, ctx->audio_tracks);
+ put_le16(pb, 0); /* timecode track count */
+ put_le16(pb, 0); /* reserved */
+ put_le16(pb, ctx->mpeg_tracks);
+ return 48;
+}
+
+static int gxf_write_umf_payload(ByteIOContext *pb, GXFContext *ctx)
+{
+ put_le32(pb, ctx->umf_length); /* total length of the umf data */
+ put_le32(pb, 3); /* version */
+ put_le32(pb, ctx->fc->nb_streams);
+ put_le32(pb, ctx->umf_track_offset); /* umf track section offset */
+ put_le32(pb, ctx->umf_track_size);
+ put_le32(pb, ctx->fc->nb_streams);
+ put_le32(pb, ctx->umf_media_offset);
+ put_le32(pb, ctx->umf_media_size);
+ put_le32(pb, ctx->umf_user_data_offset); /* user data offset */
+ put_le32(pb, ctx->umf_user_data_size); /* user data size */
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+ return 48;
+}
+
+static int gxf_write_umf_track_description(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+ int tracks[255]={0};
+ int i;
+
+ ctx->umf_track_offset = pos - ctx->umf_start_offset;
+ for (i = 0; i < ctx->fc->nb_streams; ++i) {
+ AVStream *st = ctx->fc->streams[i];
+ GXFStreamContext *sc = &ctx->streams[i];
+ int id = 0;
+
+ switch (st->codec->codec_id) {
+ case CODEC_ID_MPEG1VIDEO: id= 'L'; break;
+ case CODEC_ID_MPEG2VIDEO: id= 'M'; break;
+ case CODEC_ID_PCM_S16LE: id= 'A'; break;
+ case CODEC_ID_DVVIDEO: id= sc->track_type == 6 ? 'E' : 'D'; break;
+ case CODEC_ID_MJPEG: id= 'V'; break;
+ default: break;
+ }
+ sc->media_info= id << 8;
+ /* FIXME first 10 audio tracks are 0 to 9, the next 22 are A to V */
+ sc->media_info |= '0' + (tracks[id]++);
+ put_le16(pb, sc->media_info);
+ put_le16(pb, 1);
+ }
+ return url_ftell(pb) - pos;
+}
+
+static int gxf_write_umf_media_mpeg(ByteIOContext *pb, GXFStreamContext *stream)
+{
+ if (stream->codec->pix_fmt == PIX_FMT_YUV422P)
+ put_le32(pb, 2);
+ else
+ put_le32(pb, 1); /* default to 420 */
+ put_le32(pb, stream->first_gop_closed == 1); /* closed = 1, open = 0, unknown = 255 */
+ put_le32(pb, 3); /* top = 1, bottom = 2, frame = 3, unknown = 0 */
+ put_le32(pb, 1); /* I picture per GOP */
+ put_le32(pb, stream->p_per_gop);
+ put_le32(pb, stream->b_per_gop);
+ if (stream->codec->codec_id == CODEC_ID_MPEG2VIDEO)
+ put_le32(pb, 2);
+ else if (stream->codec->codec_id == CODEC_ID_MPEG1VIDEO)
+ put_le32(pb, 1);
+ else
+ put_le32(pb, 0);
+ put_le32(pb, 0); /* reserved */
+ return 32;
+}
+
+static int gxf_write_umf_media_timecode(ByteIOContext *pb, GXFStreamContext *track)
+{
+ /* FIXME implement */
+ put_be32(pb, 0); /* drop frame flag */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ return 32;
+}
+
+static int gxf_write_umf_media_dv(ByteIOContext *pb, GXFStreamContext *track)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ put_be32(pb, 0);
+ }
+ return 32;
+}
+
+static int gxf_write_umf_media_audio(ByteIOContext *pb, GXFStreamContext *track)
+{
+ put_le64(pb, av_dbl2int(1)); /* sound level to begin to */
+ put_le64(pb, av_dbl2int(1)); /* sound level to begin to */
+ put_le32(pb, 0); /* number of fields over which to ramp up sound level */
+ put_le32(pb, 0); /* number of fields over which to ramp down sound level */
+ put_le32(pb, 0); /* reserved */
+ put_le32(pb, 0); /* reserved */
+ return 32;
+}
+
+#if 0
+static int gxf_write_umf_media_mjpeg(ByteIOContext *pb, GXFStreamContext *track)
+{
+ put_be64(pb, 0); /* FIXME FLOAT max chroma quant level */
+ put_be64(pb, 0); /* FIXME FLOAT max luma quant level */
+ put_be64(pb, 0); /* FIXME FLOAT min chroma quant level */
+ put_be64(pb, 0); /* FIXME FLOAT min luma quant level */
+ return 32;
+}
+#endif
+
+static int gxf_write_umf_media_description(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos;
+ int i;
+
+ pos = url_ftell(pb);
+ ctx->umf_media_offset = pos - ctx->umf_start_offset;
+ for (i = 0; i < ctx->fc->nb_streams; ++i) {
+ GXFStreamContext *sc = &ctx->streams[i];
+ char buffer[88];
+ offset_t startpos, curpos;
+ int path_size = strlen(ES_NAME_PATTERN);
+
+ memset(buffer, 0, 88);
+ startpos = url_ftell(pb);
+ put_le16(pb, 0); /* length */
+ put_le16(pb, sc->media_info);
+ put_le16(pb, 0); /* reserved */
+ put_le16(pb, 0); /* reserved */
+ put_le32(pb, ctx->nb_frames);
+ put_le32(pb, 0); /* attributes rw, ro */
+ put_le32(pb, 0); /* mark in */
+ put_le32(pb, ctx->nb_frames); /* mark out */
+ strncpy(buffer, ES_NAME_PATTERN, path_size);
+ put_buffer(pb, (uint8_t *)buffer, path_size);
+ put_be16(pb, sc->media_info);
+ put_buffer(pb, (uint8_t *)buffer + path_size + 2, 88 - path_size - 2);
+ put_le32(pb, sc->track_type);
+ put_le32(pb, sc->sample_rate);
+ put_le32(pb, sc->sample_size);
+ put_le32(pb, 0); /* reserved */
+ switch (sc->codec->codec_id) {
+ case CODEC_ID_MPEG2VIDEO:
+ gxf_write_umf_media_mpeg(pb, sc);
+ break;
+ case CODEC_ID_PCM_S16LE:
+ gxf_write_umf_media_audio(pb, sc);
+ break;
+ case CODEC_ID_DVVIDEO:
+ gxf_write_umf_media_dv(pb, sc);
+ break;
+ default:
+ gxf_write_umf_media_timecode(pb, sc); /* 8 zeroed 32-bit words */
+ }
+ curpos = url_ftell(pb);
+ url_fseek(pb, startpos, SEEK_SET);
+ put_le16(pb, curpos - startpos);
+ url_fseek(pb, curpos, SEEK_SET);
+ }
+ return url_ftell(pb) - pos;
+}
+
+static int gxf_write_umf_user_data(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+ ctx->umf_user_data_offset = pos - ctx->umf_start_offset;
+ put_le32(pb, 20);
+ put_le32(pb, 0);
+ put_le16(pb, 0);
+ put_le16(pb, 0);
+ put_le32(pb, 0);
+ put_byte(pb, 0);
+ put_byte(pb, 0);
+ put_byte(pb, 0);
+ put_byte(pb, 0);
+ return 20;
+}
+
+static int gxf_write_umf_packet(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+
+ gxf_write_packet_header(pb, PKT_UMF);
+
+ /* preamble */
+ put_byte(pb, 3); /* first and last (only) packet */
+ put_be32(pb, ctx->umf_length); /* data length */
+
+ ctx->umf_start_offset = url_ftell(pb);
+ gxf_write_umf_payload(pb, ctx);
+ gxf_write_umf_material_description(pb, ctx);
+ ctx->umf_track_size = gxf_write_umf_track_description(pb, ctx);
+ ctx->umf_media_size = gxf_write_umf_media_description(pb, ctx);
+ ctx->umf_user_data_size = gxf_write_umf_user_data(pb, ctx);
+ ctx->umf_length = url_ftell(pb) - ctx->umf_start_offset;
+ return updatePacketSize(pb, pos);
+}
+
+static int gxf_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ GXFContext *gxf = s->priv_data;
+ int i;
+
+ gxf->fc = s;
+ gxf->flags |= 0x00080000; /* material is simple clip */
+ for (i = 0; i < s->nb_streams; ++i) {
+ AVStream *st = s->streams[i];
+ GXFStreamContext *sc = &gxf->streams[i];
+
+ sc->codec = st->codec;
+ sc->index = i;
+ sc->media_type = codec_get_tag(gxf_media_types, sc->codec->codec_id);
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ if (st->codec->codec_id != CODEC_ID_PCM_S16LE) {
+ av_log(s, AV_LOG_ERROR, "only 16 BIT PCM LE allowed for now\n");
+ return -1;
+ }
+ if (st->codec->sample_rate != 48000) {
+ av_log(s, AV_LOG_ERROR, "only 48000hz sampling rate is allowed\n");
+ return -1;
+ }
+ if (st->codec->channels != 1) {
+ av_log(s, AV_LOG_ERROR, "only mono tracks are allowed\n");
+ return -1;
+ }
+ sc->track_type = 2;
+ sc->sample_rate = st->codec->sample_rate;
+ av_set_pts_info(st, 64, 1, sc->sample_rate);
+ sc->sample_size = 16;
+ sc->frame_rate_index = -2;
+ sc->lines_index = -2;
+ sc->fields = -2;
+ gxf->audio_tracks++;
+ gxf->flags |= 0x04000000; /* audio is 16 bit pcm */
+ av_fifo_init(&sc->audio_buffer, 3*GXF_AUDIO_PACKET_SIZE);
+ } else if (sc->codec->codec_type == CODEC_TYPE_VIDEO) {
+ /* FIXME check from time_base ? */
+ if (sc->codec->height == 480 || sc->codec->height == 512) { /* NTSC or NTSC+VBI */
+ sc->frame_rate_index = 5;
+ sc->sample_rate = 60;
+ gxf->flags |= 0x00000080;
+ } else { /* assume PAL */
+ sc->frame_rate_index = 6;
+ sc->media_type++;
+ sc->sample_rate = 50;
+ gxf->flags |= 0x00000040;
+ }
+ gxf->sample_rate = sc->sample_rate;
+ av_set_pts_info(st, 64, 1, sc->sample_rate);
+ if (gxf_find_lines_index(sc) < 0)
+ sc->lines_index = -1;
+ sc->sample_size = st->codec->bit_rate;
+ sc->fields = 2; /* interlaced */
+ switch (sc->codec->codec_id) {
+ case CODEC_ID_MPEG2VIDEO:
+ sc->first_gop_closed = -1;
+ sc->track_type = 4;
+ gxf->mpeg_tracks++;
+ gxf->flags |= 0x00008000;
+ break;
+ case CODEC_ID_DVVIDEO:
+ if (sc->codec->pix_fmt == PIX_FMT_YUV422P) {
+ sc->media_type += 2;
+ sc->track_type = 6;
+ gxf->flags |= 0x00002000;
+ } else {
+ sc->track_type = 5;
+ gxf->flags |= 0x00001000;
+ }
+ break;
+ default:
+ av_log(s, AV_LOG_ERROR, "video codec not supported\n");
+ return -1;
+ }
+ }
+ }
+ gxf_write_map_packet(pb, gxf);
+ //gxf_write_flt_packet(pb, gxf);
+ gxf_write_umf_packet(pb, gxf);
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int gxf_write_eos_packet(ByteIOContext *pb, GXFContext *ctx)
+{
+ offset_t pos = url_ftell(pb);
+
+ gxf_write_packet_header(pb, PKT_EOS);
+ return updatePacketSize(pb, pos);
+}
+
+static int gxf_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ GXFContext *gxf = s->priv_data;
+ offset_t end;
+ int i;
+
+ for (i = 0; i < s->nb_streams; ++i) {
+ if (s->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
+ av_fifo_free(&gxf->streams[i].audio_buffer);
+ }
+ if (s->streams[i]->codec->frame_number > gxf->nb_frames)
+ gxf->nb_frames = 2 * s->streams[i]->codec->frame_number;
+ }
+
+ gxf_write_eos_packet(pb, gxf);
+ end = url_ftell(pb);
+ url_fseek(pb, 0, SEEK_SET);
+ /* overwrite map and umf packets with new values */
+ gxf_write_map_packet(pb, gxf);
+ //gxf_write_flt_packet(pb, gxf);
+ gxf_write_umf_packet(pb, gxf);
+ url_fseek(pb, end, SEEK_SET);
+ return 0;
+}
+
+static int gxf_parse_mpeg_frame(GXFStreamContext *sc, const uint8_t *buf, int size)
+{
+ uint32_t c=-1;
+ int i;
+ for(i=0; i<size-4 && c!=0x100; i++){
+ c = (c<<8) + buf[i];
+ if(c == 0x1B8 && sc->first_gop_closed == -1) /* GOP start code */
+ sc->first_gop_closed= (buf[i+4]>>6)&1;
+ }
+ return (buf[i+1]>>3)&7;
+}
+
+static int gxf_write_media_preamble(ByteIOContext *pb, GXFContext *ctx, AVPacket *pkt, int size)
+{
+ GXFStreamContext *sc = &ctx->streams[pkt->stream_index];
+ int64_t dts = av_rescale(pkt->dts, ctx->sample_rate, sc->sample_rate);
+
+ put_byte(pb, sc->media_type);
+ put_byte(pb, sc->index);
+ put_be32(pb, dts);
+ if (sc->codec->codec_type == CODEC_TYPE_AUDIO) {
+ put_be16(pb, 0);
+ put_be16(pb, size / 2);
+ } else if (sc->codec->codec_id == CODEC_ID_MPEG2VIDEO) {
+ int frame_type = gxf_parse_mpeg_frame(sc, pkt->data, pkt->size);
+ if (frame_type == FF_I_TYPE) {
+ put_byte(pb, 0x0d);
+ sc->iframes++;
+ } else if (frame_type == FF_B_TYPE) {
+ put_byte(pb, 0x0f);
+ sc->bframes++;
+ } else {
+ put_byte(pb, 0x0e);
+ sc->pframes++;
+ }
+ put_be24(pb, size);
+ } else if (sc->codec->codec_id == CODEC_ID_DVVIDEO) {
+ put_byte(pb, size / 4096);
+ put_be24(pb, 0);
+ } else
+ put_be32(pb, size);
+ put_be32(pb, dts);
+ put_byte(pb, 1); /* flags */
+ put_byte(pb, 0); /* reserved */
+ return 16;
+}
+
+static int gxf_write_media_packet(ByteIOContext *pb, GXFContext *ctx, AVPacket *pkt)
+{
+ GXFStreamContext *sc = &ctx->streams[pkt->stream_index];
+ offset_t pos = url_ftell(pb);
+ int padding = 0;
+
+ gxf_write_packet_header(pb, PKT_MEDIA);
+ if (sc->codec->codec_id == CODEC_ID_MPEG2VIDEO && pkt->size % 4) /* MPEG-2 frames must be padded */
+ padding = 4 - pkt->size % 4;
+ else if (sc->codec->codec_type == CODEC_TYPE_AUDIO)
+ padding = GXF_AUDIO_PACKET_SIZE - pkt->size;
+ gxf_write_media_preamble(pb, ctx, pkt, pkt->size + padding);
+ put_buffer(pb, pkt->data, pkt->size);
+ gxf_write_padding(pb, padding);
+ return updatePacketSize(pb, pos);
+}
+
+static int gxf_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ GXFContext *gxf = s->priv_data;
+
+ gxf_write_media_packet(&s->pb, gxf, pkt);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int gxf_new_audio_packet(GXFContext *gxf, GXFStreamContext *sc, AVPacket *pkt, int flush)
+{
+ int size = flush ? av_fifo_size(&sc->audio_buffer) : GXF_AUDIO_PACKET_SIZE;
+
+ if (!size)
+ return 0;
+ av_new_packet(pkt, size);
+ av_fifo_read(&sc->audio_buffer, pkt->data, size);
+ pkt->stream_index = sc->index;
+ pkt->dts = sc->current_dts;
+ sc->current_dts += size / 2; /* we only support 16 bit pcm mono for now */
+ return size;
+}
+
+static int gxf_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush)
+{
+ GXFContext *gxf = s->priv_data;
+ AVPacket new_pkt;
+ int i;
+
+ for (i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ GXFStreamContext *sc = &gxf->streams[i];
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ if (pkt && pkt->stream_index == i) {
+ av_fifo_write(&sc->audio_buffer, pkt->data, pkt->size);
+ pkt = NULL;
+ }
+ if (flush || av_fifo_size(&sc->audio_buffer) >= GXF_AUDIO_PACKET_SIZE) {
+ if (!pkt && gxf_new_audio_packet(gxf, sc, &new_pkt, flush) > 0) {
+ pkt = &new_pkt;
+ break; /* add pkt right now into list */
+ }
+ }
+ } else if (pkt) {
+ /* adjust dts if negative */
+ if (pkt->dts < 0 && !sc->dts_delay) {
+ /* XXX: rescale if codec time base is different from stream time base */
+ sc->dts_delay = av_rescale_q(pkt->dts, st->codec->time_base, st->time_base);
+ pkt->dts = sc->dts_delay; /* set to 0 */
+ }
+ pkt->dts -= sc->dts_delay;
+ }
+ }
+ return av_interleave_packet_per_dts(s, out, pkt, flush);
+}
+
+AVOutputFormat gxf_muxer = {
+ "gxf",
+ "GXF format",
+ NULL,
+ "gxf",
+ sizeof(GXFContext),
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_MPEG2VIDEO,
+ gxf_write_header,
+ gxf_write_packet,
+ gxf_write_trailer,
+ 0,
+ NULL,
+ gxf_interleave_packet,
+};
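
Several writers above (updateSize, updatePacketSize) share the same back-patching pattern: remember the current offset, emit a zero placeholder, write the section body, then seek back and fill in the real size before returning to the end. A compact sketch of the idea against a plain FILE* instead of a ByteIOContext, for illustration only (hypothetical, not part of the patch):

#include <stdio.h>

/* Hypothetical: the back-patching pattern used by updateSize() above,
 * writing a 16-bit big-endian size that excludes the size field itself. */
static long write_sized_section(FILE *f, const void *body, size_t body_len)
{
    long pos = ftell(f);                 /* remember where the size field goes */
    fputc(0, f); fputc(0, f);            /* 16-bit placeholder                 */
    fwrite(body, 1, body_len, f);        /* section payload                    */
    long end = ftell(f);
    unsigned size = (unsigned)(end - pos - 2);
    fseek(f, pos, SEEK_SET);
    fputc((size >> 8) & 0xff, f);        /* patch in the real size             */
    fputc(size & 0xff, f);
    fseek(f, end, SEEK_SET);             /* resume writing at the end          */
    return end - pos;
}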
diff --git a/contrib/ffmpeg/libavformat/http.c b/contrib/ffmpeg/libavformat/http.c
new file mode 100644
index 000000000..34dd5031a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/http.c
@@ -0,0 +1,289 @@
+/*
+ * HTTP protocol for ffmpeg client
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+
+#include "base64.h"
+
+/* XXX: POST protocol is not completely implemented because ffmpeg uses
+ only a subset of it */
+
+//#define DEBUG
+
+/* used for protocol handling */
+#define BUFFER_SIZE 1024
+#define URL_SIZE 4096
+
+typedef struct {
+ URLContext *hd;
+ unsigned char buffer[BUFFER_SIZE], *buf_ptr, *buf_end;
+ int line_count;
+ int http_code;
+ char location[URL_SIZE];
+} HTTPContext;
+
+static int http_connect(URLContext *h, const char *path, const char *hoststr,
+ const char *auth);
+static int http_write(URLContext *h, uint8_t *buf, int size);
+
+
+/* return non-zero on error */
+static int http_open(URLContext *h, const char *uri, int flags)
+{
+ const char *path, *proxy_path;
+ char hostname[1024], hoststr[1024];
+ char auth[1024];
+ char path1[1024];
+ char buf[1024];
+ int port, use_proxy, err;
+ HTTPContext *s;
+ URLContext *hd = NULL;
+
+ h->is_streamed = 1;
+
+ s = av_malloc(sizeof(HTTPContext));
+ if (!s) {
+ return -ENOMEM;
+ }
+ h->priv_data = s;
+
+ proxy_path = getenv("http_proxy");
+ use_proxy = (proxy_path != NULL) && !getenv("no_proxy") &&
+ strstart(proxy_path, "http://", NULL);
+
+ /* fill the dest addr */
+ redo:
+ /* needed in any case to build the host string */
+ url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
+ path1, sizeof(path1), uri);
+ if (port > 0) {
+ snprintf(hoststr, sizeof(hoststr), "%s:%d", hostname, port);
+ } else {
+ pstrcpy(hoststr, sizeof(hoststr), hostname);
+ }
+
+ if (use_proxy) {
+ url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
+ NULL, 0, proxy_path);
+ path = uri;
+ } else {
+ if (path1[0] == '\0')
+ path = "/";
+ else
+ path = path1;
+ }
+ if (port < 0)
+ port = 80;
+
+ snprintf(buf, sizeof(buf), "tcp://%s:%d", hostname, port);
+ err = url_open(&hd, buf, URL_RDWR);
+ if (err < 0)
+ goto fail;
+
+ s->hd = hd;
+ if (http_connect(h, path, hoststr, auth) < 0)
+ goto fail;
+ if (s->http_code == 303 && s->location[0] != '\0') {
+ /* url moved, get next */
+ uri = s->location;
+ url_close(hd);
+ goto redo;
+ }
+ return 0;
+ fail:
+ if (hd)
+ url_close(hd);
+ av_free(s);
+ return AVERROR_IO;
+}
+
+static int http_getc(HTTPContext *s)
+{
+ int len;
+ if (s->buf_ptr >= s->buf_end) {
+ len = url_read(s->hd, s->buffer, BUFFER_SIZE);
+ if (len < 0) {
+ return AVERROR_IO;
+ } else if (len == 0) {
+ return -1;
+ } else {
+ s->buf_ptr = s->buffer;
+ s->buf_end = s->buffer + len;
+ }
+ }
+ return *s->buf_ptr++;
+}
+
+static int process_line(HTTPContext *s, char *line, int line_count)
+{
+ char *tag, *p;
+
+ /* end of header */
+ if (line[0] == '\0')
+ return 0;
+
+ p = line;
+ if (line_count == 0) {
+ while (!isspace(*p) && *p != '\0')
+ p++;
+ while (isspace(*p))
+ p++;
+ s->http_code = strtol(p, NULL, 10);
+#ifdef DEBUG
+ printf("http_code=%d\n", s->http_code);
+#endif
+ } else {
+ while (*p != '\0' && *p != ':')
+ p++;
+ if (*p != ':')
+ return 1;
+
+ *p = '\0';
+ tag = line;
+ p++;
+ while (isspace(*p))
+ p++;
+ if (!strcmp(tag, "Location")) {
+ strcpy(s->location, p);
+ }
+ }
+ return 1;
+}
+
+static int http_connect(URLContext *h, const char *path, const char *hoststr,
+ const char *auth)
+{
+ HTTPContext *s = h->priv_data;
+ int post, err, ch;
+ char line[1024], *q;
+ char *auth_b64;
+
+
+ /* send http header */
+ post = h->flags & URL_WRONLY;
+
+ auth_b64 = av_base64_encode((uint8_t *)auth, strlen(auth));
+ snprintf(s->buffer, sizeof(s->buffer),
+ "%s %s HTTP/1.0\r\n"
+ "User-Agent: %s\r\n"
+ "Accept: */*\r\n"
+ "Host: %s\r\n"
+ "Authorization: Basic %s\r\n"
+ "\r\n",
+ post ? "POST" : "GET",
+ path,
+ LIBAVFORMAT_IDENT,
+ hoststr,
+ auth_b64);
+
+ av_freep(&auth_b64);
+ if (http_write(h, s->buffer, strlen(s->buffer)) < 0)
+ return AVERROR_IO;
+
+ /* init input buffer */
+ s->buf_ptr = s->buffer;
+ s->buf_end = s->buffer;
+ s->line_count = 0;
+ s->location[0] = '\0';
+ if (post) {
+ sleep(1);
+ return 0;
+ }
+
+ /* wait for header */
+ q = line;
+ for(;;) {
+ ch = http_getc(s);
+ if (ch < 0)
+ return AVERROR_IO;
+ if (ch == '\n') {
+ /* process line */
+ if (q > line && q[-1] == '\r')
+ q--;
+ *q = '\0';
+#ifdef DEBUG
+ printf("header='%s'\n", line);
+#endif
+ err = process_line(s, line, s->line_count);
+ if (err < 0)
+ return err;
+ if (err == 0)
+ return 0;
+ s->line_count++;
+ q = line;
+ } else {
+ if ((q - line) < sizeof(line) - 1)
+ *q++ = ch;
+ }
+ }
+}
+
+
+static int http_read(URLContext *h, uint8_t *buf, int size)
+{
+ HTTPContext *s = h->priv_data;
+ int len;
+
+ /* read bytes from input buffer first */
+ len = s->buf_end - s->buf_ptr;
+ if (len > 0) {
+ if (len > size)
+ len = size;
+ memcpy(buf, s->buf_ptr, len);
+ s->buf_ptr += len;
+ } else {
+ len = url_read(s->hd, buf, size);
+ }
+ return len;
+}
+
+/* used only when posting data */
+static int http_write(URLContext *h, uint8_t *buf, int size)
+{
+ HTTPContext *s = h->priv_data;
+ return url_write(s->hd, buf, size);
+}
+
+static int http_close(URLContext *h)
+{
+ HTTPContext *s = h->priv_data;
+ url_close(s->hd);
+ av_free(s);
+ return 0;
+}
+
+URLProtocol http_protocol = {
+ "http",
+ http_open,
+ http_read,
+ http_write,
+ NULL, /* seek */
+ http_close,
+};
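
http_connect above reads the server reply one line at a time; process_line pulls the numeric status code out of the first line and remembers any Location header so http_open can follow a 303 redirect. A stand-alone sketch of that status-line parsing, assuming a NUL-terminated line (hypothetical, not part of the patch):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical: extract the status code the way process_line() does,
 * by skipping the "HTTP/1.x" token and converting the next field. */
static int parse_status_line(const char *line)
{
    const char *p = line;
    while (*p && !isspace((unsigned char)*p))
        p++;                              /* skip "HTTP/1.x"    */
    while (isspace((unsigned char)*p))
        p++;                              /* skip the separator */
    return (int)strtol(p, NULL, 10);
}

int main(void)
{
    printf("%d\n", parse_status_line("HTTP/1.0 303 See Other")); /* prints 303 */
    return 0;
}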
diff --git a/contrib/ffmpeg/libavformat/idcin.c b/contrib/ffmpeg/libavformat/idcin.c
new file mode 100644
index 000000000..48d1e250d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/idcin.c
@@ -0,0 +1,301 @@
+/*
+ * Id Quake II CIN File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file idcin.c
+ * Id Quake II CIN file demuxer by Mike Melanson (melanson@pcisys.net)
+ * For more information about the Id CIN format, visit:
+ * http://www.csse.monash.edu.au/~timf/
+ *
+ * CIN is a somewhat quirky and ill-defined format. Here are some notes
+ * for anyone trying to understand the technical details of this format:
+ *
+ * The format has no definite file signature. This is problematic for a
+ * general-purpose media player that wants to automatically detect file
+ * types. However, a CIN file does start with 5 32-bit numbers that
+ * specify audio and video parameters. This demuxer gets around the lack
+ * of file signature by performing sanity checks on those parameters.
+ * Probabilistically, this is a reasonable solution since the number of
+ * valid combinations of the 5 parameters is a very small subset of the
+ * total 160-bit number space.
+ *
+ * Refer to the function idcin_probe() for the precise A/V parameters
+ * that this demuxer allows.
+ *
+ * Next, each audio and video frame has a duration of 1/14 sec. If the
+ * audio sample rate is a multiple of the common frequency 22050 Hz it will
+ * divide evenly by 14. However, if the sample rate is 11025 Hz:
+ * 11025 (samples/sec) / 14 (frames/sec) = 787.5 (samples/frame)
+ * The way the CIN stores audio in this case is by storing 787 sample
+ * frames in the first audio frame and 788 sample frames in the second
+ * audio frame. Therefore, the total number of bytes in an audio frame
+ * is given as:
+ * audio frame #0: 787 * (bytes/sample) * (# channels) bytes in frame
+ * audio frame #1: 788 * (bytes/sample) * (# channels) bytes in frame
+ * audio frame #2: 787 * (bytes/sample) * (# channels) bytes in frame
+ * audio frame #3: 788 * (bytes/sample) * (# channels) bytes in frame
+ *
+ * Finally, not all Id CIN creation tools agree on the resolution of the
+ * color palette, apparently. Some creation tools specify red, green, and
+ * blue palette components in terms of 6-bit VGA color DAC values which
+ * range from 0..63. Other tools specify the RGB components as full 8-bit
+ * values that range from 0..255. Since there are no markers in the file to
+ * differentiate between the two variants, this demuxer uses the following
+ * heuristic:
+ * - load the 768 palette bytes from disk
+ * - assume that they will need to be shifted left by 2 bits to
+ * transform them from 6-bit values to 8-bit values
+ * - scan through all 768 palette bytes
+ * - if any bytes exceed 63, do not shift the bytes at all before
+ * transmitting them to the video decoder
+ */
+
+#include "avformat.h"
+
+#define HUFFMAN_TABLE_SIZE (64 * 1024)
+#define FRAME_PTS_INC (90000 / 14)
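+
+/*
+ * Worked example of the audio chunk sizing described in the header comment,
+ * assuming an 11025 Hz, 16-bit (2 bytes/sample), mono stream:
+ *   chunk size 1 = (11025 / 14)     * 2 * 1 = 787 * 2 = 1574 bytes
+ *   chunk size 2 = (11025 / 14 + 1) * 2 * 1 = 788 * 2 = 1576 bytes
+ * Alternating between the two averages out to the exact 787.5 samples/frame.
+ */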
+
+typedef struct IdcinDemuxContext {
+ int video_stream_index;
+ int audio_stream_index;
+ int audio_chunk_size1;
+ int audio_chunk_size2;
+
+ /* demux state variables */
+ int current_audio_chunk;
+ int next_chunk_is_video;
+ int audio_present;
+
+ int64_t pts;
+
+ AVPaletteControl palctrl;
+} IdcinDemuxContext;
+
+static int idcin_probe(AVProbeData *p)
+{
+ unsigned int number;
+
+ /*
+ * This is what you could call a "probabilistic" file check: Id CIN
+ * files don't have a definite file signature. In lieu of such a marker,
+ * perform sanity checks on the 5 32-bit header fields:
+ * width, height: greater than 0, less than or equal to 1024
+ * audio sample rate: greater than or equal to 8000, less than or
+ * equal to 48000, or 0 for no audio
+ * audio sample width (bytes/sample): 0 for no audio, or 1 or 2
+ * audio channels: 0 for no audio, or 1 or 2
+ */
+
+ /* cannot proceed without 20 bytes */
+ if (p->buf_size < 20)
+ return 0;
+
+ /* check the video width */
+ number = LE_32(&p->buf[0]);
+ if ((number == 0) || (number > 1024))
+ return 0;
+
+ /* check the video height */
+ number = LE_32(&p->buf[4]);
+ if ((number == 0) || (number > 1024))
+ return 0;
+
+ /* check the audio sample rate */
+ number = LE_32(&p->buf[8]);
+ if ((number != 0) && ((number < 8000) || (number > 48000)))
+ return 0;
+
+ /* check the audio bytes/sample */
+ number = LE_32(&p->buf[12]);
+ if (number > 2)
+ return 0;
+
+ /* check the audio channels */
+ number = LE_32(&p->buf[16]);
+ if (number > 2)
+ return 0;
+
+ /* return half certainty since this check is a bit sketchy */
+ return AVPROBE_SCORE_MAX / 2;
+}
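+
+/*
+ * Rough selectivity estimate for the probe above on random input: width and
+ * height each admit 1024 of 2^32 values (~2^-22 each), the sample rate about
+ * 40000 of 2^32 (~2^-17), and bytes/sample and channels 3 of 2^32 each
+ * (~2^-30). The combined false-positive probability is therefore on the
+ * order of 2^-120, which is why half the maximum score is still a safe
+ * return value.
+ */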
+
+static int idcin_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ IdcinDemuxContext *idcin = (IdcinDemuxContext *)s->priv_data;
+ AVStream *st;
+ unsigned int width, height;
+ unsigned int sample_rate, bytes_per_sample, channels;
+
+ /* get the 5 header parameters */
+ width = get_le32(pb);
+ height = get_le32(pb);
+ sample_rate = get_le32(pb);
+ bytes_per_sample = get_le32(pb);
+ channels = get_le32(pb);
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ idcin->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_IDCIN;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = width;
+ st->codec->height = height;
+
+ /* load up the Huffman tables into extradata */
+ st->codec->extradata_size = HUFFMAN_TABLE_SIZE;
+ st->codec->extradata = av_malloc(HUFFMAN_TABLE_SIZE);
+ if (get_buffer(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
+ HUFFMAN_TABLE_SIZE)
+ return AVERROR_IO;
+ /* save a reference in order to transport the palette */
+ st->codec->palctrl = &idcin->palctrl;
+
+ /* if sample rate is 0, assume no audio */
+ if (sample_rate) {
+ idcin->audio_present = 1;
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ idcin->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = 1;
+ st->codec->channels = channels;
+ st->codec->sample_rate = sample_rate;
+ st->codec->bits_per_sample = bytes_per_sample * 8;
+ st->codec->bit_rate = sample_rate * bytes_per_sample * 8 * channels;
+ st->codec->block_align = bytes_per_sample * channels;
+ if (bytes_per_sample == 1)
+ st->codec->codec_id = CODEC_ID_PCM_U8;
+ else
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+
+ if (sample_rate % 14 != 0) {
+ idcin->audio_chunk_size1 = (sample_rate / 14) *
+ bytes_per_sample * channels;
+ idcin->audio_chunk_size2 = (sample_rate / 14 + 1) *
+ bytes_per_sample * channels;
+ } else {
+ idcin->audio_chunk_size1 = idcin->audio_chunk_size2 =
+ (sample_rate / 14) * bytes_per_sample * channels;
+ }
+ idcin->current_audio_chunk = 0;
+ } else
+ idcin->audio_present = 0;
+
+ idcin->next_chunk_is_video = 1;
+ idcin->pts = 0;
+
+ return 0;
+}
+
+static int idcin_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ int ret;
+ unsigned int command;
+ unsigned int chunk_size;
+ IdcinDemuxContext *idcin = (IdcinDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int i;
+ int palette_scale;
+ unsigned char r, g, b;
+ unsigned char palette_buffer[768];
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+
+ if (idcin->next_chunk_is_video) {
+ command = get_le32(pb);
+ if (command == 2) {
+ return AVERROR_IO;
+ } else if (command == 1) {
+ /* trigger a palette change */
+ idcin->palctrl.palette_changed = 1;
+ if (get_buffer(pb, palette_buffer, 768) != 768)
+ return AVERROR_IO;
+ /* scale the palette as necessary */
+ palette_scale = 2;
+ for (i = 0; i < 768; i++)
+ if (palette_buffer[i] > 63) {
+ palette_scale = 0;
+ break;
+ }
+
+ for (i = 0; i < 256; i++) {
+ r = palette_buffer[i * 3 ] << palette_scale;
+ g = palette_buffer[i * 3 + 1] << palette_scale;
+ b = palette_buffer[i * 3 + 2] << palette_scale;
+ idcin->palctrl.palette[i] = (r << 16) | (g << 8) | (b);
+ }
+ }
+
+ chunk_size = get_le32(pb);
+ /* skip the number of decoded bytes (always equal to width * height) */
+ url_fseek(pb, 4, SEEK_CUR);
+ chunk_size -= 4;
+ ret= av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ return AVERROR_IO;
+ pkt->stream_index = idcin->video_stream_index;
+ pkt->pts = idcin->pts;
+ } else {
+ /* send out the audio chunk */
+ if (idcin->current_audio_chunk)
+ chunk_size = idcin->audio_chunk_size2;
+ else
+ chunk_size = idcin->audio_chunk_size1;
+ ret= av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ return AVERROR_IO;
+ pkt->stream_index = idcin->audio_stream_index;
+ pkt->pts = idcin->pts;
+
+ idcin->current_audio_chunk ^= 1;
+ idcin->pts += FRAME_PTS_INC;
+ }
+
+ if (idcin->audio_present)
+ idcin->next_chunk_is_video ^= 1;
+
+ return ret;
+}
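+
+/*
+ * Resulting packet sequence for a file with audio, with FRAME_PTS_INC =
+ * 90000 / 14 = 6428 ticks of the 90 kHz clock:
+ *   video pts 0, audio pts 0, video pts 6428, audio pts 6428,
+ *   video pts 12856, audio pts 12856, ...
+ * Without audio, next_chunk_is_video never toggles and every packet is video.
+ */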
+
+static int idcin_read_close(AVFormatContext *s)
+{
+
+ return 0;
+}
+
+AVInputFormat idcin_demuxer = {
+ "idcin",
+ "Id CIN format",
+ sizeof(IdcinDemuxContext),
+ idcin_probe,
+ idcin_read_header,
+ idcin_read_packet,
+ idcin_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/idroq.c b/contrib/ffmpeg/libavformat/idroq.c
new file mode 100644
index 000000000..419696c9a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/idroq.c
@@ -0,0 +1,291 @@
+/*
+ * Id RoQ (.roq) File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file idroq.c
+ * Id RoQ format file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the .roq file format, visit:
+ * http://www.csse.monash.edu.au/~timf/
+ */
+
+#include "avformat.h"
+
+#define RoQ_MAGIC_NUMBER 0x1084
+#define RoQ_CHUNK_PREAMBLE_SIZE 8
+#define RoQ_AUDIO_SAMPLE_RATE 22050
+#define RoQ_CHUNKS_TO_SCAN 30
+
+#define RoQ_INFO 0x1001
+#define RoQ_QUAD_CODEBOOK 0x1002
+#define RoQ_QUAD_VQ 0x1011
+#define RoQ_SOUND_MONO 0x1020
+#define RoQ_SOUND_STEREO 0x1021
+
+typedef struct RoqDemuxContext {
+
+ int width;
+ int height;
+ int audio_channels;
+ int framerate;
+ int frame_pts_inc;
+
+ int video_stream_index;
+ int audio_stream_index;
+
+ int64_t video_pts;
+ unsigned int audio_frame_count;
+
+} RoqDemuxContext;
+
+static int roq_probe(AVProbeData *p)
+{
+ if (p->buf_size < 6)
+ return 0;
+
+ if ((LE_16(&p->buf[0]) != RoQ_MAGIC_NUMBER) ||
+ (LE_32(&p->buf[2]) != 0xFFFFFFFF))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
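+
+/*
+ * Layout of the 8-byte preamble parsed throughout this demuxer (all fields
+ * little-endian): bytes 0-1 are the chunk id, bytes 2-5 the payload size and
+ * bytes 6-7 a chunk argument. The file signature checked above is simply a
+ * chunk with id 0x1084 and size 0xFFFFFFFF whose argument holds the frame
+ * rate.
+ */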
+
+static int roq_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ RoqDemuxContext *roq = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
+ int i;
+ unsigned int chunk_size;
+ unsigned int chunk_type;
+
+ /* get the main header */
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ roq->framerate = LE_16(&preamble[6]);
+ roq->frame_pts_inc = 90000 / roq->framerate;
+
+ /* init private context parameters */
+ roq->width = roq->height = roq->audio_channels = roq->video_pts =
+ roq->audio_frame_count = 0;
+
+ /* scan the first n chunks searching for A/V parameters */
+ for (i = 0; i < RoQ_CHUNKS_TO_SCAN; i++) {
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+
+ chunk_type = LE_16(&preamble[0]);
+ chunk_size = LE_32(&preamble[2]);
+
+ switch (chunk_type) {
+
+ case RoQ_INFO:
+ /* fetch the width and height; reuse the preamble bytes */
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ roq->width = LE_16(&preamble[0]);
+ roq->height = LE_16(&preamble[2]);
+ break;
+
+ case RoQ_QUAD_CODEBOOK:
+ case RoQ_QUAD_VQ:
+ /* ignore during this scan */
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ break;
+
+ case RoQ_SOUND_MONO:
+ roq->audio_channels = 1;
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ break;
+
+ case RoQ_SOUND_STEREO:
+ roq->audio_channels = 2;
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ break;
+
+ default:
+ av_log(s, AV_LOG_ERROR, " unknown RoQ chunk type (%04X)\n", LE_16(&preamble[0]));
+ return AVERROR_INVALIDDATA;
+ break;
+ }
+
+ /* if all necessary parameters have been gathered, exit early */
+ if ((roq->width && roq->height) && roq->audio_channels)
+ break;
+ }
+
+ /* seek back to the first chunk */
+ url_fseek(pb, RoQ_CHUNK_PREAMBLE_SIZE, SEEK_SET);
+
+ /* initialize the decoders */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ /* set the pts reference (1 pts = 1/90000) */
+ av_set_pts_info(st, 33, 1, 90000);
+ roq->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_ROQ;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = roq->width;
+ st->codec->height = roq->height;
+
+ if (roq->audio_channels) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ roq->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_ROQ_DPCM;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = roq->audio_channels;
+ st->codec->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
+ st->codec->bits_per_sample = 16;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+ }
+
+ return 0;
+}
+
+static int roq_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ RoqDemuxContext *roq = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret = 0;
+ unsigned int chunk_size;
+ unsigned int chunk_type;
+ unsigned int codebook_size;
+ unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
+ int packet_read = 0;
+ offset_t codebook_offset;
+
+ while (!packet_read) {
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+
+ /* get the next chunk preamble */
+ if ((ret = get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+
+ chunk_type = LE_16(&preamble[0]);
+ chunk_size = LE_32(&preamble[2]);
+ if(chunk_size > INT_MAX)
+ return AVERROR_INVALIDDATA;
+
+ switch (chunk_type) {
+
+ case RoQ_INFO:
+ /* don't care about this chunk anymore */
+ url_fseek(pb, RoQ_CHUNK_PREAMBLE_SIZE, SEEK_CUR);
+ break;
+
+ case RoQ_QUAD_CODEBOOK:
+ /* packet needs to contain both this codebook and next VQ chunk */
+ codebook_offset = url_ftell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
+ codebook_size = chunk_size;
+ url_fseek(pb, codebook_size, SEEK_CUR);
+ if (get_buffer(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
+ RoQ_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ chunk_size = LE_32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
+ codebook_size;
+
+ /* rewind */
+ url_fseek(pb, codebook_offset, SEEK_SET);
+
+ /* load up the packet */
+ ret= av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ return AVERROR_IO;
+ pkt->stream_index = roq->video_stream_index;
+ pkt->pts = roq->video_pts;
+
+ roq->video_pts += roq->frame_pts_inc;
+ packet_read = 1;
+ break;
+
+ case RoQ_SOUND_MONO:
+ case RoQ_SOUND_STEREO:
+ case RoQ_QUAD_VQ:
+ /* load up the packet */
+ if (av_new_packet(pkt, chunk_size + RoQ_CHUNK_PREAMBLE_SIZE))
+ return AVERROR_IO;
+ /* copy over preamble */
+ memcpy(pkt->data, preamble, RoQ_CHUNK_PREAMBLE_SIZE);
+
+ if (chunk_type == RoQ_QUAD_VQ) {
+ pkt->stream_index = roq->video_stream_index;
+ pkt->pts = roq->video_pts;
+ roq->video_pts += roq->frame_pts_inc;
+ } else {
+ pkt->stream_index = roq->audio_stream_index;
+ pkt->pts = roq->audio_frame_count;
+ pkt->pts *= 90000;
+ pkt->pts /= RoQ_AUDIO_SAMPLE_RATE;
+ roq->audio_frame_count += (chunk_size / roq->audio_channels);
+ }
+
+ pkt->pos= url_ftell(pb);
+ ret = get_buffer(pb, pkt->data + RoQ_CHUNK_PREAMBLE_SIZE,
+ chunk_size);
+ if (ret != chunk_size)
+ ret = AVERROR_IO;
+
+ packet_read = 1;
+ break;
+
+ default:
+ av_log(s, AV_LOG_ERROR, " unknown RoQ chunk (%04X)\n", chunk_type);
+ return AVERROR_INVALIDDATA;
+ break;
+ }
+ }
+
+ return ret;
+}
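+
+/*
+ * Packet layout produced by the RoQ_QUAD_CODEBOOK path above:
+ *   [codebook preamble (8)] [codebook data] [VQ preamble (8)] [VQ data]
+ * so the recomputed chunk_size equals codebook_size plus the VQ payload size
+ * plus 2 * RoQ_CHUNK_PREAMBLE_SIZE, which is exactly what is read back after
+ * rewinding to codebook_offset.
+ */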
+
+static int roq_read_close(AVFormatContext *s)
+{
+// RoqDemuxContext *roq = (RoqDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+AVInputFormat roq_demuxer = {
+ "RoQ",
+ "Id RoQ format",
+ sizeof(RoqDemuxContext),
+ roq_probe,
+ roq_read_header,
+ roq_read_packet,
+ roq_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/img.c b/contrib/ffmpeg/libavformat/img.c
new file mode 100644
index 000000000..5223c691e
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/img.c
@@ -0,0 +1,400 @@
+/*
+ * Image format
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+typedef struct {
+ int width;
+ int height;
+ int img_first;
+ int img_last;
+ int img_number;
+ int img_count;
+ int img_size;
+ AVImageFormat *img_fmt;
+ int pix_fmt;
+ int is_pipe;
+ char path[1024];
+ /* temporary usage */
+ void *ptr;
+} VideoData;
+
+
+/* return -1 if no image found */
+static int find_image_range(int *pfirst_index, int *plast_index,
+ const char *path)
+{
+ char buf[1024];
+ int range, last_index, range1, first_index;
+
+ /* find the first image */
+ for(first_index = 0; first_index < 5; first_index++) {
+ if (av_get_frame_filename(buf, sizeof(buf), path, first_index) < 0)
+ goto fail;
+ if (url_exist(buf))
+ break;
+ }
+ if (first_index == 5)
+ goto fail;
+
+ /* find the last image */
+ last_index = first_index;
+ for(;;) {
+ range = 0;
+ for(;;) {
+ if (!range)
+ range1 = 1;
+ else
+ range1 = 2 * range;
+ if (av_get_frame_filename(buf, sizeof(buf), path,
+ last_index + range1) < 0)
+ goto fail;
+ if (!url_exist(buf))
+ break;
+ range = range1;
+ /* just in case... */
+ if (range >= (1 << 30))
+ goto fail;
+ }
+ /* we are sure that image last_index + range exists */
+ if (!range)
+ break;
+ last_index += range;
+ }
+ *pfirst_index = first_index;
+ *plast_index = last_index;
+ return 0;
+ fail:
+ return -1;
+}
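+
+/*
+ * Example of the doubling search above on a sequence numbered 0 through 13:
+ * from last_index 0 it probes +1, +2, +4, +8 (all present), then +16
+ * (missing), advancing to 8; probes +1, +2, +4 (present), +8 (missing),
+ * advancing to 12; probes +1 (present), +2 (missing), advancing to 13; a
+ * final +1 probe fails with range still 0, so 13 is returned as the last
+ * index.
+ */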
+
+
+static int image_probe(AVProbeData *p)
+{
+ if (av_filename_number_test(p->filename) && guess_image_format(p->filename))
+ return AVPROBE_SCORE_MAX-1;
+ else
+ return 0;
+}
+
+static int read_header_alloc_cb(void *opaque, AVImageInfo *info)
+{
+ VideoData *s = opaque;
+
+ s->width = info->width;
+ s->height = info->height;
+ s->pix_fmt = info->pix_fmt;
+ /* stop image reading but no error */
+ return 1;
+}
+
+static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ int ret, first_index, last_index;
+ char buf[1024];
+ ByteIOContext pb1, *f = &pb1;
+ AVStream *st;
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+
+ if (ap->image_format)
+ s->img_fmt = ap->image_format;
+
+ pstrcpy(s->path, sizeof(s->path), s1->filename);
+ s->img_number = 0;
+ s->img_count = 0;
+
+ /* find format */
+ if (s1->iformat->flags & AVFMT_NOFILE)
+ s->is_pipe = 0;
+ else
+ s->is_pipe = 1;
+
+ if (!ap->time_base.num) {
+ st->codec->time_base= (AVRational){1,25};
+ } else {
+ st->codec->time_base= ap->time_base;
+ }
+
+ if (!s->is_pipe) {
+ if (find_image_range(&first_index, &last_index, s->path) < 0)
+ goto fail;
+ s->img_first = first_index;
+ s->img_last = last_index;
+ s->img_number = first_index;
+ /* compute duration */
+ st->start_time = 0;
+ st->duration = last_index - first_index + 1;
+ if (av_get_frame_filename(buf, sizeof(buf), s->path, s->img_number) < 0)
+ goto fail;
+ if (url_fopen(f, buf, URL_RDONLY) < 0)
+ goto fail;
+ } else {
+ f = &s1->pb;
+ }
+
+ ret = av_read_image(f, s1->filename, s->img_fmt, read_header_alloc_cb, s);
+ if (ret < 0)
+ goto fail1;
+
+ if (!s->is_pipe) {
+ url_fclose(f);
+ } else {
+ url_fseek(f, 0, SEEK_SET);
+ }
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = s->width;
+ st->codec->height = s->height;
+ st->codec->pix_fmt = s->pix_fmt;
+ s->img_size = avpicture_get_size(s->pix_fmt, (s->width+15)&(~15), (s->height+15)&(~15));
+
+ return 0;
+ fail1:
+ if (!s->is_pipe)
+ url_fclose(f);
+ fail:
+ return AVERROR_IO;
+}
+
+static int read_packet_alloc_cb(void *opaque, AVImageInfo *info)
+{
+ VideoData *s = opaque;
+
+ if (info->width != s->width ||
+ info->height != s->height)
+ return -1;
+ avpicture_fill(&info->pict, s->ptr, info->pix_fmt, (info->width+15)&(~15), (info->height+15)&(~15));
+ return 0;
+}
+
+static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+ char filename[1024];
+ int ret;
+ ByteIOContext f1, *f;
+
+ if (!s->is_pipe) {
+ /* loop over input */
+ if (s1->loop_input && s->img_number > s->img_last) {
+ s->img_number = s->img_first;
+ }
+ if (av_get_frame_filename(filename, sizeof(filename),
+ s->path, s->img_number) < 0)
+ return AVERROR_IO;
+ f = &f1;
+ if (url_fopen(f, filename, URL_RDONLY) < 0)
+ return AVERROR_IO;
+ } else {
+ f = &s1->pb;
+ if (url_feof(f))
+ return AVERROR_IO;
+ }
+
+ av_new_packet(pkt, s->img_size);
+ pkt->stream_index = 0;
+
+ s->ptr = pkt->data;
+ ret = av_read_image(f, filename, s->img_fmt, read_packet_alloc_cb, s);
+ if (!s->is_pipe) {
+ url_fclose(f);
+ }
+
+ if (ret < 0) {
+ av_free_packet(pkt);
+ return AVERROR_IO; /* signal EOF */
+ } else {
+ /* XXX: computing this pts is not necessary as it is done in
+ the generic code too */
+ pkt->pts = av_rescale((int64_t)s->img_count * s1->streams[0]->codec->time_base.num, s1->streams[0]->time_base.den, s1->streams[0]->codec->time_base.den) / s1->streams[0]->time_base.num;
+ s->img_count++;
+ s->img_number++;
+ return 0;
+ }
+}
+
+static int img_read_close(AVFormatContext *s1)
+{
+ return 0;
+}
+
+/******************************************************/
+/* image output */
+
+static int img_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
+{
+ VideoData *img = s->priv_data;
+ AVStream *st;
+ AVImageFormat *img_fmt;
+ int i;
+
+ /* find output image format */
+ if (ap->image_format) {
+ img_fmt = ap->image_format;
+ } else {
+ img_fmt = guess_image_format(s->filename);
+ }
+ if (!img_fmt)
+ return -1;
+
+ if (s->nb_streams != 1)
+ return -1;
+
+ st = s->streams[0];
+ /* we select the first matching format */
+ for(i=0;i<PIX_FMT_NB;i++) {
+ if (img_fmt->supported_pixel_formats & (1 << i))
+ break;
+ }
+ if (i >= PIX_FMT_NB)
+ return -1;
+ img->img_fmt = img_fmt;
+ img->pix_fmt = i;
+ st->codec->pix_fmt = img->pix_fmt;
+ return 0;
+}
+
+static int img_write_header(AVFormatContext *s)
+{
+ VideoData *img = s->priv_data;
+
+ img->img_number = 1;
+ pstrcpy(img->path, sizeof(img->path), s->filename);
+
+ /* find format */
+ if (s->oformat->flags & AVFMT_NOFILE)
+ img->is_pipe = 0;
+ else
+ img->is_pipe = 1;
+
+ return 0;
+}
+
+static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ VideoData *img = s->priv_data;
+ AVStream *st = s->streams[pkt->stream_index];
+ ByteIOContext pb1, *pb;
+ AVPicture *picture;
+ int width, height, ret;
+ char filename[1024];
+ AVImageInfo info;
+
+ width = st->codec->width;
+ height = st->codec->height;
+
+ picture = (AVPicture *)pkt->data;
+
+ if (!img->is_pipe) {
+ if (av_get_frame_filename(filename, sizeof(filename),
+ img->path, img->img_number) < 0)
+ return AVERROR_IO;
+ pb = &pb1;
+ if (url_fopen(pb, filename, URL_WRONLY) < 0)
+ return AVERROR_IO;
+ } else {
+ pb = &s->pb;
+ }
+ info.width = width;
+ info.height = height;
+ info.pix_fmt = st->codec->pix_fmt;
+ info.interleaved = 0; /* FIXME: there should be a way to set it right */
+ info.pict = *picture;
+ ret = av_write_image(pb, img->img_fmt, &info);
+ if (!img->is_pipe) {
+ url_fclose(pb);
+ }
+
+ img->img_number++;
+ return 0;
+}
+
+static int img_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+/* input */
+#ifdef CONFIG_IMAGE_DEMUXER
+AVInputFormat image_demuxer = {
+ "image",
+ "image sequence",
+ sizeof(VideoData),
+ image_probe,
+ img_read_header,
+ img_read_packet,
+ img_read_close,
+ NULL,
+ NULL,
+ AVFMT_NOFILE | AVFMT_NEEDNUMBER,
+};
+#endif
+#ifdef CONFIG_IMAGEPIPE_DEMUXER
+AVInputFormat imagepipe_demuxer = {
+ "imagepipe",
+ "piped image sequence",
+ sizeof(VideoData),
+ NULL, /* no probe */
+ img_read_header,
+ img_read_packet,
+ img_read_close,
+ NULL,
+};
+#endif
+
+/* output */
+#ifdef CONFIG_IMAGE_MUXER
+AVOutputFormat image_muxer = {
+ "image",
+ "image sequence",
+ "",
+ "",
+ sizeof(VideoData),
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ img_write_header,
+ img_write_packet,
+ img_write_trailer,
+ AVFMT_NOFILE | AVFMT_NEEDNUMBER | AVFMT_RAWPICTURE,
+ img_set_parameters,
+};
+#endif
+#ifdef CONFIG_IMAGEPIPE_MUXER
+AVOutputFormat imagepipe_muxer = {
+ "imagepipe",
+ "piped image sequence",
+ "",
+ "",
+ sizeof(VideoData),
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ img_write_header,
+ img_write_packet,
+ img_write_trailer,
+ AVFMT_RAWPICTURE,
+ img_set_parameters,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/img2.c b/contrib/ffmpeg/libavformat/img2.c
new file mode 100644
index 000000000..303190ad2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/img2.c
@@ -0,0 +1,425 @@
+/*
+ * Image format
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ * Copyright (c) 2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+typedef struct {
+ int img_first;
+ int img_last;
+ int img_number;
+ int img_count;
+ int is_pipe;
+ char path[1024];
+} VideoData;
+
+typedef struct {
+ enum CodecID id;
+ const char *str;
+} IdStrMap;
+
+static const IdStrMap img_tags[] = {
+ { CODEC_ID_MJPEG , "jpeg"},
+ { CODEC_ID_MJPEG , "jpg"},
+ { CODEC_ID_LJPEG , "ljpg"},
+ { CODEC_ID_PNG , "png"},
+ { CODEC_ID_PPM , "ppm"},
+ { CODEC_ID_PGM , "pgm"},
+ { CODEC_ID_PGMYUV , "pgmyuv"},
+ { CODEC_ID_PBM , "pbm"},
+ { CODEC_ID_PAM , "pam"},
+ { CODEC_ID_MPEG1VIDEO, "mpg1-img"},
+ { CODEC_ID_MPEG2VIDEO, "mpg2-img"},
+ { CODEC_ID_MPEG4 , "mpg4-img"},
+ { CODEC_ID_FFV1 , "ffv1-img"},
+ { CODEC_ID_RAWVIDEO , "y"},
+ { CODEC_ID_BMP , "bmp"},
+ { CODEC_ID_GIF , "gif"},
+ { CODEC_ID_TARGA , "tga"},
+ { CODEC_ID_TIFF , "tiff"},
+ {0, NULL}
+};
+
+static int sizes[][2] = {
+ { 640, 480 },
+ { 720, 480 },
+ { 720, 576 },
+ { 352, 288 },
+ { 352, 240 },
+ { 160, 128 },
+ { 512, 384 },
+ { 640, 352 },
+ { 640, 240 },
+};
+
+static int infer_size(int *width_ptr, int *height_ptr, int size)
+{
+ int i;
+
+ for(i=0;i<sizeof(sizes)/sizeof(sizes[0]);i++) {
+ if ((sizes[i][0] * sizes[i][1]) == size) {
+ *width_ptr = sizes[i][0];
+ *height_ptr = sizes[i][1];
+ return 0;
+ }
+ }
+ return -1;
+}
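+
+/*
+ * Example: a raw luma (.y) file of 345600 bytes matches 720 * 480, so a
+ * rawvideo sequence with no explicit size is given width 720 and height 480;
+ * a size matching none of the table entries leaves the dimensions untouched.
+ */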
+static enum CodecID av_str2id(const IdStrMap *tags, const char *str)
+{
+ str= strrchr(str, '.');
+ if(!str) return CODEC_ID_NONE;
+ str++;
+
+ while (tags->id) {
+ int i;
+ for(i=0; toupper(tags->str[i]) == toupper(str[i]); i++){
+ if(tags->str[i]==0 && str[i]==0)
+ return tags->id;
+ }
+
+ tags++;
+ }
+ return CODEC_ID_NONE;
+}
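+
+/*
+ * Example: av_str2id(img_tags, "frame-%05d.png") takes the text after the
+ * last '.', matches "png" case-insensitively and returns CODEC_ID_PNG; a
+ * name with no extension, or an unknown one, yields CODEC_ID_NONE.
+ */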
+
+/* return -1 if no image found */
+static int find_image_range(int *pfirst_index, int *plast_index,
+ const char *path)
+{
+ char buf[1024];
+ int range, last_index, range1, first_index;
+
+ /* find the first image */
+ for(first_index = 0; first_index < 5; first_index++) {
+ if (av_get_frame_filename(buf, sizeof(buf), path, first_index) < 0){
+ *pfirst_index =
+ *plast_index = 1;
+ return 0;
+ }
+ if (url_exist(buf))
+ break;
+ }
+ if (first_index == 5)
+ goto fail;
+
+ /* find the last image */
+ last_index = first_index;
+ for(;;) {
+ range = 0;
+ for(;;) {
+ if (!range)
+ range1 = 1;
+ else
+ range1 = 2 * range;
+ if (av_get_frame_filename(buf, sizeof(buf), path,
+ last_index + range1) < 0)
+ goto fail;
+ if (!url_exist(buf))
+ break;
+ range = range1;
+ /* just in case... */
+ if (range >= (1 << 30))
+ goto fail;
+ }
+ /* we are sure that image last_index + range exists */
+ if (!range)
+ break;
+ last_index += range;
+ }
+ *pfirst_index = first_index;
+ *plast_index = last_index;
+ return 0;
+ fail:
+ return -1;
+}
+
+
+static int image_probe(AVProbeData *p)
+{
+ if (av_filename_number_test(p->filename) && av_str2id(img_tags, p->filename))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+enum CodecID av_guess_image2_codec(const char *filename){
+ return av_str2id(img_tags, filename);
+}
+
+static int img_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ VideoData *s = s1->priv_data;
+ int first_index, last_index;
+ AVStream *st;
+
+ s1->ctx_flags |= AVFMTCTX_NOHEADER;
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+
+ pstrcpy(s->path, sizeof(s->path), s1->filename);
+ s->img_number = 0;
+ s->img_count = 0;
+
+ /* find format */
+ if (s1->iformat->flags & AVFMT_NOFILE)
+ s->is_pipe = 0;
+ else{
+ s->is_pipe = 1;
+ st->need_parsing= 1;
+ }
+
+ if (!ap->time_base.num) {
+ av_set_pts_info(st, 60, 1, 25);
+ } else {
+ av_set_pts_info(st, 60, ap->time_base.num, ap->time_base.den);
+ }
+
+ if(ap->width && ap->height){
+ st->codec->width = ap->width;
+ st->codec->height= ap->height;
+ }
+
+ if (!s->is_pipe) {
+ if (find_image_range(&first_index, &last_index, s->path) < 0)
+ return AVERROR_IO;
+ s->img_first = first_index;
+ s->img_last = last_index;
+ s->img_number = first_index;
+ /* compute duration */
+ st->start_time = 0;
+ st->duration = last_index - first_index + 1;
+ }
+
+ if(ap->video_codec_id){
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = ap->video_codec_id;
+ }else if(ap->audio_codec_id){
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = ap->audio_codec_id;
+ }else{
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = av_str2id(img_tags, s->path);
+ }
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO && ap->pix_fmt != PIX_FMT_NONE)
+ st->codec->pix_fmt = ap->pix_fmt;
+
+ return 0;
+}
+
+static int img_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ VideoData *s = s1->priv_data;
+ char filename[1024];
+ int i;
+ int size[3]={0}, ret[3]={0};
+ ByteIOContext f1[3], *f[3]= {&f1[0], &f1[1], &f1[2]};
+ AVCodecContext *codec= s1->streams[0]->codec;
+
+ if (!s->is_pipe) {
+ /* loop over input */
+ if (s1->loop_input && s->img_number > s->img_last) {
+ s->img_number = s->img_first;
+ }
+ if (av_get_frame_filename(filename, sizeof(filename),
+ s->path, s->img_number)<0 && s->img_number > 1)
+ return AVERROR_IO;
+ for(i=0; i<3; i++){
+ if (url_fopen(f[i], filename, URL_RDONLY) < 0)
+ return AVERROR_IO;
+ size[i]= url_fsize(f[i]);
+
+ if(codec->codec_id != CODEC_ID_RAWVIDEO)
+ break;
+ filename[ strlen(filename) - 1 ]= 'U' + i; /* raw video: reopen with the last character set to 'U', then 'V', for the chroma planes */
+ }
+
+ if(codec->codec_id == CODEC_ID_RAWVIDEO && !codec->width)
+ infer_size(&codec->width, &codec->height, size[0]);
+ } else {
+ f[0] = &s1->pb;
+ if (url_feof(f[0]))
+ return AVERROR_IO;
+ size[0]= 4096;
+ }
+
+ av_new_packet(pkt, size[0] + size[1] + size[2]);
+ pkt->stream_index = 0;
+ pkt->flags |= PKT_FLAG_KEY;
+
+ pkt->size= 0;
+ for(i=0; i<3; i++){
+ if(size[i]){
+ ret[i]= get_buffer(f[i], pkt->data + pkt->size, size[i]);
+ if (!s->is_pipe)
+ url_fclose(f[i]);
+ if(ret[i]>0)
+ pkt->size += ret[i];
+ }
+ }
+
+ if (ret[0] <= 0 || ret[1]<0 || ret[2]<0) {
+ av_free_packet(pkt);
+ return AVERROR_IO; /* signal EOF */
+ } else {
+ s->img_count++;
+ s->img_number++;
+ return 0;
+ }
+}
+
+static int img_read_close(AVFormatContext *s1)
+{
+ return 0;
+}
+
+#ifdef CONFIG_MUXERS
+/******************************************************/
+/* image output */
+
+static int img_write_header(AVFormatContext *s)
+{
+ VideoData *img = s->priv_data;
+
+ img->img_number = 1;
+ pstrcpy(img->path, sizeof(img->path), s->filename);
+
+ /* find format */
+ if (s->oformat->flags & AVFMT_NOFILE)
+ img->is_pipe = 0;
+ else
+ img->is_pipe = 1;
+
+ return 0;
+}
+
+static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ VideoData *img = s->priv_data;
+ ByteIOContext pb1[3], *pb[3]= {&pb1[0], &pb1[1], &pb1[2]};
+ char filename[1024];
+ AVCodecContext *codec= s->streams[ pkt->stream_index ]->codec;
+ int i;
+
+ if (!img->is_pipe) {
+ if (av_get_frame_filename(filename, sizeof(filename),
+ img->path, img->img_number) < 0 && img->img_number>1)
+ return AVERROR_IO;
+ for(i=0; i<3; i++){
+ if (url_fopen(pb[i], filename, URL_WRONLY) < 0)
+ return AVERROR_IO;
+
+ if(codec->codec_id != CODEC_ID_RAWVIDEO)
+ break;
+ filename[ strlen(filename) - 1 ]= 'U' + i;
+ }
+ } else {
+ pb[0] = &s->pb;
+ }
+
+ if(codec->codec_id == CODEC_ID_RAWVIDEO){
+ int ysize = codec->width * codec->height;
+ put_buffer(pb[0], pkt->data , ysize);
+ put_buffer(pb[1], pkt->data + ysize, (pkt->size - ysize)/2);
+ put_buffer(pb[2], pkt->data + ysize +(pkt->size - ysize)/2, (pkt->size - ysize)/2);
+ put_flush_packet(pb[1]);
+ put_flush_packet(pb[2]);
+ url_fclose(pb[1]);
+ url_fclose(pb[2]);
+ }else{
+ put_buffer(pb[0], pkt->data, pkt->size);
+ }
+ put_flush_packet(pb[0]);
+ if (!img->is_pipe) {
+ url_fclose(pb[0]);
+ }
+
+ img->img_number++;
+ return 0;
+}
+
+static int img_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+#endif /* CONFIG_MUXERS */
+
+/* input */
+#ifdef CONFIG_IMAGE2_DEMUXER
+AVInputFormat image2_demuxer = {
+ "image2",
+ "image2 sequence",
+ sizeof(VideoData),
+ image_probe,
+ img_read_header,
+ img_read_packet,
+ img_read_close,
+ NULL,
+ NULL,
+ AVFMT_NOFILE,
+};
+#endif
+#ifdef CONFIG_IMAGE2PIPE_DEMUXER
+AVInputFormat image2pipe_demuxer = {
+ "image2pipe",
+ "piped image2 sequence",
+ sizeof(VideoData),
+ NULL, /* no probe */
+ img_read_header,
+ img_read_packet,
+ img_read_close,
+ NULL,
+};
+#endif
+
+/* output */
+#ifdef CONFIG_IMAGE2_MUXER
+AVOutputFormat image2_muxer = {
+ "image2",
+ "image2 sequence",
+ "",
+ "",
+ sizeof(VideoData),
+ CODEC_ID_NONE,
+ CODEC_ID_MJPEG,
+ img_write_header,
+ img_write_packet,
+ img_write_trailer,
+ AVFMT_NOFILE,
+};
+#endif
+#ifdef CONFIG_IMAGE2PIPE_MUXER
+AVOutputFormat image2pipe_muxer = {
+ "image2pipe",
+ "piped image2 sequence",
+ "",
+ "",
+ sizeof(VideoData),
+ CODEC_ID_NONE,
+ CODEC_ID_MJPEG,
+ img_write_header,
+ img_write_packet,
+ img_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/ipmovie.c b/contrib/ffmpeg/libavformat/ipmovie.c
new file mode 100644
index 000000000..3c0459938
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ipmovie.c
@@ -0,0 +1,625 @@
+/*
+ * Interplay MVE File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file ipmovie.c
+ * Interplay MVE file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * For more information regarding the Interplay MVE file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ * The aforementioned site also contains a command line utility for parsing
+ * IP MVE files so that you can get a good idea of the typical structure of
+ * such files. This demuxer is not the best example to use if you are trying
+ * to write your own as it uses a rather roundabout approach for splitting
+ * up and sending out the chunks.
+ */
+
+#include "avformat.h"
+
+/* debugging support: #define DEBUG_IPMOVIE as non-zero to see extremely
+ * verbose information about the demux process */
+#define DEBUG_IPMOVIE 0
+
+#if DEBUG_IPMOVIE
+#define debug_ipmovie printf
+#else
+static inline void debug_ipmovie(const char *format, ...) { }
+#endif
+
+#define IPMOVIE_SIGNATURE "Interplay MVE File\x1A\0"
+#define IPMOVIE_SIGNATURE_SIZE 20
+#define CHUNK_PREAMBLE_SIZE 4
+#define OPCODE_PREAMBLE_SIZE 4
+
+#define CHUNK_INIT_AUDIO 0x0000
+#define CHUNK_AUDIO_ONLY 0x0001
+#define CHUNK_INIT_VIDEO 0x0002
+#define CHUNK_VIDEO 0x0003
+#define CHUNK_SHUTDOWN 0x0004
+#define CHUNK_END 0x0005
+/* these last types are used internally */
+#define CHUNK_DONE 0xFFFC
+#define CHUNK_NOMEM 0xFFFD
+#define CHUNK_EOF 0xFFFE
+#define CHUNK_BAD 0xFFFF
+
+#define OPCODE_END_OF_STREAM 0x00
+#define OPCODE_END_OF_CHUNK 0x01
+#define OPCODE_CREATE_TIMER 0x02
+#define OPCODE_INIT_AUDIO_BUFFERS 0x03
+#define OPCODE_START_STOP_AUDIO 0x04
+#define OPCODE_INIT_VIDEO_BUFFERS 0x05
+#define OPCODE_UNKNOWN_06 0x06
+#define OPCODE_SEND_BUFFER 0x07
+#define OPCODE_AUDIO_FRAME 0x08
+#define OPCODE_SILENCE_FRAME 0x09
+#define OPCODE_INIT_VIDEO_MODE 0x0A
+#define OPCODE_CREATE_GRADIENT 0x0B
+#define OPCODE_SET_PALETTE 0x0C
+#define OPCODE_SET_PALETTE_COMPRESSED 0x0D
+#define OPCODE_UNKNOWN_0E 0x0E
+#define OPCODE_SET_DECODING_MAP 0x0F
+#define OPCODE_UNKNOWN_10 0x10
+#define OPCODE_VIDEO_DATA 0x11
+#define OPCODE_UNKNOWN_12 0x12
+#define OPCODE_UNKNOWN_13 0x13
+#define OPCODE_UNKNOWN_14 0x14
+#define OPCODE_UNKNOWN_15 0x15
+
+#define PALETTE_COUNT 256
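+
+/*
+ * Container layout assumed by the parser below: after the 20-byte signature
+ * and 6 further header bytes the file is a sequence of chunks, each with a
+ * 4-byte preamble (size:16le, type:16le); a chunk payload is in turn a
+ * sequence of opcodes, each with a 4-byte preamble (size:16le, type:8,
+ * version:8) followed by size bytes of data.
+ */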
+
+typedef struct IPMVEContext {
+
+ unsigned char *buf;
+ int buf_size;
+
+ float fps;
+ int frame_pts_inc;
+
+ unsigned int video_width;
+ unsigned int video_height;
+ int64_t video_pts;
+
+ unsigned int audio_bits;
+ unsigned int audio_channels;
+ unsigned int audio_sample_rate;
+ unsigned int audio_type;
+ unsigned int audio_frame_count;
+
+ int video_stream_index;
+ int audio_stream_index;
+
+ offset_t audio_chunk_offset;
+ int audio_chunk_size;
+ offset_t video_chunk_offset;
+ int video_chunk_size;
+ offset_t decode_map_chunk_offset;
+ int decode_map_chunk_size;
+
+ offset_t next_chunk_offset;
+
+ AVPaletteControl palette_control;
+
+} IPMVEContext;
+
+static int load_ipmovie_packet(IPMVEContext *s, ByteIOContext *pb,
+ AVPacket *pkt) {
+
+ int chunk_type;
+ int64_t audio_pts = 0;
+
+ if (s->audio_chunk_offset) {
+
+ /* adjust for PCM audio by skipping chunk header */
+ if (s->audio_type != CODEC_ID_INTERPLAY_DPCM) {
+ s->audio_chunk_offset += 6;
+ s->audio_chunk_size -= 6;
+ }
+
+ url_fseek(pb, s->audio_chunk_offset, SEEK_SET);
+ s->audio_chunk_offset = 0;
+
+ /* figure out the audio pts */
+ audio_pts = 90000;
+ audio_pts *= s->audio_frame_count;
+ audio_pts /= s->audio_sample_rate;
+
+ if (s->audio_chunk_size != av_get_packet(pb, pkt, s->audio_chunk_size))
+ return CHUNK_EOF;
+
+ pkt->stream_index = s->audio_stream_index;
+ pkt->pts = audio_pts;
+
+ /* audio frame maintenance */
+ if (s->audio_type != CODEC_ID_INTERPLAY_DPCM)
+ s->audio_frame_count +=
+ (s->audio_chunk_size / s->audio_channels / (s->audio_bits / 8));
+ else
+ s->audio_frame_count +=
+ (s->audio_chunk_size - 6) / s->audio_channels;
+
+ debug_ipmovie("sending audio frame with pts %"PRId64" (%d audio frames)\n",
+ audio_pts, s->audio_frame_count);
+
+ chunk_type = CHUNK_VIDEO;
+
+ } else if (s->decode_map_chunk_offset) {
+
+ /* send both the decode map and the video data together */
+
+ if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
+ return CHUNK_NOMEM;
+
+ pkt->pos= s->decode_map_chunk_offset;
+ url_fseek(pb, s->decode_map_chunk_offset, SEEK_SET);
+ s->decode_map_chunk_offset = 0;
+
+ if (get_buffer(pb, pkt->data, s->decode_map_chunk_size) !=
+ s->decode_map_chunk_size) {
+ av_free_packet(pkt);
+ return CHUNK_EOF;
+ }
+
+ url_fseek(pb, s->video_chunk_offset, SEEK_SET);
+ s->video_chunk_offset = 0;
+
+ if (get_buffer(pb, pkt->data + s->decode_map_chunk_size,
+ s->video_chunk_size) != s->video_chunk_size) {
+ av_free_packet(pkt);
+ return CHUNK_EOF;
+ }
+
+ pkt->stream_index = s->video_stream_index;
+ pkt->pts = s->video_pts;
+
+ debug_ipmovie("sending video frame with pts %"PRId64"\n",
+ pkt->pts);
+
+ s->video_pts += s->frame_pts_inc;
+
+ chunk_type = CHUNK_VIDEO;
+
+ } else {
+
+ url_fseek(pb, s->next_chunk_offset, SEEK_SET);
+ chunk_type = CHUNK_DONE;
+
+ }
+
+ return chunk_type;
+}
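+
+/*
+ * The audio pts above is elapsed samples rescaled to the 90 kHz clock; for
+ * example, after 22050 samples of a 22050 Hz stream,
+ * audio_pts = 90000 * 22050 / 22050 = 90000, i.e. exactly one second.
+ */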
+
+/* This function loads and processes a single chunk in an IP movie file.
+ * It returns the type of chunk that was processed. */
+static int process_ipmovie_chunk(IPMVEContext *s, ByteIOContext *pb,
+ AVPacket *pkt)
+{
+ unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
+ int chunk_type;
+ int chunk_size;
+ unsigned char opcode_preamble[OPCODE_PREAMBLE_SIZE];
+ unsigned char opcode_type;
+ unsigned char opcode_version;
+ int opcode_size;
+ unsigned char scratch[1024];
+ int i, j;
+ int first_color, last_color;
+ int audio_flags;
+ unsigned char r, g, b;
+
+ /* see if there are any pending packets */
+ chunk_type = load_ipmovie_packet(s, pb, pkt);
+ if ((chunk_type == CHUNK_VIDEO) && (chunk_type != CHUNK_DONE))
+ return chunk_type;
+
+ /* read the next chunk, wherever the file happens to be pointing */
+ if (url_feof(pb))
+ return CHUNK_EOF;
+ if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
+ CHUNK_PREAMBLE_SIZE)
+ return CHUNK_BAD;
+ chunk_size = LE_16(&chunk_preamble[0]);
+ chunk_type = LE_16(&chunk_preamble[2]);
+
+ debug_ipmovie("chunk type 0x%04X, 0x%04X bytes: ", chunk_type, chunk_size);
+
+ switch (chunk_type) {
+
+ case CHUNK_INIT_AUDIO:
+ debug_ipmovie("initialize audio\n");
+ break;
+
+ case CHUNK_AUDIO_ONLY:
+ debug_ipmovie("audio only\n");
+ break;
+
+ case CHUNK_INIT_VIDEO:
+ debug_ipmovie("initialize video\n");
+ break;
+
+ case CHUNK_VIDEO:
+ debug_ipmovie("video (and audio)\n");
+ break;
+
+ case CHUNK_SHUTDOWN:
+ debug_ipmovie("shutdown\n");
+ break;
+
+ case CHUNK_END:
+ debug_ipmovie("end\n");
+ break;
+
+ default:
+ debug_ipmovie("invalid chunk\n");
+ chunk_type = CHUNK_BAD;
+ break;
+
+ }
+
+ while ((chunk_size > 0) && (chunk_type != CHUNK_BAD)) {
+
+ /* read the next chunk, wherever the file happens to be pointing */
+ if (url_feof(pb)) {
+ chunk_type = CHUNK_EOF;
+ break;
+ }
+ if (get_buffer(pb, opcode_preamble, CHUNK_PREAMBLE_SIZE) !=
+ CHUNK_PREAMBLE_SIZE) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+
+ opcode_size = LE_16(&opcode_preamble[0]);
+ opcode_type = opcode_preamble[2];
+ opcode_version = opcode_preamble[3];
+
+ chunk_size -= OPCODE_PREAMBLE_SIZE;
+ chunk_size -= opcode_size;
+ if (chunk_size < 0) {
+ debug_ipmovie("chunk_size countdown just went negative\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+
+ debug_ipmovie(" opcode type %02X, version %d, 0x%04X bytes: ",
+ opcode_type, opcode_version, opcode_size);
+ switch (opcode_type) {
+
+ case OPCODE_END_OF_STREAM:
+ debug_ipmovie("end of stream\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_END_OF_CHUNK:
+ debug_ipmovie("end of chunk\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_CREATE_TIMER:
+ debug_ipmovie("create timer\n");
+ if ((opcode_version > 0) || (opcode_size > 6)) {
+ debug_ipmovie("bad create_timer opcode\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ if (get_buffer(pb, scratch, opcode_size) !=
+ opcode_size) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ s->fps = 1000000.0 / (LE_32(&scratch[0]) * LE_16(&scratch[4]));
+ s->frame_pts_inc = 90000 / s->fps;
+ debug_ipmovie(" %.2f frames/second (timer div = %d, subdiv = %d)\n",
+ s->fps, LE_32(&scratch[0]), LE_16(&scratch[4]));
+ break;
+
+ case OPCODE_INIT_AUDIO_BUFFERS:
+ debug_ipmovie("initialize audio buffers\n");
+ if ((opcode_version > 1) || (opcode_size > 10)) {
+ debug_ipmovie("bad init_audio_buffers opcode\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ if (get_buffer(pb, scratch, opcode_size) !=
+ opcode_size) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ s->audio_sample_rate = LE_16(&scratch[4]);
+ audio_flags = LE_16(&scratch[2]);
+ /* bit 0 of the flags: 0 = mono, 1 = stereo */
+ s->audio_channels = (audio_flags & 1) + 1;
+ /* bit 1 of the flags: 0 = 8 bit, 1 = 16 bit */
+ s->audio_bits = (((audio_flags >> 1) & 1) + 1) * 8;
+ /* bit 2 indicates compressed audio in version 1 opcode */
+ if ((opcode_version == 1) && (audio_flags & 0x4))
+ s->audio_type = CODEC_ID_INTERPLAY_DPCM;
+ else if (s->audio_bits == 16)
+ s->audio_type = CODEC_ID_PCM_S16LE;
+ else
+ s->audio_type = CODEC_ID_PCM_U8;
+ debug_ipmovie("audio: %d bits, %d Hz, %s, %s format\n",
+ s->audio_bits,
+ s->audio_sample_rate,
+ (s->audio_channels == 2) ? "stereo" : "mono",
+ (s->audio_type == CODEC_ID_INTERPLAY_DPCM) ?
+ "Interplay audio" : "PCM");
+ break;
+
+ case OPCODE_START_STOP_AUDIO:
+ debug_ipmovie("start/stop audio\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_INIT_VIDEO_BUFFERS:
+ debug_ipmovie("initialize video buffers\n");
+ if ((opcode_version > 2) || (opcode_size > 8)) {
+ debug_ipmovie("bad init_video_buffers opcode\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ if (get_buffer(pb, scratch, opcode_size) !=
+ opcode_size) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ s->video_width = LE_16(&scratch[0]) * 8;
+ s->video_height = LE_16(&scratch[2]) * 8;
+ debug_ipmovie("video resolution: %d x %d\n",
+ s->video_width, s->video_height);
+ break;
+
+ case OPCODE_UNKNOWN_06:
+ case OPCODE_UNKNOWN_0E:
+ case OPCODE_UNKNOWN_10:
+ case OPCODE_UNKNOWN_12:
+ case OPCODE_UNKNOWN_13:
+ case OPCODE_UNKNOWN_14:
+ case OPCODE_UNKNOWN_15:
+ debug_ipmovie("unknown (but documented) opcode %02X\n", opcode_type);
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_SEND_BUFFER:
+ debug_ipmovie("send buffer\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_AUDIO_FRAME:
+ debug_ipmovie("audio frame\n");
+
+ /* log position and move on for now */
+ s->audio_chunk_offset = url_ftell(pb);
+ s->audio_chunk_size = opcode_size;
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_SILENCE_FRAME:
+ debug_ipmovie("silence frame\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_INIT_VIDEO_MODE:
+ debug_ipmovie("initialize video mode\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_CREATE_GRADIENT:
+ debug_ipmovie("create gradient\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_SET_PALETTE:
+ debug_ipmovie("set palette\n");
+ /* check for the logical maximum palette size
+ * (3 * 256 + 4 bytes) */
+ if (opcode_size > 0x304) {
+ debug_ipmovie("demux_ipmovie: set_palette opcode too large\n");
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ if (get_buffer(pb, scratch, opcode_size) != opcode_size) {
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+
+ /* load the palette into internal data structure */
+ first_color = LE_16(&scratch[0]);
+ last_color = first_color + LE_16(&scratch[2]) - 1;
+ /* sanity check (since they are 16 bit values) */
+ if ((first_color > 0xFF) || (last_color > 0xFF)) {
+ debug_ipmovie("demux_ipmovie: set_palette indices out of range (%d -> %d)\n",
+ first_color, last_color);
+ chunk_type = CHUNK_BAD;
+ break;
+ }
+ j = 4; /* offset of first palette data */
+ for (i = first_color; i <= last_color; i++) {
+ /* the palette is stored as a 6-bit VGA palette, thus each
+ * component is shifted up to a 8-bit range */
+ r = scratch[j++] * 4;
+ g = scratch[j++] * 4;
+ b = scratch[j++] * 4;
+ s->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
+ }
+ /* indicate a palette change */
+ s->palette_control.palette_changed = 1;
+ break;
+
+ case OPCODE_SET_PALETTE_COMPRESSED:
+ debug_ipmovie("set palette compressed\n");
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_SET_DECODING_MAP:
+ debug_ipmovie("set decoding map\n");
+
+ /* log position and move on for now */
+ s->decode_map_chunk_offset = url_ftell(pb);
+ s->decode_map_chunk_size = opcode_size;
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ case OPCODE_VIDEO_DATA:
+ debug_ipmovie("set video data\n");
+
+ /* log position and move on for now */
+ s->video_chunk_offset = url_ftell(pb);
+ s->video_chunk_size = opcode_size;
+ url_fseek(pb, opcode_size, SEEK_CUR);
+ break;
+
+ default:
+ debug_ipmovie("*** unknown opcode type\n");
+ chunk_type = CHUNK_BAD;
+ break;
+
+ }
+ }
+
+ /* make a note of where the stream is sitting */
+ s->next_chunk_offset = url_ftell(pb);
+
+ /* dispatch the first of any pending packets */
+ if ((chunk_type == CHUNK_VIDEO) || (chunk_type == CHUNK_AUDIO_ONLY))
+ chunk_type = load_ipmovie_packet(s, pb, pkt);
+
+ return chunk_type;
+}
+
+static int ipmovie_probe(AVProbeData *p)
+{
+ if (p->buf_size < IPMOVIE_SIGNATURE_SIZE)
+ return 0;
+ if (strncmp(p->buf, IPMOVIE_SIGNATURE, IPMOVIE_SIGNATURE_SIZE) != 0)
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int ipmovie_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ IPMVEContext *ipmovie = (IPMVEContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVPacket pkt;
+ AVStream *st;
+ unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
+ int chunk_type;
+
+ /* initialize private context members */
+ ipmovie->video_pts = ipmovie->audio_frame_count = 0;
+ ipmovie->audio_chunk_offset = ipmovie->video_chunk_offset =
+ ipmovie->decode_map_chunk_offset = 0;
+
+ /* on the first read, this will position the stream at the first chunk */
+ ipmovie->next_chunk_offset = IPMOVIE_SIGNATURE_SIZE + 6;
+
+ /* process the first chunk which should be CHUNK_INIT_VIDEO */
+ if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_VIDEO)
+ return AVERROR_INVALIDDATA;
+
+ /* peek ahead to the next chunk-- if it is an init audio chunk, process
+ * it; if it is the first video chunk, this is a silent file */
+ if (get_buffer(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
+ CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ chunk_type = LE_16(&chunk_preamble[2]);
+ url_fseek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);
+
+ if (chunk_type == CHUNK_VIDEO)
+ ipmovie->audio_type = 0; /* no audio */
+ else if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_AUDIO)
+ return AVERROR_INVALIDDATA;
+
+ /* initialize the stream decoders */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ ipmovie->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_INTERPLAY_VIDEO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = ipmovie->video_width;
+ st->codec->height = ipmovie->video_height;
+
+ /* palette considerations */
+ st->codec->palctrl = &ipmovie->palette_control;
+
+ if (ipmovie->audio_type) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ ipmovie->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = ipmovie->audio_type;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = ipmovie->audio_channels;
+ st->codec->sample_rate = ipmovie->audio_sample_rate;
+ st->codec->bits_per_sample = ipmovie->audio_bits;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ if (st->codec->codec_id == CODEC_ID_INTERPLAY_DPCM)
+ st->codec->bit_rate /= 2;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+ }
+
+ return 0;
+}
+
+static int ipmovie_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ IPMVEContext *ipmovie = (IPMVEContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret;
+
+ ret = process_ipmovie_chunk(ipmovie, pb, pkt);
+ if (ret == CHUNK_BAD)
+ ret = AVERROR_INVALIDDATA;
+ else if (ret == CHUNK_EOF)
+ ret = AVERROR_IO;
+ else if (ret == CHUNK_NOMEM)
+ ret = AVERROR_NOMEM;
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int ipmovie_read_close(AVFormatContext *s)
+{
+// IPMVEContext *ipmovie = (IPMVEContext *)s->priv_data;
+
+ return 0;
+}
+
+AVInputFormat ipmovie_demuxer = {
+ "ipmovie",
+ "Interplay MVE format",
+ sizeof(IPMVEContext),
+ ipmovie_probe,
+ ipmovie_read_header,
+ ipmovie_read_packet,
+ ipmovie_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/isom.c b/contrib/ffmpeg/libavformat/isom.c
new file mode 100644
index 000000000..d4e923853
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/isom.c
@@ -0,0 +1,131 @@
+/*
+ * ISO Media common code
+ * Copyright (c) 2001 Fabrice Bellard.
+ * Copyright (c) 2002 Francois Revol <revol@free.fr>
+ * Copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "riff.h"
+#include "isom.h"
+
+/* http://gpac.sourceforge.net/tutorial/mediatypes.htm */
+const CodecTag ff_mov_obj_type[] = {
+ { CODEC_ID_MPEG4 , 32 },
+ { CODEC_ID_H264 , 33 },
+ { CODEC_ID_AAC , 64 },
+ { CODEC_ID_MPEG2VIDEO, 96 }, /* MPEG2 Simple */
+ { CODEC_ID_MPEG2VIDEO, 97 }, /* MPEG2 Main */
+ { CODEC_ID_MPEG2VIDEO, 98 }, /* MPEG2 SNR */
+ { CODEC_ID_MPEG2VIDEO, 99 }, /* MPEG2 Spatial */
+ { CODEC_ID_MPEG2VIDEO, 100 }, /* MPEG2 High */
+ { CODEC_ID_MPEG2VIDEO, 101 }, /* MPEG2 422 */
+ { CODEC_ID_AAC , 102 }, /* MPEG2 AAC Main */
+ { CODEC_ID_AAC , 103 }, /* MPEG2 AAC Low */
+ { CODEC_ID_AAC , 104 }, /* MPEG2 AAC SSR */
+ { CODEC_ID_MP3 , 105 },
+ { CODEC_ID_MPEG1VIDEO, 106 },
+ { CODEC_ID_MP2 , 107 },
+ { CODEC_ID_MJPEG , 108 },
+ { CODEC_ID_PCM_S16LE , 224 },
+ { CODEC_ID_VORBIS , 221 },
+ { CODEC_ID_QCELP , 225 },
+ { CODEC_ID_AC3 , 226 },
+ { CODEC_ID_PCM_ALAW , 227 },
+ { CODEC_ID_PCM_MULAW , 228 },
+ { CODEC_ID_PCM_S16BE , 230 },
+ { CODEC_ID_H263 , 242 },
+ { CODEC_ID_H261 , 243 },
+ { 0, 0 },
+};
+
+/* map numeric codes from mdhd atom to ISO 639 */
+/* cf. QTFileFormat.pdf p253, qtff.pdf p205 */
+/* http://developer.apple.com/documentation/mac/Text/Text-368.html */
+/* deprecated by putting the code as 3*5bit ascii */
+static const char *mov_mdhd_language_map[] = {
+ /* 0-9 */
+ "eng", "fra", "ger", "ita", "dut", "sve", "spa", "dan", "por", "nor",
+ "heb", "jpn", "ara", "fin", "gre", "ice", "mlt", "tur", "hr "/*scr*/, "chi"/*ace?*/,
+ "urd", "hin", "tha", "kor", "lit", "pol", "hun", "est", "lav", NULL,
+ "fo ", NULL, "rus", "chi", NULL, "iri", "alb", "ron", "ces", "slk",
+ "slv", "yid", "sr ", "mac", "bul", "ukr", "bel", "uzb", "kaz", "aze",
+ /*?*/
+ "aze", "arm", "geo", "mol", "kir", "tgk", "tuk", "mon", NULL, "pus",
+ "kur", "kas", "snd", "tib", "nep", "san", "mar", "ben", "asm", "guj",
+ "pa ", "ori", "mal", "kan", "tam", "tel", NULL, "bur", "khm", "lao",
+ /* roman? arabic? */
+ "vie", "ind", "tgl", "may", "may", "amh", "tir", "orm", "som", "swa",
+ /*==rundi?*/
+ NULL, "run", NULL, "mlg", "epo", NULL, NULL, NULL, NULL, NULL,
+ /* 100 */
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "wel", "baq",
+ "cat", "lat", "que", "grn", "aym", "tat", "uig", "dzo", "jav"
+};
+
+int ff_mov_iso639_to_lang(const char *lang, int mp4)
+{
+ int i, code = 0;
+
+ /* old way, only for QT? */
+ for (i = 0; !mp4 && (i < (sizeof(mov_mdhd_language_map)/sizeof(char *))); i++) {
+ if (mov_mdhd_language_map[i] && !strcmp(lang, mov_mdhd_language_map[i]))
+ return i;
+ }
+ /* XXX:can we do that in mov too? */
+ if (!mp4)
+ return 0;
+ /* handle undefined as such */
+ if (lang[0] == '\0')
+ lang = "und";
+ /* 5bit ascii */
+ for (i = 0; i < 3; i++) {
+ unsigned char c = (unsigned char)lang[i];
+ if (c < 0x60)
+ return 0;
+ if (c > 0x60 + 0x1f)
+ return 0;
+ code <<= 5;
+ code |= (c - 0x60);
+ }
+ return code;
+}
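+
+/*
+ * Example of the 5-bit packing above: "eng" encodes as
+ * (('e'-0x60) << 10) | (('n'-0x60) << 5) | ('g'-0x60)
+ *   = (5 << 10) | (14 << 5) | 7 = 5575,
+ * which is greater than 138 and therefore round-trips through
+ * ff_mov_lang_to_iso639().
+ */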
+
+int ff_mov_lang_to_iso639(int code, char *to)
+{
+ int i;
+ /* is it the mangled iso code? */
+ /* see http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt */
+ if (code > 138) {
+ for (i = 2; i >= 0; i--) {
+ to[i] = 0x60 + (code & 0x1f);
+ code >>= 5;
+ }
+ return 1;
+ }
+ /* old fashion apple lang code */
+ if (code >= (sizeof(mov_mdhd_language_map)/sizeof(char *)))
+ return 0;
+ if (!mov_mdhd_language_map[code])
+ return 0;
+ strncpy(to, mov_mdhd_language_map[code], 4);
+ return 1;
+}
diff --git a/contrib/ffmpeg/libavformat/isom.h b/contrib/ffmpeg/libavformat/isom.h
new file mode 100644
index 000000000..85cbbdc6c
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/isom.h
@@ -0,0 +1,38 @@
+/*
+ * ISO Media common code
+ * copyright (c) 2001 Fabrice Bellard.
+ * copyright (c) 2002 Francois Revol <revol@free.fr>
+ * copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FFMPEG_ISOM_H
+#define FFMPEG_ISOM_H
+
+/* isom.c */
+extern const CodecTag ff_mov_obj_type[];
+
+int ff_mov_iso639_to_lang(const char *lang, int mp4);
+int ff_mov_lang_to_iso639(int code, char *to);
+
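+/* Presumably one entry of a time-to-sample table (cf. the 'stts' atom):
+ * 'count' consecutive samples sharing the same 'duration', expressed in
+ * time-base units. */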
+typedef struct Time2Sample{
+ int count;
+ int duration;
+}Time2Sample;
+
+#endif /* FFMPEG_ISOM_H */
diff --git a/contrib/ffmpeg/libavformat/jpeg.c b/contrib/ffmpeg/libavformat/jpeg.c
new file mode 100644
index 000000000..b5fc043c9
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/jpeg.c
@@ -0,0 +1,240 @@
+/*
+ * JPEG image format
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
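+/* A JPEG stream starts with the SOI marker 0xff 0xd8, immediately followed
+ * by the 0xff of the next marker segment, which is what the probe below
+ * checks for. */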
+static int jpeg_probe(AVProbeData *pd)
+{
+ if (pd->buf_size >= 64 &&
+ pd->buf[0] == 0xff && pd->buf[1] == 0xd8 && pd->buf[2] == 0xff)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+typedef struct JpegOpaque {
+ int (*alloc_cb)(void *opaque, AVImageInfo *info);
+ void *opaque;
+ int ret_code;
+} JpegOpaque;
+
+/* called by the codec to allocate the image */
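+/* The decoder reports the generic YUV pixel formats, but JPEG data uses
+ * full-range samples, so the J (JPEG-range) variants are what gets exposed
+ * to the caller through AVImageInfo. */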
+static int jpeg_get_buffer(AVCodecContext *c, AVFrame *picture)
+{
+ JpegOpaque *jctx = c->opaque;
+ AVImageInfo info1, *info = &info1;
+ int ret, i;
+
+ info->width = c->width;
+ info->height = c->height;
+ switch(c->pix_fmt) {
+ case PIX_FMT_YUV420P:
+ info->pix_fmt = PIX_FMT_YUVJ420P;
+ break;
+ case PIX_FMT_YUV422P:
+ info->pix_fmt = PIX_FMT_YUVJ422P;
+ break;
+ case PIX_FMT_YUV444P:
+ info->pix_fmt = PIX_FMT_YUVJ444P;
+ break;
+ default:
+ return -1;
+ }
+ ret = jctx->alloc_cb(jctx->opaque, info);
+ if (ret) {
+ jctx->ret_code = ret;
+ return -1;
+ } else {
+ for(i=0;i<3;i++) {
+ picture->data[i] = info->pict.data[i];
+ picture->linesize[i] = info->pict.linesize[i];
+ }
+ return 0;
+ }
+}
+
+static void jpeg_img_copy(uint8_t *dst, int dst_wrap,
+ uint8_t *src, int src_wrap,
+ int width, int height)
+{
+ for(;height > 0; height--) {
+ memcpy(dst, src, width);
+ dst += dst_wrap;
+ src += src_wrap;
+ }
+}
+
+/* XXX: libavcodec is broken for truncated jpegs! */
+#define IO_BUF_SIZE (1024*1024)
+
+static int jpeg_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ AVCodecContext *c;
+ AVFrame *picture, picture1;
+ int len, size, got_picture, i;
+ uint8_t *inbuf_ptr, inbuf[IO_BUF_SIZE];
+ JpegOpaque jctx;
+
+ jctx.alloc_cb = alloc_cb;
+ jctx.opaque = opaque;
+ jctx.ret_code = -1; /* default return code is error */
+
+ c = avcodec_alloc_context();
+ if (!c)
+ return -1;
+ picture= avcodec_alloc_frame();
+ if (!picture) {
+ av_free(c);
+ return -1;
+ }
+ c->opaque = &jctx;
+ c->get_buffer = jpeg_get_buffer;
+ c->flags |= CODEC_FLAG_TRUNCATED; /* we don't send complete frames */
+ if (avcodec_open(c, &mjpeg_decoder) < 0)
+ goto fail1;
+ for(;;) {
+ size = get_buffer(f, inbuf, sizeof(inbuf));
+ if (size == 0)
+ break;
+ inbuf_ptr = inbuf;
+ while (size > 0) {
+ len = avcodec_decode_video(c, &picture1, &got_picture,
+ inbuf_ptr, size);
+ if (len < 0)
+ goto fail;
+ if (got_picture)
+ goto the_end;
+ size -= len;
+ inbuf_ptr += len;
+ }
+ }
+ the_end:
+ /* XXX: currently, the mjpeg decoder does not use AVFrame, so we
+ must do it by hand */
+ if (jpeg_get_buffer(c, picture) < 0)
+ goto fail;
+ for(i=0;i<3;i++) {
+ int w, h;
+ w = c->width;
+ h = c->height;
+ if (i >= 1) {
+ switch(c->pix_fmt) {
+ default:
+ case PIX_FMT_YUV420P:
+ w = (w + 1) >> 1;
+ h = (h + 1) >> 1;
+ break;
+ case PIX_FMT_YUV422P:
+ w = (w + 1) >> 1;
+ break;
+ case PIX_FMT_YUV444P:
+ break;
+ }
+ }
+ jpeg_img_copy(picture->data[i], picture->linesize[i],
+ picture1.data[i], picture1.linesize[i],
+ w, h);
+ }
+ jctx.ret_code = 0;
+ fail:
+ avcodec_close(c);
+ fail1:
+ av_free(picture);
+ av_free(c);
+ return jctx.ret_code;
+}
+
+#if defined(CONFIG_MUXERS) && defined(CONFIG_MJPEG_ENCODER)
+static int jpeg_write(ByteIOContext *pb, AVImageInfo *info)
+{
+ AVCodecContext *c;
+ uint8_t *outbuf = NULL;
+ int outbuf_size, ret, size, i;
+ AVFrame *picture;
+
+ ret = -1;
+ c = avcodec_alloc_context();
+ if (!c)
+ return -1;
+ picture = avcodec_alloc_frame();
+ if (!picture)
+ goto fail2;
+ c->width = info->width;
+ c->height = info->height;
+ /* XXX: should this conversion be moved into the codec? */
+ switch(info->pix_fmt) {
+ case PIX_FMT_YUVJ420P:
+ c->pix_fmt = PIX_FMT_YUV420P;
+ break;
+ case PIX_FMT_YUVJ422P:
+ c->pix_fmt = PIX_FMT_YUV422P;
+ break;
+ case PIX_FMT_YUVJ444P:
+ c->pix_fmt = PIX_FMT_YUV444P;
+ break;
+ default:
+ goto fail1;
+ }
+ for(i=0;i<3;i++) {
+ picture->data[i] = info->pict.data[i];
+ picture->linesize[i] = info->pict.linesize[i];
+ }
+ /* set the quality */
+ picture->quality = 3; /* XXX: a parameter should be used */
+ c->flags |= CODEC_FLAG_QSCALE;
+
+ if (avcodec_open(c, &mjpeg_encoder) < 0)
+ goto fail1;
+
+ /* XXX: the output buffer size needs to be sorted out properly */
+ outbuf_size = 1000000;
+ outbuf = av_malloc(outbuf_size);
+
+ size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
+ if (size < 0)
+ goto fail;
+ put_buffer(pb, outbuf, size);
+ put_flush_packet(pb);
+ ret = 0;
+
+ fail:
+ avcodec_close(c);
+ av_free(outbuf);
+ fail1:
+ av_free(picture);
+ fail2:
+ av_free(c);
+ return ret;
+}
+#endif /* CONFIG_MUXERS && CONFIG_MJPEG_ENCODER */
+
+AVImageFormat jpeg_image_format = {
+ "jpeg",
+ "jpg,jpeg",
+ jpeg_probe,
+ jpeg_read,
+ (1 << PIX_FMT_YUVJ420P) | (1 << PIX_FMT_YUVJ422P) | (1 << PIX_FMT_YUVJ444P),
+#if defined(CONFIG_MUXERS) && defined(CONFIG_MJPEG_ENCODER)
+ jpeg_write,
+#else
+ NULL,
+#endif /* CONFIG_MUXERS && CONFIG_MJPEG_ENCODER */
+};
diff --git a/contrib/ffmpeg/libavformat/libnut.c b/contrib/ffmpeg/libavformat/libnut.c
new file mode 100644
index 000000000..d4e7201ab
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/libnut.c
@@ -0,0 +1,283 @@
+#include "avformat.h"
+#include "riff.h"
+#include <libnut.h>
+
+#define ID_STRING "nut/multimedia container"
+#define ID_LENGTH (strlen(ID_STRING) + 1)
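+/* The NUT file id string is stored in the file including its terminating
+ * NUL byte, hence the "+ 1" in ID_LENGTH. */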
+
+typedef struct {
+ nut_context_t * nut;
+ nut_stream_header_t * s;
+} NUTContext;
+
+static const CodecTag nut_tags[] = {
+ { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
+ { CODEC_ID_MP3, MKTAG('m', 'p', '3', ' ') },
+ { CODEC_ID_VORBIS, MKTAG('v', 'r', 'b', 's') },
+ { 0, 0 },
+};
+
+#ifdef CONFIG_MUXERS
+static int av_write(void * h, size_t len, const uint8_t * buf) {
+ ByteIOContext * bc = h;
+ put_buffer(bc, buf, len);
+ //put_flush_packet(bc);
+ return len;
+}
+
+static int nut_write_header(AVFormatContext * avf) {
+ NUTContext * priv = avf->priv_data;
+ ByteIOContext * bc = &avf->pb;
+ nut_muxer_opts_t mopts = {
+ .output = {
+ .priv = bc,
+ .write = av_write,
+ },
+ .alloc = { av_malloc, av_realloc, av_free },
+ .write_index = 1,
+ .realtime_stream = 0,
+ .max_distance = 32768,
+ .fti = NULL,
+ };
+ nut_stream_header_t * s;
+ int i;
+
+ priv->s = s = av_mallocz((avf->nb_streams + 1) * sizeof*s);
+
+ for (i = 0; i < avf->nb_streams; i++) {
+ AVCodecContext * codec = avf->streams[i]->codec;
+ int j;
+ int fourcc = 0;
+ int nom, denom, ssize;
+
+ s[i].type = codec->codec_type == CODEC_TYPE_VIDEO ? NUT_VIDEO_CLASS : NUT_AUDIO_CLASS;
+
+ if (codec->codec_tag) fourcc = codec->codec_tag;
+ else fourcc = codec_get_tag(nut_tags, codec->codec_id);
+
+ if (!fourcc) {
+ if (codec->codec_type == CODEC_TYPE_VIDEO) fourcc = codec_get_bmp_tag(codec->codec_id);
+ if (codec->codec_type == CODEC_TYPE_AUDIO) fourcc = codec_get_wav_tag(codec->codec_id);
+ }
+
+ s[i].fourcc_len = 4;
+ s[i].fourcc = av_malloc(s[i].fourcc_len);
+ for (j = 0; j < s[i].fourcc_len; j++) s[i].fourcc[j] = (fourcc >> (j*8)) & 0xFF;
+
+ ff_parse_specific_params(codec, &nom, &ssize, &denom);
+ av_set_pts_info(avf->streams[i], 60, denom, nom);
+
+ s[i].time_base.nom = denom;
+ s[i].time_base.den = nom;
+
+ s[i].fixed_fps = 0;
+ s[i].decode_delay = codec->has_b_frames;
+ s[i].codec_specific_len = codec->extradata_size;
+ s[i].codec_specific = codec->extradata;
+
+ if (codec->codec_type == CODEC_TYPE_VIDEO) {
+ s[i].width = codec->width;
+ s[i].height = codec->height;
+ s[i].sample_width = 0;
+ s[i].sample_height = 0;
+ s[i].colorspace_type = 0;
+ } else {
+ s[i].samplerate_nom = codec->sample_rate;
+ s[i].samplerate_denom = 1;
+ s[i].channel_count = codec->channels;
+ }
+ }
+
+ s[avf->nb_streams].type = -1;
+ priv->nut = nut_muxer_init(&mopts, s, NULL);
+
+ return 0;
+}
+
+static int nut_write_packet(AVFormatContext * avf, AVPacket * pkt) {
+ NUTContext * priv = avf->priv_data;
+ nut_packet_t p;
+
+ p.len = pkt->size;
+ p.stream = pkt->stream_index;
+ p.pts = pkt->pts;
+ p.flags = pkt->flags & PKT_FLAG_KEY ? NUT_FLAG_KEY : 0;
+ p.next_pts = 0;
+
+ nut_write_frame_reorder(priv->nut, &p, pkt->data);
+
+ return 0;
+}
+
+static int nut_write_trailer(AVFormatContext * avf) {
+ ByteIOContext * bc = &avf->pb;
+ NUTContext * priv = avf->priv_data;
+ int i;
+
+ nut_muxer_uninit_reorder(priv->nut);
+ put_flush_packet(bc);
+
+ for(i = 0; priv->s[i].type != -1; i++ ) av_freep(&priv->s[i].fourcc);
+ av_freep(&priv->s);
+
+ return 0;
+}
+
+AVOutputFormat nut_muxer = {
+ "nut",
+ "nut format",
+ "video/x-nut",
+ "nut",
+ sizeof(NUTContext),
+ CODEC_ID_VORBIS,
+ CODEC_ID_MPEG4,
+ nut_write_header,
+ nut_write_packet,
+ nut_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif //CONFIG_MUXERS
+
+static int nut_probe(AVProbeData *p) {
+ if (p->buf_size >= ID_LENGTH && !memcmp(p->buf, ID_STRING, ID_LENGTH)) return AVPROBE_SCORE_MAX;
+
+ return 0;
+}
+
+static size_t av_read(void * h, size_t len, uint8_t * buf) {
+ ByteIOContext * bc = h;
+ return get_buffer(bc, buf, len);
+}
+
+static off_t av_seek(void * h, long long pos, int whence) {
+ ByteIOContext * bc = h;
+ if (whence == SEEK_END) {
+ pos = url_fsize(bc) + pos;
+ whence = SEEK_SET;
+ }
+ return url_fseek(bc, pos, whence);
+}
+
+static int nut_read_header(AVFormatContext * avf, AVFormatParameters * ap) {
+ NUTContext * priv = avf->priv_data;
+ ByteIOContext * bc = &avf->pb;
+ nut_demuxer_opts_t dopts = {
+ .input = {
+ .priv = bc,
+ .seek = av_seek,
+ .read = av_read,
+ .eof = NULL,
+ .file_pos = 0,
+ },
+ .alloc = { av_malloc, av_realloc, av_free },
+ .read_index = 1,
+ .cache_syncpoints = 1,
+ };
+ nut_context_t * nut = priv->nut = nut_demuxer_init(&dopts);
+ nut_stream_header_t * s;
+ int ret, i;
+
+ if ((ret = nut_read_headers(nut, &s, NULL))) {
+ av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(ret));
+ nut_demuxer_uninit(nut);
+ return -1;
+ }
+
+ priv->s = s;
+
+ for (i = 0; s[i].type != -1 && i < 2; i++) {
+ AVStream * st = av_new_stream(avf, i);
+ int j;
+
+ for (j = 0; j < s[i].fourcc_len && j < 8; j++) st->codec->codec_tag |= s[i].fourcc[j]<<(j*8);
+
+ st->codec->has_b_frames = s[i].decode_delay;
+
+ st->codec->extradata_size = s[i].codec_specific_len;
+ if (st->codec->extradata_size) {
+ st->codec->extradata = av_mallocz(st->codec->extradata_size);
+ memcpy(st->codec->extradata, s[i].codec_specific, st->codec->extradata_size);
+ }
+
+ av_set_pts_info(avf->streams[i], 60, s[i].time_base.nom, s[i].time_base.den);
+ st->start_time = 0;
+ st->duration = s[i].max_pts;
+
+ st->codec->codec_id = codec_get_id(nut_tags, st->codec->codec_tag);
+
+ switch(s[i].type) {
+ case NUT_AUDIO_CLASS:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_wav_id(st->codec->codec_tag);
+
+ st->codec->channels = s[i].channel_count;
+ st->codec->sample_rate = s[i].samplerate_nom / s[i].samplerate_denom;
+ break;
+ case NUT_VIDEO_CLASS:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = codec_get_bmp_id(st->codec->codec_tag);
+
+ st->codec->width = s[i].width;
+ st->codec->height = s[i].height;
+ st->codec->sample_aspect_ratio.num = s[i].sample_width;
+ st->codec->sample_aspect_ratio.den = s[i].sample_height;
+ break;
+ }
+ if (st->codec->codec_id == CODEC_ID_NONE) av_log(avf, AV_LOG_ERROR, "Unknown codec?!\n");
+ }
+
+ return 0;
+}
+
+static int nut_read_packet(AVFormatContext * avf, AVPacket * pkt) {
+ NUTContext * priv = avf->priv_data;
+ nut_packet_t pd;
+ int ret;
+
+ ret = nut_read_next_packet(priv->nut, &pd);
+
+ if (ret || av_new_packet(pkt, pd.len) < 0) {
+ if (ret != NUT_ERR_EOF)
+ av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(ret));
+ return -1;
+ }
+
+ if (pd.flags & NUT_FLAG_KEY) pkt->flags |= PKT_FLAG_KEY;
+ pkt->pts = pd.pts;
+ pkt->stream_index = pd.stream;
+ pkt->pos = url_ftell(&avf->pb);
+
+ ret = nut_read_frame(priv->nut, &pd.len, pkt->data);
+
+ return ret;
+}
+
+static int nut_read_seek(AVFormatContext * avf, int stream_index, int64_t target_ts, int flags) {
+ NUTContext * priv = avf->priv_data;
+ int active_streams[] = { stream_index, -1 };
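+ /* convert the target pts from stream time_base units into seconds, which
+ * is (as far as I can tell) the unit nut_seek() expects */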
+ double time_pos = target_ts * priv->s[stream_index].time_base.nom / (double)priv->s[stream_index].time_base.den;
+
+ if (nut_seek(priv->nut, time_pos, 2*!(flags & AVSEEK_FLAG_BACKWARD), active_streams)) return -1;
+
+ return 0;
+}
+
+static int nut_read_close(AVFormatContext *s) {
+ NUTContext * priv = s->priv_data;
+
+ nut_demuxer_uninit(priv->nut);
+
+ return 0;
+}
+
+AVInputFormat nut_demuxer = {
+ "nut",
+ "nut format",
+ sizeof(NUTContext),
+ nut_probe,
+ nut_read_header,
+ nut_read_packet,
+ nut_read_close,
+ nut_read_seek,
+ .extensions = "nut",
+};
diff --git a/contrib/ffmpeg/libavformat/matroska.c b/contrib/ffmpeg/libavformat/matroska.c
new file mode 100644
index 000000000..0cd119e71
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/matroska.c
@@ -0,0 +1,2767 @@
+/*
+ * Matroska file demuxer (no muxer yet)
+ * Copyright (c) 2003-2004 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file matroska.c
+ * Matroska file demuxer
+ * by Ronald Bultje <rbultje@ronald.bitfreak.net>
+ * with a little help from Moritz Bunkus <moritz@bunkus.org>
+ * Specs available on the matroska project page:
+ * http://www.matroska.org/.
+ */
+
+#include "avformat.h"
+/* For codec_get_bmp_id and codec_get_wav_id. */
+#include "riff.h"
+#include "intfloat_readwrite.h"
+
+/* EBML version supported */
+#define EBML_VERSION 1
+
+/* top-level master-IDs */
+#define EBML_ID_HEADER 0x1A45DFA3
+
+/* IDs in the HEADER master */
+#define EBML_ID_EBMLVERSION 0x4286
+#define EBML_ID_EBMLREADVERSION 0x42F7
+#define EBML_ID_EBMLMAXIDLENGTH 0x42F2
+#define EBML_ID_EBMLMAXSIZELENGTH 0x42F3
+#define EBML_ID_DOCTYPE 0x4282
+#define EBML_ID_DOCTYPEVERSION 0x4287
+#define EBML_ID_DOCTYPEREADVERSION 0x4285
+
+/* general EBML types */
+#define EBML_ID_VOID 0xEC
+
+/*
+ * Matroska element IDs. max. 32-bit.
+ */
+
+/* toplevel segment */
+#define MATROSKA_ID_SEGMENT 0x18538067
+
+/* matroska top-level master IDs */
+#define MATROSKA_ID_INFO 0x1549A966
+#define MATROSKA_ID_TRACKS 0x1654AE6B
+#define MATROSKA_ID_CUES 0x1C53BB6B
+#define MATROSKA_ID_TAGS 0x1254C367
+#define MATROSKA_ID_SEEKHEAD 0x114D9B74
+#define MATROSKA_ID_CLUSTER 0x1F43B675
+
+/* IDs in the info master */
+#define MATROSKA_ID_TIMECODESCALE 0x2AD7B1
+#define MATROSKA_ID_DURATION 0x4489
+#define MATROSKA_ID_TITLE 0x7BA9
+#define MATROSKA_ID_WRITINGAPP 0x5741
+#define MATROSKA_ID_MUXINGAPP 0x4D80
+#define MATROSKA_ID_DATEUTC 0x4461
+
+/* ID in the tracks master */
+#define MATROSKA_ID_TRACKENTRY 0xAE
+
+/* IDs in the trackentry master */
+#define MATROSKA_ID_TRACKNUMBER 0xD7
+#define MATROSKA_ID_TRACKUID 0x73C5
+#define MATROSKA_ID_TRACKTYPE 0x83
+#define MATROSKA_ID_TRACKAUDIO 0xE1
+#define MATROSKA_ID_TRACKVIDEO 0xE0
+#define MATROSKA_ID_CODECID 0x86
+#define MATROSKA_ID_CODECPRIVATE 0x63A2
+#define MATROSKA_ID_CODECNAME 0x258688
+#define MATROSKA_ID_CODECINFOURL 0x3B4040
+#define MATROSKA_ID_CODECDOWNLOADURL 0x26B240
+#define MATROSKA_ID_TRACKNAME 0x536E
+#define MATROSKA_ID_TRACKLANGUAGE 0x22B59C
+#define MATROSKA_ID_TRACKFLAGENABLED 0xB9
+#define MATROSKA_ID_TRACKFLAGDEFAULT 0x88
+#define MATROSKA_ID_TRACKFLAGLACING 0x9C
+#define MATROSKA_ID_TRACKMINCACHE 0x6DE7
+#define MATROSKA_ID_TRACKMAXCACHE 0x6DF8
+#define MATROSKA_ID_TRACKDEFAULTDURATION 0x23E383
+
+/* IDs in the trackvideo master */
+#define MATROSKA_ID_VIDEOFRAMERATE 0x2383E3
+#define MATROSKA_ID_VIDEODISPLAYWIDTH 0x54B0
+#define MATROSKA_ID_VIDEODISPLAYHEIGHT 0x54BA
+#define MATROSKA_ID_VIDEOPIXELWIDTH 0xB0
+#define MATROSKA_ID_VIDEOPIXELHEIGHT 0xBA
+#define MATROSKA_ID_VIDEOFLAGINTERLACED 0x9A
+#define MATROSKA_ID_VIDEOSTEREOMODE 0x53B9
+#define MATROSKA_ID_VIDEOASPECTRATIO 0x54B3
+#define MATROSKA_ID_VIDEOCOLOURSPACE 0x2EB524
+
+/* IDs in the trackaudio master */
+#define MATROSKA_ID_AUDIOSAMPLINGFREQ 0xB5
+#define MATROSKA_ID_AUDIOOUTSAMPLINGFREQ 0x78B5
+
+#define MATROSKA_ID_AUDIOBITDEPTH 0x6264
+#define MATROSKA_ID_AUDIOCHANNELS 0x9F
+
+/* ID in the cues master */
+#define MATROSKA_ID_POINTENTRY 0xBB
+
+/* IDs in the pointentry master */
+#define MATROSKA_ID_CUETIME 0xB3
+#define MATROSKA_ID_CUETRACKPOSITION 0xB7
+
+/* IDs in the cuetrackposition master */
+#define MATROSKA_ID_CUETRACK 0xF7
+#define MATROSKA_ID_CUECLUSTERPOSITION 0xF1
+
+/* IDs in the tags master */
+/* TODO */
+
+/* IDs in the seekhead master */
+#define MATROSKA_ID_SEEKENTRY 0x4DBB
+
+/* IDs in the seekpoint master */
+#define MATROSKA_ID_SEEKID 0x53AB
+#define MATROSKA_ID_SEEKPOSITION 0x53AC
+
+/* IDs in the cluster master */
+#define MATROSKA_ID_CLUSTERTIMECODE 0xE7
+#define MATROSKA_ID_BLOCKGROUP 0xA0
+
+/* IDs in the blockgroup master */
+#define MATROSKA_ID_BLOCK 0xA1
+#define MATROSKA_ID_BLOCKDURATION 0x9B
+#define MATROSKA_ID_BLOCKREFERENCE 0xFB
+
+typedef enum {
+ MATROSKA_TRACK_TYPE_VIDEO = 0x1,
+ MATROSKA_TRACK_TYPE_AUDIO = 0x2,
+ MATROSKA_TRACK_TYPE_COMPLEX = 0x3,
+ MATROSKA_TRACK_TYPE_LOGO = 0x10,
+ MATROSKA_TRACK_TYPE_SUBTITLE = 0x11,
+ MATROSKA_TRACK_TYPE_CONTROL = 0x20,
+} MatroskaTrackType;
+
+typedef enum {
+ MATROSKA_EYE_MODE_MONO = 0x0,
+ MATROSKA_EYE_MODE_RIGHT = 0x1,
+ MATROSKA_EYE_MODE_LEFT = 0x2,
+ MATROSKA_EYE_MODE_BOTH = 0x3,
+} MatroskaEyeMode;
+
+typedef enum {
+ MATROSKA_ASPECT_RATIO_MODE_FREE = 0x0,
+ MATROSKA_ASPECT_RATIO_MODE_KEEP = 0x1,
+ MATROSKA_ASPECT_RATIO_MODE_FIXED = 0x2,
+} MatroskaAspectRatioMode;
+
+/*
+ * These aren't in any way "matroska-form" things,
+ * it's just something I use in the muxer/demuxer.
+ */
+
+typedef enum {
+ MATROSKA_TRACK_ENABLED = (1<<0),
+ MATROSKA_TRACK_DEFAULT = (1<<1),
+ MATROSKA_TRACK_LACING = (1<<2),
+ MATROSKA_TRACK_REAL_V = (1<<4),
+ MATROSKA_TRACK_SHIFT = (1<<16)
+} MatroskaTrackFlags;
+
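+/* Track-type specific flags start at MATROSKA_TRACK_SHIFT (bit 16) so that
+ * they can live in the same 'flags' field without colliding with the
+ * generic flags above. */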
+typedef enum {
+ MATROSKA_VIDEOTRACK_INTERLACED = (MATROSKA_TRACK_SHIFT<<0)
+} MatroskaVideoTrackFlags;
+
+/*
+ * Matroska Codec IDs. Strings.
+ */
+
+typedef struct CodecTags{
+ const char *str;
+ enum CodecID id;
+}CodecTags;
+
+#define MATROSKA_CODEC_ID_VIDEO_VFW_FOURCC "V_MS/VFW/FOURCC"
+#define MATROSKA_CODEC_ID_AUDIO_ACM "A_MS/ACM"
+
+static CodecTags codec_tags[]={
+// {"V_MS/VFW/FOURCC" , CODEC_ID_NONE},
+ {"V_UNCOMPRESSED" , CODEC_ID_RAWVIDEO},
+ {"V_MPEG4/ISO/SP" , CODEC_ID_MPEG4},
+ {"V_MPEG4/ISO/ASP" , CODEC_ID_MPEG4},
+ {"V_MPEG4/ISO/AP" , CODEC_ID_MPEG4},
+ {"V_MPEG4/ISO/AVC" , CODEC_ID_H264},
+ {"V_MPEG4/MS/V3" , CODEC_ID_MSMPEG4V3},
+ {"V_MPEG1" , CODEC_ID_MPEG1VIDEO},
+ {"V_MPEG2" , CODEC_ID_MPEG2VIDEO},
+ {"V_MJPEG" , CODEC_ID_MJPEG},
+ {"V_REAL/RV10" , CODEC_ID_RV10},
+ {"V_REAL/RV20" , CODEC_ID_RV20},
+ {"V_REAL/RV30" , CODEC_ID_RV30},
+ {"V_REAL/RV40" , CODEC_ID_RV40},
+/* TODO: Real/Quicktime */
+
+// {"A_MS/ACM" , CODEC_ID_NONE},
+ {"A_MPEG/L1" , CODEC_ID_MP3},
+ {"A_MPEG/L2" , CODEC_ID_MP3},
+ {"A_MPEG/L3" , CODEC_ID_MP3},
+ {"A_PCM/INT/BIG" , CODEC_ID_PCM_U16BE},
+ {"A_PCM/INT/LIT" , CODEC_ID_PCM_U16LE},
+// {"A_PCM/FLOAT/IEEE" , CODEC_ID_NONE},
+ {"A_AC3" , CODEC_ID_AC3},
+ {"A_DTS" , CODEC_ID_DTS},
+ {"A_VORBIS" , CODEC_ID_VORBIS},
+ {"A_AAC" , CODEC_ID_AAC},
+ {"A_FLAC" , CODEC_ID_FLAC},
+ {"A_WAVPACK4" , CODEC_ID_WAVPACK},
+ {"A_TTA1" , CODEC_ID_TTA},
+ {NULL , CODEC_ID_NONE}
+/* TODO: AC3-9/10 (?), Real, Musepack, Quicktime */
+};
+
+/* max. depth in the EBML tree structure */
+#define EBML_MAX_DEPTH 16
+
+typedef struct Track {
+ MatroskaTrackType type;
+
+ /* Unique track number and track ID. stream_index is the index that
+ * the calling app uses for this track. */
+ uint32_t num,
+ uid,
+ stream_index;
+
+ char *name,
+ *language;
+
+ char *codec_id,
+ *codec_name;
+
+ unsigned char *codec_priv;
+ int codec_priv_size;
+
+ int64_t default_duration;
+ MatroskaTrackFlags flags;
+} MatroskaTrack;
+
+typedef struct MatroskaVideoTrack {
+ MatroskaTrack track;
+
+ int pixel_width,
+ pixel_height,
+ display_width,
+ display_height;
+
+ uint32_t fourcc;
+
+ MatroskaAspectRatioMode ar_mode;
+ MatroskaEyeMode eye_mode;
+
+ //..
+} MatroskaVideoTrack;
+
+typedef struct MatroskaAudioTrack {
+ MatroskaTrack track;
+
+ int channels,
+ bitdepth,
+ internal_samplerate,
+ samplerate;
+ //..
+} MatroskaAudioTrack;
+
+typedef struct MatroskaSubtitleTrack {
+ MatroskaTrack track;
+
+ //..
+} MatroskaSubtitleTrack;
+
+typedef struct MatroskaLevel {
+ uint64_t start, length;
+} MatroskaLevel;
+
+typedef struct MatroskaDemuxIndex {
+ uint64_t pos; /* of the corresponding *cluster*! */
+ uint16_t track; /* reference to 'num' */
+ uint64_t time; /* in nanoseconds */
+} MatroskaDemuxIndex;
+
+typedef struct MatroskaDemuxContext {
+ AVFormatContext *ctx;
+
+ /* ebml stuff */
+ int num_levels;
+ MatroskaLevel levels[EBML_MAX_DEPTH];
+ int level_up;
+
+ /* matroska stuff */
+ char *writing_app,
+ *muxing_app;
+ int64_t created;
+
+ /* timescale in the file */
+ int64_t time_scale;
+
+ /* position (time, ns) */
+ int64_t pos;
+
+ /* num_streams is the number of streams that av_new_stream() was called
+ * for ( = that are available to the calling program). */
+ int num_tracks, num_streams;
+ MatroskaTrack *tracks[MAX_STREAMS];
+
+ /* cache for ID peeking */
+ uint32_t peek_id;
+
+ /* byte position of the segment inside the stream */
+ offset_t segment_start;
+
+ /* The packet queue. */
+ AVPacket **packets;
+ int num_packets;
+
+ /* have we already parsed metadata/cues/clusters? */
+ int metadata_parsed,
+ index_parsed,
+ done;
+
+ /* The index for seeking. */
+ int num_indexes;
+ MatroskaDemuxIndex *index;
+} MatroskaDemuxContext;
+
+/*
+ * The first few functions handle EBML file parsing. The rest
+ * is the document interpretation. Matroska really is just an
+ * EBML file.
+ */
+
+/*
+ * Return: the number of levels in the hierarchy that the
+ * current element lies higher than the previous one.
+ * The opposite isn't handled here - descending a level happens
+ * automatically when a master element is read.
+ */
+
+static int
+ebml_read_element_level_up (MatroskaDemuxContext *matroska)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ offset_t pos = url_ftell(pb);
+ int num = 0;
+
+ while (matroska->num_levels > 0) {
+ MatroskaLevel *level = &matroska->levels[matroska->num_levels - 1];
+
+ if (pos >= level->start + level->length) {
+ matroska->num_levels--;
+ num++;
+ } else {
+ break;
+ }
+ }
+
+ return num;
+}
+
+/*
+ * Read: an "EBML number", which is defined as a variable-length
+ * array of bytes. The first byte indicates the length by giving a
+ * number of 0-bits followed by a one. The position of the first
+ * "one" bit inside the first byte indicates the length of this
+ * number.
+ * Returns: num. of bytes read. < 0 on error.
+ */
+
+static int
+ebml_read_num (MatroskaDemuxContext *matroska,
+ int max_size,
+ uint64_t *number)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int len_mask = 0x80, read = 1, n = 1;
+ int64_t total = 0;
+
+ /* the first byte tells us the length in bytes - get_byte() can normally
+ * return 0, but since that's not a valid first ebmlID byte, we can
+ * use it safely here to catch EOS. */
+ if (!(total = get_byte(pb))) {
+ /* we might encounter EOS here */
+ if (!url_feof(pb)) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Read error at pos. %"PRIu64" (0x%"PRIx64")\n",
+ pos, pos);
+ }
+ return AVERROR_IO; /* EOS or actual I/O error */
+ }
+
+ /* get the length of the EBML number */
+ while (read <= max_size && !(total & len_mask)) {
+ read++;
+ len_mask >>= 1;
+ }
+ if (read > max_size) {
+ offset_t pos = url_ftell(pb) - 1;
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Invalid EBML number size tag 0x%02x at pos %"PRIu64" (0x%"PRIx64")\n",
+ (uint8_t) total, pos, pos);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* read out length */
+ total &= ~len_mask;
+ while (n++ < read)
+ total = (total << 8) | get_byte(pb);
+
+ *number = total;
+
+ return read;
+}
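+/* Worked example (added for clarity, not part of the original source):
+ * for the EBML header ID bytes 0x1a 0x45 0xdf 0xa3 the first byte,
+ * 0x1a = 00011010b, carries its marker bit at 0x10, so the number
+ * occupies 4 bytes; ebml_read_num() strips that marker bit and returns
+ * 0x0a45dfa3, and ebml_read_element_id() below ORs it back in to
+ * reconstruct the full 0x1a45dfa3 ID. */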
+
+/*
+ * Read: the element content data ID.
+ * Return: the number of bytes read or < 0 on error.
+ */
+
+static int
+ebml_read_element_id (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ int *level_up)
+{
+ int read;
+ uint64_t total;
+
+ /* if we re-call this, use our cached ID */
+ if (matroska->peek_id != 0) {
+ if (level_up)
+ *level_up = 0;
+ *id = matroska->peek_id;
+ return 0;
+ }
+
+ /* read out the "EBML number", include tag in ID */
+ if ((read = ebml_read_num(matroska, 4, &total)) < 0)
+ return read;
+ *id = matroska->peek_id = total | (1 << (read * 7));
+
+ /* level tracking */
+ if (level_up)
+ *level_up = ebml_read_element_level_up(matroska);
+
+ return read;
+}
+
+/*
+ * Read: element content length.
+ * Return: the number of bytes read or < 0 on error.
+ */
+
+static int
+ebml_read_element_length (MatroskaDemuxContext *matroska,
+ uint64_t *length)
+{
+ /* clear cache since we're now beyond that data point */
+ matroska->peek_id = 0;
+
+ /* read out the "EBML number", include tag in ID */
+ return ebml_read_num(matroska, 8, length);
+}
+
+/*
+ * Return: the ID of the next element, or 0 on error.
+ * Level_up contains the amount of levels that this
+ * next element lies higher than the previous one.
+ */
+
+static uint32_t
+ebml_peek_id (MatroskaDemuxContext *matroska,
+ int *level_up)
+{
+ uint32_t id;
+
+ assert(level_up != NULL);
+
+ if (ebml_read_element_id(matroska, &id, level_up) < 0)
+ return 0;
+
+ return id;
+}
+
+/*
+ * Seek to a given offset.
+ * 0 is success, -1 is failure.
+ */
+
+static int
+ebml_read_seek (MatroskaDemuxContext *matroska,
+ offset_t offset)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+
+ /* clear ID cache, if any */
+ matroska->peek_id = 0;
+
+ return (url_fseek(pb, offset, SEEK_SET) == offset) ? 0 : -1;
+}
+
+/*
+ * Skip the next element.
+ * 0 is success, -1 is failure.
+ */
+
+static int
+ebml_read_skip (MatroskaDemuxContext *matroska)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ uint32_t id;
+ uint64_t length;
+ int res;
+
+ if ((res = ebml_read_element_id(matroska, &id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &length)) < 0)
+ return res;
+
+ url_fskip(pb, length);
+
+ return 0;
+}
+
+/*
+ * Read the next element as an unsigned int.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_uint (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ uint64_t *num)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int n = 0, size, res;
+ uint64_t rlength;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ size = rlength;
+ if (size < 1 || size > 8) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Invalid uint element size %d at position %"PRId64" (0x%"PRIx64")\n",
+ size, pos, pos);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* big-endian ordering; build up number */
+ *num = 0;
+ while (n++ < size)
+ *num = (*num << 8) | get_byte(pb);
+
+ return 0;
+}
+
+/*
+ * Read the next element as a signed int.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_sint (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ int64_t *num)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int size, n = 1, negative = 0, res;
+ uint64_t rlength;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ size = rlength;
+ if (size < 1 || size > 8) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Invalid sint element size %d at position %"PRId64" (0x%"PRIx64")\n",
+ size, pos, pos);
+ return AVERROR_INVALIDDATA;
+ }
+ if ((*num = get_byte(pb)) & 0x80) {
+ negative = 1;
+ *num &= ~0x80;
+ }
+ while (n++ < size)
+ *num = (*num << 8) | get_byte(pb);
+
+ /* make signed */
+ if (negative)
+ *num = *num - (1LL << ((8 * size) - 1));
+
+ return 0;
+}
+
+/*
+ * Read the next element as a float.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_float (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ double *num)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int size, res;
+ uint64_t rlength;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ size = rlength;
+
+ if (size == 4) {
+ *num= av_int2flt(get_be32(pb));
+ } else if(size==8){
+ *num= av_int2dbl(get_be64(pb));
+ } else{
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Invalid float element size %d at position %"PRIu64" (0x%"PRIx64")\n",
+ size, pos, pos);
+ return AVERROR_INVALIDDATA;
+ }
+
+ return 0;
+}
+
+/*
+ * Read the next element as an ASCII string.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_ascii (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ char **str)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ int size, res;
+ uint64_t rlength;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ size = rlength;
+
+ /* ebml strings are usually not 0-terminated, so we allocate one
+ * byte more, read the string and NULL-terminate it ourselves. */
+ if (size < 0 || !(*str = av_malloc(size + 1))) {
+ av_log(matroska->ctx, AV_LOG_ERROR, "Memory allocation failed\n");
+ return AVERROR_NOMEM;
+ }
+ if (get_buffer(pb, (uint8_t *) *str, size) != size) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Read error at pos. %"PRIu64" (0x%"PRIx64")\n", pos, pos);
+ return AVERROR_IO;
+ }
+ (*str)[size] = '\0';
+
+ return 0;
+}
+
+/*
+ * Read the next element as a UTF-8 string.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_utf8 (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ char **str)
+{
+ return ebml_read_ascii(matroska, id, str);
+}
+
+/*
+ * Read the next element as a date (nanoseconds since 1/1/2000).
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_date (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ int64_t *date)
+{
+ return ebml_read_sint(matroska, id, date);
+}
+
+/*
+ * Read the next element, but only the header. The contents
+ * are supposed to be sub-elements which can be read separately.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_master (MatroskaDemuxContext *matroska,
+ uint32_t *id)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ uint64_t length;
+ MatroskaLevel *level;
+ int res;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &length)) < 0)
+ return res;
+
+ /* protect... (Heaven forbid that the '>' ever becomes true) */
+ if (matroska->num_levels >= EBML_MAX_DEPTH) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "File moves beyond max. allowed depth (%d)\n", EBML_MAX_DEPTH);
+ return AVERROR_NOTSUPP;
+ }
+
+ /* remember level */
+ level = &matroska->levels[matroska->num_levels++];
+ level->start = url_ftell(pb);
+ level->length = length;
+
+ return 0;
+}
+
+/*
+ * Read the next element as binary data.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_binary (MatroskaDemuxContext *matroska,
+ uint32_t *id,
+ uint8_t **binary,
+ int *size)
+{
+ ByteIOContext *pb = &matroska->ctx->pb;
+ uint64_t rlength;
+ int res;
+
+ if ((res = ebml_read_element_id(matroska, id, NULL)) < 0 ||
+ (res = ebml_read_element_length(matroska, &rlength)) < 0)
+ return res;
+ *size = rlength;
+
+ if (!(*binary = av_malloc(*size))) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Memory allocation error\n");
+ return AVERROR_NOMEM;
+ }
+
+ if (get_buffer(pb, *binary, *size) != *size) {
+ offset_t pos = url_ftell(pb);
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Read error at pos. %"PRIu64" (0x%"PRIx64")\n", pos, pos);
+ return AVERROR_IO;
+ }
+
+ return 0;
+}
+
+/*
+ * Read signed/unsigned "EBML" numbers.
+ * Return: number of bytes processed, < 0 on error.
+ * XXX: use ebml_read_num().
+ */
+
+static int
+matroska_ebmlnum_uint (uint8_t *data,
+ uint32_t size,
+ uint64_t *num)
+{
+ int len_mask = 0x80, read = 1, n = 1, num_ffs = 0;
+ uint64_t total;
+
+ if (size <= 0)
+ return AVERROR_INVALIDDATA;
+
+ total = data[0];
+ while (read <= 8 && !(total & len_mask)) {
+ read++;
+ len_mask >>= 1;
+ }
+ if (read > 8)
+ return AVERROR_INVALIDDATA;
+
+ if ((total &= (len_mask - 1)) == len_mask - 1)
+ num_ffs++;
+ if (size < read)
+ return AVERROR_INVALIDDATA;
+ while (n < read) {
+ if (data[n] == 0xff)
+ num_ffs++;
+ total = (total << 8) | data[n];
+ n++;
+ }
+
+ if (read == num_ffs)
+ *num = (uint64_t)-1;
+ else
+ *num = total;
+
+ return read;
+}
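+/* Illustrative example (not in the original source): in a Matroska block
+ * header the track number is stored as such an EBML number, so a single
+ * byte 0x81 decodes to track 1, while a value whose data bits are all
+ * ones (read == num_ffs above) is treated as the reserved "unknown"
+ * value and returned as (uint64_t)-1. */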
+
+/*
+ * Same as above, but signed.
+ */
+
+static int
+matroska_ebmlnum_sint (uint8_t *data,
+ uint32_t size,
+ int64_t *num)
+{
+ uint64_t unum;
+ int res;
+
+ /* read as unsigned number first */
+ if ((res = matroska_ebmlnum_uint(data, size, &unum)) < 0)
+ return res;
+
+ /* make signed (weird way) */
+ if (unum == (uint64_t)-1)
+ *num = INT64_MAX;
+ else
+ *num = unum - ((1LL << ((7 * res) - 1)) - 1);
+
+ return res;
+}
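+/* Illustrative note (not in the original source): a 1-byte signed EBML
+ * number has 7 data bits, so res == 1 and the stored values 0..126 map
+ * to -63..+63 after subtracting (1 << 6) - 1 = 63. */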
+
+/*
+ * Read an EBML header.
+ * 0 is success, < 0 is failure.
+ */
+
+static int
+ebml_read_header (MatroskaDemuxContext *matroska,
+ char **doctype,
+ int *version)
+{
+ uint32_t id;
+ int level_up, res = 0;
+
+ /* default init */
+ if (doctype)
+ *doctype = NULL;
+ if (version)
+ *version = 1;
+
+ if (!(id = ebml_peek_id(matroska, &level_up)) ||
+ level_up != 0 || id != EBML_ID_HEADER) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "This is not an EBML file (id=0x%x/0x%x)\n", id, EBML_ID_HEADER);
+ return AVERROR_INVALIDDATA;
+ }
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ return res;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &level_up)))
+ return AVERROR_IO;
+
+ /* end-of-header */
+ if (level_up)
+ break;
+
+ switch (id) {
+ /* is our read version up to date? */
+ case EBML_ID_EBMLREADVERSION: {
+ uint64_t num;
+
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ return res;
+ if (num > EBML_VERSION) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "EBML version %"PRIu64" (> %d) is not supported\n",
+ num, EBML_VERSION);
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ }
+
+ /* we only handle 8 byte lengths at max */
+ case EBML_ID_EBMLMAXSIZELENGTH: {
+ uint64_t num;
+
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ return res;
+ if (num > sizeof(uint64_t)) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Integers of size %"PRIu64" (> %zd) not supported\n",
+ num, sizeof(uint64_t));
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ }
+
+ /* we handle 4 byte IDs at max */
+ case EBML_ID_EBMLMAXIDLENGTH: {
+ uint64_t num;
+
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ return res;
+ if (num > sizeof(uint32_t)) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "IDs of size %"PRIu64" (> %zu) not supported\n",
+ num, sizeof(uint32_t));
+ return AVERROR_INVALIDDATA;
+ }
+ break;
+ }
+
+ case EBML_ID_DOCTYPE: {
+ char *text;
+
+ if ((res = ebml_read_ascii(matroska, &id, &text)) < 0)
+ return res;
+ if (doctype) {
+ if (*doctype)
+ av_free(*doctype);
+ *doctype = text;
+ } else
+ av_free(text);
+ break;
+ }
+
+ case EBML_ID_DOCTYPEREADVERSION: {
+ uint64_t num;
+
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ return res;
+ if (version)
+ *version = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown data type 0x%x in EBML header", id);
+ /* pass-through */
+
+ case EBML_ID_VOID:
+ /* we ignore these two, as they don't tell us anything we
+ * care about */
+ case EBML_ID_EBMLVERSION:
+ case EBML_ID_DOCTYPEVERSION:
+ res = ebml_read_skip (matroska);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Put one packet in an application-supplied AVPacket struct.
+ * Returns 0 on success or -1 on failure.
+ */
+
+static int
+matroska_deliver_packet (MatroskaDemuxContext *matroska,
+ AVPacket *pkt)
+{
+ if (matroska->num_packets > 0) {
+ memcpy(pkt, matroska->packets[0], sizeof(AVPacket));
+ av_free(matroska->packets[0]);
+ if (matroska->num_packets > 1) {
+ memmove(&matroska->packets[0], &matroska->packets[1],
+ (matroska->num_packets - 1) * sizeof(AVPacket *));
+ matroska->packets =
+ av_realloc(matroska->packets, (matroska->num_packets - 1) *
+ sizeof(AVPacket *));
+ } else {
+ av_freep(&matroska->packets);
+ }
+ matroska->num_packets--;
+ return 0;
+ }
+
+ return -1;
+}
+
+/*
+ * Put a packet into our internal queue. Will be delivered to the
+ * user/application during the next get_packet() call.
+ */
+
+static void
+matroska_queue_packet (MatroskaDemuxContext *matroska,
+ AVPacket *pkt)
+{
+ matroska->packets =
+ av_realloc(matroska->packets, (matroska->num_packets + 1) *
+ sizeof(AVPacket *));
+ matroska->packets[matroska->num_packets] = pkt;
+ matroska->num_packets++;
+}
+
+/*
+ * Autodetecting...
+ */
+
+static int
+matroska_probe (AVProbeData *p)
+{
+ uint64_t total = 0;
+ int len_mask = 0x80, size = 1, n = 1;
+ uint8_t probe_data[] = { 'm', 'a', 't', 'r', 'o', 's', 'k', 'a' };
+
+ if (p->buf_size < 5)
+ return 0;
+
+ /* ebml header? */
+ if ((p->buf[0] << 24 | p->buf[1] << 16 |
+ p->buf[2] << 8 | p->buf[3]) != EBML_ID_HEADER)
+ return 0;
+
+ /* length of header */
+ total = p->buf[4];
+ while (size <= 8 && !(total & len_mask)) {
+ size++;
+ len_mask >>= 1;
+ }
+ if (size > 8)
+ return 0;
+ total &= (len_mask - 1);
+ while (n < size)
+ total = (total << 8) | p->buf[4 + n++];
+
+ /* does the probe data contain the whole header? */
+ if (p->buf_size < 4 + size + total)
+ return 0;
+
+ /* the header must contain the document type 'matroska'. For now,
+ * we don't parse the whole header but simply check for the
+ * availability of that array of characters inside the header.
+ * Not fully fool-proof, but good enough. */
+ for (n = 4 + size; n < 4 + size + total - sizeof(probe_data); n++)
+ if (!memcmp (&p->buf[n], probe_data, sizeof(probe_data)))
+ return AVPROBE_SCORE_MAX;
+
+ return 0;
+}
+
+/*
+ * From here on, it's all XML-style DTD stuff... Needs no comments.
+ */
+
+static int
+matroska_parse_info (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "Parsing info...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* cluster timecode */
+ case MATROSKA_ID_TIMECODESCALE: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ matroska->time_scale = num;
+ break;
+ }
+
+ case MATROSKA_ID_DURATION: {
+ double num;
+ if ((res = ebml_read_float(matroska, &id, &num)) < 0)
+ break;
+ matroska->ctx->duration = num * matroska->time_scale * 1000 / AV_TIME_BASE;
+ break;
+ }
+
+ case MATROSKA_ID_TITLE: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ strncpy(matroska->ctx->title, text,
+ sizeof(matroska->ctx->title)-1);
+ av_free(text);
+ break;
+ }
+
+ case MATROSKA_ID_WRITINGAPP: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ matroska->writing_app = text;
+ break;
+ }
+
+ case MATROSKA_ID_MUXINGAPP: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ matroska->muxing_app = text;
+ break;
+ }
+
+ case MATROSKA_ID_DATEUTC: {
+ int64_t time;
+ if ((res = ebml_read_date(matroska, &id, &time)) < 0)
+ break;
+ matroska->created = time;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in info header\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_add_stream (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+ MatroskaTrack *track;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing track, adding stream..,\n");
+
+ /* Allocate a generic track. As soon as we know its type we'll realloc. */
+ track = av_mallocz(sizeof(MatroskaTrack));
+ matroska->num_tracks++;
+
+ /* start with the master */
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ return res;
+
+ /* try reading the trackentry headers */
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up > 0) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* track number (unique stream ID) */
+ case MATROSKA_ID_TRACKNUMBER: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ track->num = num;
+ break;
+ }
+
+ /* track UID (unique identifier) */
+ case MATROSKA_ID_TRACKUID: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ track->uid = num;
+ break;
+ }
+
+ /* track type (video, audio, combined, subtitle, etc.) */
+ case MATROSKA_ID_TRACKTYPE: {
+ uint64_t num;
+ if (track->type != 0) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "More than one tracktype in an entry - skip\n");
+ break;
+ }
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ track->type = num;
+
+ /* ok, so we're actually going to reallocate this thing */
+ switch (track->type) {
+ case MATROSKA_TRACK_TYPE_VIDEO:
+ track = (MatroskaTrack *)
+ av_realloc(track, sizeof(MatroskaVideoTrack));
+ break;
+ case MATROSKA_TRACK_TYPE_AUDIO:
+ track = (MatroskaTrack *)
+ av_realloc(track, sizeof(MatroskaAudioTrack));
+ ((MatroskaAudioTrack *)track)->channels = 1;
+ ((MatroskaAudioTrack *)track)->samplerate = 8000;
+ break;
+ case MATROSKA_TRACK_TYPE_SUBTITLE:
+ track = (MatroskaTrack *)
+ av_realloc(track, sizeof(MatroskaSubtitleTrack));
+ break;
+ case MATROSKA_TRACK_TYPE_COMPLEX:
+ case MATROSKA_TRACK_TYPE_LOGO:
+ case MATROSKA_TRACK_TYPE_CONTROL:
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown or unsupported track type 0x%x\n",
+ track->type);
+ track->type = 0;
+ break;
+ }
+ matroska->tracks[matroska->num_tracks - 1] = track;
+ break;
+ }
+
+ /* tracktype specific stuff for video */
+ case MATROSKA_ID_TRACKVIDEO: {
+ MatroskaVideoTrack *videotrack;
+ if (track->type != MATROSKA_TRACK_TYPE_VIDEO) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "video data in non-video track - ignoring\n");
+ res = AVERROR_INVALIDDATA;
+ break;
+ } else if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ videotrack = (MatroskaVideoTrack *)track;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up > 0) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* FIXME: this should be one level up, but it shows up here */
+ case MATROSKA_ID_TRACKDEFAULTDURATION: {
+ uint64_t num;
+ if ((res = ebml_read_uint (matroska, &id,
+ &num)) < 0)
+ break;
+ track->default_duration = num;
+ break;
+ }
+
+ /* video framerate */
+ case MATROSKA_ID_VIDEOFRAMERATE: {
+ double num;
+ if ((res = ebml_read_float(matroska, &id,
+ &num)) < 0)
+ break;
+ track->default_duration = 1000000000 * (1. / num);
+ break;
+ }
+
+ /* width of the size to display the video at */
+ case MATROSKA_ID_VIDEODISPLAYWIDTH: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->display_width = num;
+ break;
+ }
+
+ /* height of the size to display the video at */
+ case MATROSKA_ID_VIDEODISPLAYHEIGHT: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->display_height = num;
+ break;
+ }
+
+ /* width of the video in the file */
+ case MATROSKA_ID_VIDEOPIXELWIDTH: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->pixel_width = num;
+ break;
+ }
+
+ /* height of the video in the file */
+ case MATROSKA_ID_VIDEOPIXELHEIGHT: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->pixel_height = num;
+ break;
+ }
+
+ /* whether the video is interlaced */
+ case MATROSKA_ID_VIDEOFLAGINTERLACED: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ if (num)
+ track->flags |=
+ MATROSKA_VIDEOTRACK_INTERLACED;
+ else
+ track->flags &=
+ ~MATROSKA_VIDEOTRACK_INTERLACED;
+ break;
+ }
+
+ /* stereo mode (whether the video has two streams,
+ * where one is for the left eye and the other for
+ * the right eye, which creates a 3D-like
+ * effect) */
+ case MATROSKA_ID_VIDEOSTEREOMODE: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ if (num != MATROSKA_EYE_MODE_MONO &&
+ num != MATROSKA_EYE_MODE_LEFT &&
+ num != MATROSKA_EYE_MODE_RIGHT &&
+ num != MATROSKA_EYE_MODE_BOTH) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Ignoring unknown eye mode 0x%x\n",
+ (uint32_t) num);
+ break;
+ }
+ videotrack->eye_mode = num;
+ break;
+ }
+
+ /* aspect ratio behaviour */
+ case MATROSKA_ID_VIDEOASPECTRATIO: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ if (num != MATROSKA_ASPECT_RATIO_MODE_FREE &&
+ num != MATROSKA_ASPECT_RATIO_MODE_KEEP &&
+ num != MATROSKA_ASPECT_RATIO_MODE_FIXED) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Ignoring unknown aspect ratio 0x%x\n",
+ (uint32_t) num);
+ break;
+ }
+ videotrack->ar_mode = num;
+ break;
+ }
+
+ /* colourspace (only matters for raw video)
+ * fourcc */
+ case MATROSKA_ID_VIDEOCOLOURSPACE: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ videotrack->fourcc = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown video track header entry "
+ "0x%x - ignoring\n", id);
+ /* pass-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+ break;
+ }
+
+ /* tracktype specific stuff for audio */
+ case MATROSKA_ID_TRACKAUDIO: {
+ MatroskaAudioTrack *audiotrack;
+ if (track->type != MATROSKA_TRACK_TYPE_AUDIO) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "audio data in non-audio track - ignoring\n");
+ res = AVERROR_INVALIDDATA;
+ break;
+ } else if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ audiotrack = (MatroskaAudioTrack *)track;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up > 0) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* samplerate */
+ case MATROSKA_ID_AUDIOSAMPLINGFREQ: {
+ double num;
+ if ((res = ebml_read_float(matroska, &id,
+ &num)) < 0)
+ break;
+ audiotrack->internal_samplerate =
+ audiotrack->samplerate = num;
+ break;
+ }
+
+ case MATROSKA_ID_AUDIOOUTSAMPLINGFREQ: {
+ double num;
+ if ((res = ebml_read_float(matroska, &id,
+ &num)) < 0)
+ break;
+ audiotrack->samplerate = num;
+ break;
+ }
+
+ /* bitdepth */
+ case MATROSKA_ID_AUDIOBITDEPTH: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ audiotrack->bitdepth = num;
+ break;
+ }
+
+ /* channels */
+ case MATROSKA_ID_AUDIOCHANNELS: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id,
+ &num)) < 0)
+ break;
+ audiotrack->channels = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown audio track header entry "
+ "0x%x - ignoring\n", id);
+ /* pass-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+ break;
+ }
+
+ /* codec identifier */
+ case MATROSKA_ID_CODECID: {
+ char *text;
+ if ((res = ebml_read_ascii(matroska, &id, &text)) < 0)
+ break;
+ track->codec_id = text;
+ break;
+ }
+
+ /* codec private data */
+ case MATROSKA_ID_CODECPRIVATE: {
+ uint8_t *data;
+ int size;
+ if ((res = ebml_read_binary(matroska, &id, &data, &size)) < 0)
+ break;
+ track->codec_priv = data;
+ track->codec_priv_size = size;
+ break;
+ }
+
+ /* name of the codec */
+ case MATROSKA_ID_CODECNAME: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ track->codec_name = text;
+ break;
+ }
+
+ /* name of this track */
+ case MATROSKA_ID_TRACKNAME: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ track->name = text;
+ break;
+ }
+
+ /* language (matters for audio/subtitles, mostly) */
+ case MATROSKA_ID_TRACKLANGUAGE: {
+ char *text;
+ if ((res = ebml_read_utf8(matroska, &id, &text)) < 0)
+ break;
+ track->language = text;
+ break;
+ }
+
+ /* whether this is actually used */
+ case MATROSKA_ID_TRACKFLAGENABLED: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ if (num)
+ track->flags |= MATROSKA_TRACK_ENABLED;
+ else
+ track->flags &= ~MATROSKA_TRACK_ENABLED;
+ break;
+ }
+
+ /* whether it's the default for this track type */
+ case MATROSKA_ID_TRACKFLAGDEFAULT: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ if (num)
+ track->flags |= MATROSKA_TRACK_DEFAULT;
+ else
+ track->flags &= ~MATROSKA_TRACK_DEFAULT;
+ break;
+ }
+
+ /* lacing (like MPEG, where blocks don't end/start on frame
+ * boundaries) */
+ case MATROSKA_ID_TRACKFLAGLACING: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ if (num)
+ track->flags |= MATROSKA_TRACK_LACING;
+ else
+ track->flags &= ~MATROSKA_TRACK_LACING;
+ break;
+ }
+
+ /* default length (in time) of one data block in this track */
+ case MATROSKA_ID_TRACKDEFAULTDURATION: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ track->default_duration = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown track header entry 0x%x - ignoring\n", id);
+ /* pass-through */
+
+ case EBML_ID_VOID:
+ /* we ignore these because they contain nothing useful. */
+ case MATROSKA_ID_CODECINFOURL:
+ case MATROSKA_ID_CODECDOWNLOADURL:
+ case MATROSKA_ID_TRACKMINCACHE:
+ case MATROSKA_ID_TRACKMAXCACHE:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_tracks (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing tracks...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* one track within the "all-tracks" header */
+ case MATROSKA_ID_TRACKENTRY:
+ res = matroska_add_stream(matroska);
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in track header\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_index (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+ MatroskaDemuxIndex idx;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing index...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* one single index entry ('point') */
+ case MATROSKA_ID_POINTENTRY:
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+
+ /* in the end, we hope to fill one entry with a
+ * timestamp, a file position and a tracknum */
+ idx.pos = (uint64_t) -1;
+ idx.time = (uint64_t) -1;
+ idx.track = (uint16_t) -1;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* one single index entry ('point') */
+ case MATROSKA_ID_CUETIME: {
+ uint64_t time;
+ if ((res = ebml_read_uint(matroska, &id,
+ &time)) < 0)
+ break;
+ idx.time = time * matroska->time_scale;
+ break;
+ }
+
+ /* position in the file + track to which it
+ * belongs */
+ case MATROSKA_ID_CUETRACKPOSITION:
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id (matroska,
+ &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* track number */
+ case MATROSKA_ID_CUETRACK: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska,
+ &id, &num)) < 0)
+ break;
+ idx.track = num;
+ break;
+ }
+
+ /* position in file */
+ case MATROSKA_ID_CUECLUSTERPOSITION: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska,
+ &id, &num)) < 0)
+ break;
+ idx.pos = num;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in "
+ "CuesTrackPositions\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in cuespoint "
+ "index\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ /* so let's see if we got what we wanted */
+ if (idx.pos != (uint64_t) -1 &&
+ idx.time != (uint64_t) -1 &&
+ idx.track != (uint16_t) -1) {
+ if (matroska->num_indexes % 32 == 0) {
+ /* re-allocate bigger index */
+ matroska->index =
+ av_realloc(matroska->index,
+ (matroska->num_indexes + 32) *
+ sizeof(MatroskaDemuxIndex));
+ }
+ matroska->index[matroska->num_indexes] = idx;
+ matroska->num_indexes++;
+ }
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in cues header\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_metadata (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* Hm, this is unsupported... */
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in metadata header\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_seekhead (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing seekhead...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ case MATROSKA_ID_SEEKENTRY: {
+ uint32_t seek_id = 0, peek_id_cache = 0;
+ uint64_t seek_pos = (uint64_t) -1, t;
+
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ case MATROSKA_ID_SEEKID:
+ res = ebml_read_uint(matroska, &id, &t);
+ seek_id = t;
+ break;
+
+ case MATROSKA_ID_SEEKPOSITION:
+ res = ebml_read_uint(matroska, &id, &seek_pos);
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown seekhead ID 0x%x\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ if (!seek_id || seek_pos == (uint64_t) -1) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Incomplete seekhead entry (0x%x/%"PRIu64")\n",
+ seek_id, seek_pos);
+ break;
+ }
+
+ switch (seek_id) {
+ case MATROSKA_ID_CUES:
+ case MATROSKA_ID_TAGS: {
+ uint32_t level_up = matroska->level_up;
+ offset_t before_pos;
+ uint64_t length;
+ MatroskaLevel level;
+
+ /* remember the peeked ID and the current position */
+ peek_id_cache = matroska->peek_id;
+ before_pos = url_ftell(&matroska->ctx->pb);
+
+ /* seek */
+ if ((res = ebml_read_seek(matroska, seek_pos +
+ matroska->segment_start)) < 0)
+ return res;
+
+ /* we don't want to lose our seekhead level, so we add
+ * a dummy. This is a crude hack. */
+ if (matroska->num_levels == EBML_MAX_DEPTH) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Max EBML element depth (%d) reached, "
+ "cannot parse further.\n", EBML_MAX_DEPTH);
+ return AVERROR_UNKNOWN;
+ }
+
+ level.start = 0;
+ level.length = (uint64_t)-1;
+ matroska->levels[matroska->num_levels] = level;
+ matroska->num_levels++;
+
+ /* check ID */
+ if (!(id = ebml_peek_id (matroska,
+ &matroska->level_up)))
+ goto finish;
+ if (id != seek_id) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "We looked for ID=0x%x but got "
+                                   "ID=0x%x (pos=%"PRIu64")\n",
+ seek_id, id, seek_pos +
+ matroska->segment_start);
+ goto finish;
+ }
+
+ /* read master + parse */
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ goto finish;
+ switch (id) {
+ case MATROSKA_ID_CUES:
+ if (!(res = matroska_parse_index(matroska)) ||
+ url_feof(&matroska->ctx->pb)) {
+ matroska->index_parsed = 1;
+ res = 0;
+ }
+ break;
+ case MATROSKA_ID_TAGS:
+ if (!(res = matroska_parse_metadata(matroska)) ||
+ url_feof(&matroska->ctx->pb)) {
+ matroska->metadata_parsed = 1;
+ res = 0;
+ }
+ break;
+ }
+
+ finish:
+ /* remove dummy level */
+ while (matroska->num_levels) {
+ matroska->num_levels--;
+ length =
+ matroska->levels[matroska->num_levels].length;
+ if (length == (uint64_t)-1)
+ break;
+ }
+
+ /* seek back */
+ if ((res = ebml_read_seek(matroska, before_pos)) < 0)
+ return res;
+ matroska->peek_id = peek_id_cache;
+ matroska->level_up = level_up;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Ignoring seekhead entry for ID=0x%x\n",
+ seek_id);
+ break;
+ }
+
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown seekhead ID 0x%x\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof(*x))
+
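+/* Map the profile suffix of the codec ID ("MAIN", "LC", "SSR") to the
+ * corresponding MPEG-4 audio object type (1, 2 or 3). */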
+static int
+matroska_aac_profile (char *codec_id)
+{
+ static const char *aac_profiles[] = {
+ "MAIN", "LC", "SSR"
+ };
+ int profile;
+
+ for (profile=0; profile<ARRAY_SIZE(aac_profiles); profile++)
+ if (strstr(codec_id, aac_profiles[profile]))
+ break;
+ return profile + 1;
+}
+
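+/* Look up the MPEG-4 sampling frequency index of a sample rate
+ * (index 0 is 96000 Hz, ..., index 11 is 8000 Hz). */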
+static int
+matroska_aac_sri (int samplerate)
+{
+ static const int aac_sample_rates[] = {
+ 96000, 88200, 64000, 48000, 44100, 32000,
+ 24000, 22050, 16000, 12000, 11025, 8000,
+ };
+ int sri;
+
+ for (sri=0; sri<ARRAY_SIZE(aac_sample_rates); sri++)
+ if (aac_sample_rates[sri] == samplerate)
+ break;
+ return sri;
+}
+
+static int
+matroska_read_header (AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MatroskaDemuxContext *matroska = s->priv_data;
+ char *doctype;
+ int version, last_level, res = 0;
+ uint32_t id;
+
+ matroska->ctx = s;
+
+ /* First read the EBML header. */
+ doctype = NULL;
+ if ((res = ebml_read_header(matroska, &doctype, &version)) < 0)
+ return res;
+ if ((doctype == NULL) || strcmp(doctype, "matroska")) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "Wrong EBML doctype ('%s' != 'matroska').\n",
+ doctype ? doctype : "(none)");
+ if (doctype)
+ av_free(doctype);
+ return AVERROR_NOFMT;
+ }
+ av_free(doctype);
+ if (version != 1) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+               "This demuxer supports Matroska version 1 only, file is version %d\n",
+ version);
+ return AVERROR_NOFMT;
+ }
+
+ /* The next thing is a segment. */
+ while (1) {
+ if (!(id = ebml_peek_id(matroska, &last_level)))
+ return AVERROR_IO;
+ if (id == MATROSKA_ID_SEGMENT)
+ break;
+
+ /* oi! */
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Expected a Segment ID (0x%x), but received 0x%x!\n",
+ MATROSKA_ID_SEGMENT, id);
+ if ((res = ebml_read_skip(matroska)) < 0)
+ return res;
+ }
+
+ /* We now have a Matroska segment.
+ * Seeks are from the beginning of the segment,
+ * after the segment ID/length. */
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ return res;
+ matroska->segment_start = url_ftell(&s->pb);
+
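+    /* default TimecodeScale: 1000000 ns (1 ms) per tick, until the
+     * segment info overrides it */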
+ matroska->time_scale = 1000000;
+ /* we've found our segment, start reading the different contents in here */
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* stream info */
+ case MATROSKA_ID_INFO: {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_info(matroska);
+ break;
+ }
+
+ /* track info headers */
+ case MATROSKA_ID_TRACKS: {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_tracks(matroska);
+ break;
+ }
+
+ /* stream index */
+ case MATROSKA_ID_CUES: {
+ if (!matroska->index_parsed) {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_index(matroska);
+ } else
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ /* metadata */
+ case MATROSKA_ID_TAGS: {
+ if (!matroska->metadata_parsed) {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_metadata(matroska);
+ } else
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ /* file index (if seekable, seek to Cues/Tags to parse it) */
+ case MATROSKA_ID_SEEKHEAD: {
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_seekhead(matroska);
+ break;
+ }
+
+ case MATROSKA_ID_CLUSTER: {
+ /* Do not read the master - this will be done in the next
+ * call to matroska_read_packet. */
+ res = 1;
+ break;
+ }
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown matroska file header ID 0x%x\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ /* Have we found a cluster? */
+ if (ebml_peek_id(matroska, NULL) == MATROSKA_ID_CLUSTER) {
+ int i, j;
+ MatroskaTrack *track;
+ AVStream *st;
+
+ for (i = 0; i < matroska->num_tracks; i++) {
+ enum CodecID codec_id = CODEC_ID_NONE;
+ uint8_t *extradata = NULL;
+ int extradata_size = 0;
+ int extradata_offset = 0;
+ track = matroska->tracks[i];
+
+ /* libavformat does not really support subtitles.
+ * Also apply some sanity checks. */
+ if ((track->type == MATROSKA_TRACK_TYPE_SUBTITLE) ||
+ (track->codec_id == NULL))
+ continue;
+
+ for(j=0; codec_tags[j].str; j++){
+ if(!strncmp(codec_tags[j].str, track->codec_id,
+ strlen(codec_tags[j].str))){
+ codec_id= codec_tags[j].id;
+ break;
+ }
+ }
+
+ /* Set the FourCC from the CodecID. */
+ /* This is the MS compatibility mode which stores a
+ * BITMAPINFOHEADER in the CodecPrivate. */
+ if (!strcmp(track->codec_id,
+ MATROSKA_CODEC_ID_VIDEO_VFW_FOURCC) &&
+ (track->codec_priv_size >= 40) &&
+ (track->codec_priv != NULL)) {
+ unsigned char *p;
+
+ /* Offset of biCompression. Stored in LE. */
+ p = (unsigned char *)track->codec_priv + 16;
+ ((MatroskaVideoTrack *)track)->fourcc = (p[3] << 24) |
+ (p[2] << 16) | (p[1] << 8) | p[0];
+ codec_id = codec_get_bmp_id(((MatroskaVideoTrack *)track)->fourcc);
+
+ }
+
+ /* This is the MS compatibility mode which stores a
+ * WAVEFORMATEX in the CodecPrivate. */
+ else if (!strcmp(track->codec_id,
+ MATROSKA_CODEC_ID_AUDIO_ACM) &&
+ (track->codec_priv_size >= 18) &&
+ (track->codec_priv != NULL)) {
+ unsigned char *p;
+ uint16_t tag;
+
+ /* Offset of wFormatTag. Stored in LE. */
+ p = (unsigned char *)track->codec_priv;
+ tag = (p[1] << 8) | p[0];
+ codec_id = codec_get_wav_id(tag);
+
+ }
+
+ else if (codec_id == CODEC_ID_AAC && !track->codec_priv_size) {
+ MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *) track;
+ int profile = matroska_aac_profile(track->codec_id);
+ int sri = matroska_aac_sri(audiotrack->internal_samplerate);
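+                /* No CodecPrivate: hand-build a minimal MPEG-4
+                 * AudioSpecificConfig (5 bits object type, 4 bits sampling
+                 * frequency index, 4 bits channel configuration); for SBR
+                 * ("HE-AAC") the 0x2b7 sync extension carrying the output
+                 * rate index is appended. */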
+ extradata = av_malloc(5);
+ if (extradata == NULL)
+ return AVERROR_NOMEM;
+ extradata[0] = (profile << 3) | ((sri&0x0E) >> 1);
+ extradata[1] = ((sri&0x01) << 7) | (audiotrack->channels<<3);
+ if (strstr(track->codec_id, "SBR")) {
+ sri = matroska_aac_sri(audiotrack->samplerate);
+ extradata[2] = 0x56;
+ extradata[3] = 0xE5;
+ extradata[4] = 0x80 | (sri<<3);
+ extradata_size = 5;
+ } else {
+ extradata_size = 2;
+ }
+ }
+
+ else if (codec_id == CODEC_ID_TTA) {
+ MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *) track;
+ ByteIOContext b;
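+                /* Synthesize a raw "TTA1" file header in extradata so the
+                 * TTA decoder can pick up channels, bit depth and rate. */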
+ extradata_size = 30;
+ extradata = av_mallocz(extradata_size);
+ if (extradata == NULL)
+ return AVERROR_NOMEM;
+ init_put_byte(&b, extradata, extradata_size, 1,
+ NULL, NULL, NULL, NULL);
+ put_buffer(&b, (uint8_t *) "TTA1", 4);
+ put_le16(&b, 1);
+ put_le16(&b, audiotrack->channels);
+ put_le16(&b, audiotrack->bitdepth);
+ put_le32(&b, audiotrack->samplerate);
+ put_le32(&b, matroska->ctx->duration * audiotrack->samplerate);
+ }
+
+ else if (codec_id == CODEC_ID_RV10 || codec_id == CODEC_ID_RV20 ||
+ codec_id == CODEC_ID_RV30 || codec_id == CODEC_ID_RV40) {
+ extradata_offset = 26;
+ track->codec_priv_size -= extradata_offset;
+ track->flags |= MATROSKA_TRACK_REAL_V;
+ }
+
+ if (codec_id == CODEC_ID_NONE) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown/unsupported CodecID %s.\n",
+ track->codec_id);
+ }
+
+ track->stream_index = matroska->num_streams;
+
+ matroska->num_streams++;
+ st = av_new_stream(s, track->stream_index);
+ if (st == NULL)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 64, matroska->time_scale, 1000*1000*1000); /* 64 bit pts in ns */
+
+ st->codec->codec_id = codec_id;
+
+ if (track->default_duration)
+ av_reduce(&st->codec->time_base.num, &st->codec->time_base.den,
+ track->default_duration, 1000000000, 30000);
+
+ if(extradata){
+ st->codec->extradata = extradata;
+ st->codec->extradata_size = extradata_size;
+ } else if(track->codec_priv && track->codec_priv_size > 0){
+ st->codec->extradata = av_malloc(track->codec_priv_size);
+ if(st->codec->extradata == NULL)
+ return AVERROR_NOMEM;
+ st->codec->extradata_size = track->codec_priv_size;
+ memcpy(st->codec->extradata,track->codec_priv+extradata_offset,
+ track->codec_priv_size);
+ }
+
+ if (track->type == MATROSKA_TRACK_TYPE_VIDEO) {
+ MatroskaVideoTrack *videotrack = (MatroskaVideoTrack *)track;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_tag = videotrack->fourcc;
+ st->codec->width = videotrack->pixel_width;
+ st->codec->height = videotrack->pixel_height;
+ if (videotrack->display_width == 0)
+ videotrack->display_width= videotrack->pixel_width;
+ if (videotrack->display_height == 0)
+ videotrack->display_height= videotrack->pixel_height;
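+                    /* sample aspect ratio =
+                     * (display_width * pixel_height) / (display_height * pixel_width) */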
+ av_reduce(&st->codec->sample_aspect_ratio.num,
+ &st->codec->sample_aspect_ratio.den,
+ st->codec->height * videotrack->display_width,
+ st->codec-> width * videotrack->display_height,
+ 255);
+ } else if (track->type == MATROSKA_TRACK_TYPE_AUDIO) {
+ MatroskaAudioTrack *audiotrack = (MatroskaAudioTrack *)track;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->sample_rate = audiotrack->samplerate;
+ st->codec->channels = audiotrack->channels;
+ } else if (track->type == MATROSKA_TRACK_TYPE_SUBTITLE) {
+ st->codec->codec_type = CODEC_TYPE_SUBTITLE;
+ }
+
+ /* What do we do with private data? E.g. for Vorbis. */
+ }
+ res = 0;
+ }
+
+ return res;
+}
+
+static int
+matroska_find_track_by_num (MatroskaDemuxContext *matroska,
+ int num)
+{
+ int i;
+
+ for (i = 0; i < matroska->num_tracks; i++)
+ if (matroska->tracks[i]->num == num)
+ return i;
+
+ return -1;
+}
+
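+/* RealVideo laces start with a slice count byte followed by a table of
+ * 8-byte entries per slice; rv_offset() returns the payload offset of a
+ * slice (the second little-endian 32-bit word of its entry), measured
+ * from the end of that table. */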
+static inline int
+rv_offset(uint8_t *data, int slice, int slices)
+{
+ return LE_32(data+8*slice+4) + 8*slices;
+}
+
+static int
+matroska_parse_blockgroup (MatroskaDemuxContext *matroska,
+ uint64_t cluster_time)
+{
+ int res = 0;
+ uint32_t id;
+ AVPacket *pkt = NULL;
+ int is_keyframe = PKT_FLAG_KEY, last_num_packets = matroska->num_packets;
+ uint64_t duration = AV_NOPTS_VALUE;
+ int track = -1;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG, "parsing blockgroup...\n");
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* one block inside the group. Note, block parsing is one
+ * of the harder things, so this code is a bit complicated.
+ * See http://www.matroska.org/ for documentation. */
+ case MATROSKA_ID_BLOCK: {
+ uint8_t *data, *origdata;
+ int size;
+ int16_t block_time;
+ uint32_t *lace_size = NULL;
+ int n, flags, laces = 0;
+ uint64_t num;
+ int64_t pos= url_ftell(&matroska->ctx->pb);
+
+ if ((res = ebml_read_binary(matroska, &id, &data, &size)) < 0)
+ break;
+ origdata = data;
+
+ /* first byte(s): tracknum */
+ if ((n = matroska_ebmlnum_uint(data, size, &num)) < 0) {
+ av_log(matroska->ctx, AV_LOG_ERROR,
+ "EBML block data error\n");
+ av_free(origdata);
+ break;
+ }
+ data += n;
+ size -= n;
+
+ /* fetch track from num */
+ track = matroska_find_track_by_num(matroska, num);
+ if (size <= 3 || track < 0 || track >= matroska->num_tracks) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Invalid stream %d or size %u\n", track, size);
+ av_free(origdata);
+ break;
+ }
+ if(matroska->ctx->streams[ matroska->tracks[track]->stream_index ]->discard >= AVDISCARD_ALL){
+ av_free(origdata);
+ break;
+ }
+
+ /* block_time (relative to cluster time) */
+ block_time = (data[0] << 8) | data[1];
+ data += 2;
+ size -= 2;
+ flags = *data;
+ data += 1;
+ size -= 1;
+ switch ((flags & 0x06) >> 1) {
+ case 0x0: /* no lacing */
+ laces = 1;
+ lace_size = av_mallocz(sizeof(int));
+ lace_size[0] = size;
+ break;
+
+ case 0x1: /* xiph lacing */
+ case 0x2: /* fixed-size lacing */
+ case 0x3: /* EBML lacing */
+ if (size == 0) {
+ res = -1;
+ break;
+ }
+ laces = (*data) + 1;
+ data += 1;
+ size -= 1;
+ lace_size = av_mallocz(laces * sizeof(int));
+
+ switch ((flags & 0x06) >> 1) {
+ case 0x1: /* xiph lacing */ {
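+                        /* each of the first laces-1 sizes is the sum of
+                         * bytes up to and including the first byte below
+                         * 0xff; the last lace takes whatever data remains */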
+ uint8_t temp;
+ uint32_t total = 0;
+ for (n = 0; res == 0 && n < laces - 1; n++) {
+ while (1) {
+ if (size == 0) {
+ res = -1;
+ break;
+ }
+ temp = *data;
+ lace_size[n] += temp;
+ data += 1;
+ size -= 1;
+ if (temp != 0xff)
+ break;
+ }
+ total += lace_size[n];
+ }
+ lace_size[n] = size - total;
+ break;
+ }
+
+ case 0x2: /* fixed-size lacing */
+ for (n = 0; n < laces; n++)
+ lace_size[n] = size / laces;
+ break;
+
+ case 0x3: /* EBML lacing */ {
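+                        /* the first size is an EBML-coded unsigned integer;
+                         * each following size is a signed EBML delta from
+                         * the previous one; the last lace again takes the
+                         * remainder */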
+ uint32_t total;
+ n = matroska_ebmlnum_uint(data, size, &num);
+ if (n < 0) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "EBML block data error\n");
+ break;
+ }
+ data += n;
+ size -= n;
+ total = lace_size[0] = num;
+ for (n = 1; res == 0 && n < laces - 1; n++) {
+ int64_t snum;
+ int r;
+ r = matroska_ebmlnum_sint (data, size,
+ &snum);
+ if (r < 0) {
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "EBML block data error\n");
+ break;
+ }
+ data += r;
+ size -= r;
+ lace_size[n] = lace_size[n - 1] + snum;
+ total += lace_size[n];
+ }
+ lace_size[n] = size - total;
+ break;
+ }
+ }
+ break;
+ }
+
+ if (res == 0) {
+ int real_v = matroska->tracks[track]->flags & MATROSKA_TRACK_REAL_V;
+ for (n = 0; n < laces; n++) {
+ uint64_t timecode = AV_NOPTS_VALUE;
+ int slice, slices = 1;
+
+ if (real_v) {
+ slices = *data++ + 1;
+ lace_size[n]--;
+ }
+ if (cluster_time != (uint64_t)-1 && n == 0) {
+ if (cluster_time + block_time >= 0)
+ timecode = (cluster_time + block_time) * matroska->time_scale;
+ }
+ /* FIXME: duration */
+
+ for (slice=0; slice<slices; slice++) {
+ int slice_size, slice_offset = 0;
+ if (real_v)
+ slice_offset = rv_offset(data, slice, slices);
+ if (slice+1 == slices)
+ slice_size = lace_size[n] - slice_offset;
+ else
+ slice_size = rv_offset(data, slice+1, slices) - slice_offset;
+ pkt = av_mallocz(sizeof(AVPacket));
+ /* XXX: prevent data copy... */
+ if (av_new_packet(pkt, slice_size) < 0) {
+ res = AVERROR_NOMEM;
+ n = laces-1;
+ break;
+ }
+ memcpy (pkt->data, data+slice_offset, slice_size);
+
+ if (n == 0)
+ pkt->flags = is_keyframe;
+ pkt->stream_index =
+ matroska->tracks[track]->stream_index;
+
+ pkt->pts = timecode;
+ pkt->pos = pos;
+
+ matroska_queue_packet(matroska, pkt);
+ }
+ data += lace_size[n];
+ }
+ }
+
+ av_free(lace_size);
+ av_free(origdata);
+ break;
+ }
+
+ case MATROSKA_ID_BLOCKDURATION: {
+ if ((res = ebml_read_uint(matroska, &id, &duration)) < 0)
+ break;
+ break;
+ }
+
+ case MATROSKA_ID_BLOCKREFERENCE:
+ /* We've found a reference, so not even the first frame in
+ * the lace is a key frame. */
+ is_keyframe = 0;
+ if (last_num_packets != matroska->num_packets)
+ matroska->packets[last_num_packets]->flags = 0;
+ res = ebml_read_skip(matroska);
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in blockgroup data\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ if (pkt)
+ {
+ if (duration != AV_NOPTS_VALUE)
+ pkt->duration = duration;
+ else if (track >= 0 && track < matroska->num_tracks)
+ pkt->duration = matroska->tracks[track]->default_duration / matroska->time_scale;
+ }
+
+ return res;
+}
+
+static int
+matroska_parse_cluster (MatroskaDemuxContext *matroska)
+{
+ int res = 0;
+ uint32_t id;
+ uint64_t cluster_time = 0;
+
+ av_log(matroska->ctx, AV_LOG_DEBUG,
+ "parsing cluster at %"PRId64"\n", url_ftell(&matroska->ctx->pb));
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ /* cluster timecode */
+ case MATROSKA_ID_CLUSTERTIMECODE: {
+ uint64_t num;
+ if ((res = ebml_read_uint(matroska, &id, &num)) < 0)
+ break;
+ cluster_time = num;
+ break;
+ }
+
+ /* a group of blocks inside a cluster */
+ case MATROSKA_ID_BLOCKGROUP:
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ res = matroska_parse_blockgroup(matroska, cluster_time);
+ break;
+
+ default:
+ av_log(matroska->ctx, AV_LOG_INFO,
+ "Unknown entry 0x%x in cluster data\n", id);
+ /* fall-through */
+
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ return res;
+}
+
+static int
+matroska_read_packet (AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MatroskaDemuxContext *matroska = s->priv_data;
+ int res = 0;
+ uint32_t id;
+
+ /* Do we still have a packet queued? */
+ if (matroska_deliver_packet(matroska, pkt) == 0)
+ return 0;
+
+ /* Have we already reached the end? */
+ if (matroska->done)
+ return AVERROR_IO;
+
+ while (res == 0) {
+ if (!(id = ebml_peek_id(matroska, &matroska->level_up))) {
+ res = AVERROR_IO;
+ break;
+ } else if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+
+ switch (id) {
+ case MATROSKA_ID_CLUSTER:
+ if ((res = ebml_read_master(matroska, &id)) < 0)
+ break;
+ if ((res = matroska_parse_cluster(matroska)) == 0)
+ res = 1; /* Parsed one cluster, let's get out. */
+ break;
+
+ default:
+ case EBML_ID_VOID:
+ res = ebml_read_skip(matroska);
+ break;
+ }
+
+ if (matroska->level_up) {
+ matroska->level_up--;
+ break;
+ }
+ }
+
+ if (res == -1)
+ matroska->done = 1;
+
+ return matroska_deliver_packet(matroska, pkt);
+}
+
+static int
+matroska_read_close (AVFormatContext *s)
+{
+ MatroskaDemuxContext *matroska = s->priv_data;
+ int n = 0;
+
+ av_free(matroska->writing_app);
+ av_free(matroska->muxing_app);
+ av_free(matroska->index);
+
+ if (matroska->packets != NULL) {
+ for (n = 0; n < matroska->num_packets; n++) {
+ av_free_packet(matroska->packets[n]);
+ av_free(matroska->packets[n]);
+ }
+ av_free(matroska->packets);
+ }
+
+ for (n = 0; n < matroska->num_tracks; n++) {
+ MatroskaTrack *track = matroska->tracks[n];
+ av_free(track->codec_id);
+ av_free(track->codec_name);
+ av_free(track->codec_priv);
+ av_free(track->name);
+ av_free(track->language);
+
+ av_free(track);
+ }
+
+ return 0;
+}
+
+AVInputFormat matroska_demuxer = {
+ "matroska",
+ "Matroska file format",
+ sizeof(MatroskaDemuxContext),
+ matroska_probe,
+ matroska_read_header,
+ matroska_read_packet,
+ matroska_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/mm.c b/contrib/ffmpeg/libavformat/mm.c
new file mode 100644
index 000000000..a3c637fb2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mm.c
@@ -0,0 +1,212 @@
+/*
+ * American Laser Games MM Format Demuxer
+ * Copyright (c) 2006 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mm.c
+ * American Laser Games MM Format Demuxer
+ * by Peter Ross (suxen_drol at hotmail dot com)
+ *
+ * The MM format was used by IBM-PC ports of ALG's "arcade shooter" games,
+ * including Mad Dog McCree and Crime Patrol.
+ *
+ * Technical details here:
+ * http://wiki.multimedia.cx/index.php?title=American_Laser_Games_MM
+ */
+
+#include "avformat.h"
+
+#define MM_PREAMBLE_SIZE 6
+
+#define MM_TYPE_HEADER 0x0
+#define MM_TYPE_INTER 0x5
+#define MM_TYPE_INTRA 0x8
+#define MM_TYPE_INTRA_HH 0xc
+#define MM_TYPE_INTER_HH 0xd
+#define MM_TYPE_INTRA_HHV 0xe
+#define MM_TYPE_INTER_HHV 0xf
+#define MM_TYPE_AUDIO 0x15
+#define MM_TYPE_PALETTE 0x31
+
+#define MM_HEADER_LEN_V 0x16 /* video only */
+#define MM_HEADER_LEN_AV 0x18 /* video + audio */
+
+#define MM_PALETTE_COUNT 128
+#define MM_PALETTE_SIZE (MM_PALETTE_COUNT*3)
+
+typedef struct {
+ AVPaletteControl palette_control;
+ unsigned int audio_pts, video_pts;
+} MmDemuxContext;
+
+static int mm_probe(AVProbeData *p)
+{
+ /* the first chunk is always the header */
+ if (p->buf_size < MM_PREAMBLE_SIZE)
+ return 0;
+ if (LE_16(&p->buf[0]) != MM_TYPE_HEADER)
+ return 0;
+ if (LE_32(&p->buf[2]) != MM_HEADER_LEN_V && LE_32(&p->buf[2]) != MM_HEADER_LEN_AV)
+ return 0;
+
+ /* only return half certainty since this check is a bit sketchy */
+ return AVPROBE_SCORE_MAX / 2;
+}
+
+static int mm_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MmDemuxContext *mm = (MmDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+
+ unsigned int type, length;
+ unsigned int frame_rate, width, height;
+
+ type = get_le16(pb);
+ length = get_le32(pb);
+
+ if (type != MM_TYPE_HEADER)
+ return AVERROR_INVALIDDATA;
+
+ /* read header */
+ get_le16(pb); /* total number of chunks */
+ frame_rate = get_le16(pb);
+    get_le16(pb);       /* IBM PC video BIOS mode */
+ width = get_le16(pb);
+ height = get_le16(pb);
+ url_fseek(pb, length - 10, SEEK_CUR); /* unknown data */
+
+ /* video stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MMVIDEO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = width;
+ st->codec->height = height;
+ st->codec->palctrl = &mm->palette_control;
+ av_set_pts_info(st, 64, 1, frame_rate);
+
+ /* audio stream */
+ if (length == MM_HEADER_LEN_AV) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->codec_id = CODEC_ID_PCM_U8;
+ st->codec->channels = 1;
+ st->codec->sample_rate = 8000;
+ av_set_pts_info(st, 64, 1, 8000); /* 8000 hz */
+ }
+
+ mm->palette_control.palette_changed = 0;
+ mm->audio_pts = 0;
+ mm->video_pts = 0;
+ return 0;
+}
+
+static int mm_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MmDemuxContext *mm = (MmDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned char preamble[MM_PREAMBLE_SIZE];
+ unsigned char pal[MM_PALETTE_SIZE];
+ unsigned int type, length;
+ int i;
+
+ while(1) {
+
+ if (get_buffer(pb, preamble, MM_PREAMBLE_SIZE) != MM_PREAMBLE_SIZE) {
+ return AVERROR_IO;
+ }
+
+ type = LE_16(&preamble[0]);
+ length = LE_16(&preamble[2]);
+
+ switch(type) {
+ case MM_TYPE_PALETTE :
+ url_fseek(pb, 4, SEEK_CUR); /* unknown data */
+ if (get_buffer(pb, pal, MM_PALETTE_SIZE) != MM_PALETTE_SIZE)
+ return AVERROR_IO;
+ url_fseek(pb, length - (4 + MM_PALETTE_SIZE), SEEK_CUR);
+
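+            /* pack each RGB triple into the 0x00RRGGBB words used by
+             * AVPaletteControl */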
+ for (i=0; i<MM_PALETTE_COUNT; i++) {
+ int r = pal[i*3 + 0];
+ int g = pal[i*3 + 1];
+ int b = pal[i*3 + 2];
+ mm->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
+                /* repeat the palette, with each component multiplied by four */
+ mm->palette_control.palette[i+128] = (r << 18) | (g << 10) | (b<<2);
+ }
+ mm->palette_control.palette_changed = 1;
+ break;
+
+ case MM_TYPE_INTER :
+ case MM_TYPE_INTRA :
+ case MM_TYPE_INTRA_HH :
+ case MM_TYPE_INTER_HH :
+ case MM_TYPE_INTRA_HHV :
+ case MM_TYPE_INTER_HHV :
+ /* output preamble + data */
+ if (av_new_packet(pkt, length + MM_PREAMBLE_SIZE))
+ return AVERROR_NOMEM;
+ memcpy(pkt->data, preamble, MM_PREAMBLE_SIZE);
+ if (get_buffer(pb, pkt->data + MM_PREAMBLE_SIZE, length) != length)
+ return AVERROR_IO;
+ pkt->size = length + MM_PREAMBLE_SIZE;
+ pkt->stream_index = 0;
+ pkt->pts = mm->video_pts++;
+ return 0;
+
+ case MM_TYPE_AUDIO :
+ if (av_get_packet(&s->pb, pkt, length)<0)
+ return AVERROR_NOMEM;
+ pkt->size = length;
+ pkt->stream_index = 1;
+ pkt->pts = mm->audio_pts++;
+ return 0;
+
+ default :
+ av_log(NULL, AV_LOG_INFO, "mm: unknown chunk type 0x%x\n", type);
+ url_fseek(pb, length, SEEK_CUR);
+ }
+ }
+
+ return 0;
+}
+
+static int mm_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVInputFormat mm_demuxer = {
+ "mm",
+ "American Laser Games MM format",
+ sizeof(MmDemuxContext),
+ mm_probe,
+ mm_read_header,
+ mm_read_packet,
+ mm_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/mmf.c b/contrib/ffmpeg/libavformat/mmf.c
new file mode 100644
index 000000000..40b1a497c
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mmf.c
@@ -0,0 +1,331 @@
+/*
+ * Yamaha SMAF format
+ * Copyright (c) 2005 Vidar Madsen
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+
+typedef struct {
+ offset_t atrpos, atsqpos, awapos;
+ offset_t data_size;
+} MMFContext;
+
+static int mmf_rates[] = { 4000, 8000, 11025, 22050, 44100 };
+
+static int mmf_rate_code(int rate)
+{
+ int i;
+ for(i = 0; i < 5; i++)
+ if(mmf_rates[i] == rate)
+ return i;
+ return -1;
+}
+
+static int mmf_rate(int code)
+{
+ if((code < 0) || (code > 4))
+ return -1;
+ return mmf_rates[code];
+}
+
+#ifdef CONFIG_MUXERS
+/* Copy of end_tag() from avienc.c, but for big-endian chunk size */
+static void end_tag_be(ByteIOContext *pb, offset_t start)
+{
+ offset_t pos;
+
+ pos = url_ftell(pb);
+ url_fseek(pb, start - 4, SEEK_SET);
+ put_be32(pb, (uint32_t)(pos - start));
+ url_fseek(pb, pos, SEEK_SET);
+}
+
+static int mmf_write_header(AVFormatContext *s)
+{
+ MMFContext *mmf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ offset_t pos;
+ int rate;
+
+ rate = mmf_rate_code(s->streams[0]->codec->sample_rate);
+ if(rate < 0) {
+ av_log(s, AV_LOG_ERROR, "Unsupported sample rate %d\n", s->streams[0]->codec->sample_rate);
+ return -1;
+ }
+
+ put_tag(pb, "MMMD");
+ put_be32(pb, 0);
+ pos = start_tag(pb, "CNTI");
+ put_byte(pb, 0); /* class */
+ put_byte(pb, 0); /* type */
+ put_byte(pb, 0); /* code type */
+ put_byte(pb, 0); /* status */
+ put_byte(pb, 0); /* counts */
+ put_tag(pb, "VN:libavcodec,"); /* metadata ("ST:songtitle,VN:version,...") */
+ end_tag_be(pb, pos);
+
+ put_buffer(pb, "ATR\x00", 4);
+ put_be32(pb, 0);
+ mmf->atrpos = url_ftell(pb);
+ put_byte(pb, 0); /* format type */
+ put_byte(pb, 0); /* sequence type */
+ put_byte(pb, (0 << 7) | (1 << 4) | rate); /* (channel << 7) | (format << 4) | rate */
+ put_byte(pb, 0); /* wave base bit */
+ put_byte(pb, 2); /* time base d */
+ put_byte(pb, 2); /* time base g */
+
+ put_tag(pb, "Atsq");
+ put_be32(pb, 16);
+ mmf->atsqpos = url_ftell(pb);
+ /* Will be filled on close */
+ put_buffer(pb, "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", 16);
+
+ mmf->awapos = start_tag(pb, "Awa\x01");
+
+ av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int mmf_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+/* Write a variable-length symbol */
+static void put_varlength(ByteIOContext *pb, int val)
+{
+ if(val < 128)
+ put_byte(pb, val);
+ else {
+ val -= 128;
+ put_byte(pb, 0x80 | val >> 7);
+ put_byte(pb, 0x7f & val);
+ }
+}
+
+static int mmf_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ MMFContext *mmf = s->priv_data;
+ offset_t pos, size;
+ int gatetime;
+
+ if (!url_is_streamed(&s->pb)) {
+ /* Fill in length fields */
+ end_tag_be(pb, mmf->awapos);
+ end_tag_be(pb, mmf->atrpos);
+ end_tag_be(pb, 8);
+
+ pos = url_ftell(pb);
+ size = pos - mmf->awapos;
+
+ /* Fill Atsq chunk */
+ url_fseek(pb, mmf->atsqpos, SEEK_SET);
+
+ /* "play wav" */
+ put_byte(pb, 0); /* start time */
+ put_byte(pb, 1); /* (channel << 6) | wavenum */
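+        /* gate time in sequence ticks: 'size' bytes of 4-bit mono ADPCM hold
+         * size*2 samples, i.e. size*2/rate seconds; with the 4 ms time base
+         * written in the header (code 2, an assumption about the SMAF spec)
+         * that is size*500/rate ticks */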
+ gatetime = size * 500 / s->streams[0]->codec->sample_rate;
+ put_varlength(pb, gatetime); /* duration */
+
+ /* "nop" */
+ put_varlength(pb, gatetime); /* start time */
+ put_buffer(pb, "\xff\x00", 2); /* nop */
+
+ /* "end of sequence" */
+ put_buffer(pb, "\x00\x00\x00\x00", 4);
+
+ url_fseek(pb, pos, SEEK_SET);
+
+ put_flush_packet(pb);
+ }
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+static int mmf_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'M' && p->buf[1] == 'M' &&
+ p->buf[2] == 'M' && p->buf[3] == 'D' &&
+ p->buf[8] == 'C' && p->buf[9] == 'N' &&
+ p->buf[10] == 'T' && p->buf[11] == 'I')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* mmf input */
+static int mmf_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MMFContext *mmf = s->priv_data;
+ unsigned int tag;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ offset_t file_size, size;
+ int rate, params;
+
+ tag = get_le32(pb);
+ if (tag != MKTAG('M', 'M', 'M', 'D'))
+ return -1;
+ file_size = get_be32(pb);
+
+ /* Skip some unused chunks that may or may not be present */
+ for(;; url_fseek(pb, size, SEEK_CUR)) {
+ tag = get_le32(pb);
+ size = get_be32(pb);
+ if(tag == MKTAG('C','N','T','I')) continue;
+ if(tag == MKTAG('O','P','D','A')) continue;
+ break;
+ }
+
+ /* Tag = "ATRx", where "x" = track number */
+ if ((tag & 0xffffff) == MKTAG('M', 'T', 'R', 0)) {
+        av_log(s, AV_LOG_ERROR, "MIDI-like format found, unsupported\n");
+ return -1;
+ }
+ if ((tag & 0xffffff) != MKTAG('A', 'T', 'R', 0)) {
+ av_log(s, AV_LOG_ERROR, "Unsupported SMAF chunk %08x\n", tag);
+ return -1;
+ }
+
+ get_byte(pb); /* format type */
+ get_byte(pb); /* sequence type */
+ params = get_byte(pb); /* (channel << 7) | (format << 4) | rate */
+ rate = mmf_rate(params & 0x0f);
+ if(rate < 0) {
+ av_log(s, AV_LOG_ERROR, "Invalid sample rate\n");
+ return -1;
+ }
+ get_byte(pb); /* wave base bit */
+ get_byte(pb); /* time base d */
+ get_byte(pb); /* time base g */
+
+ /* Skip some unused chunks that may or may not be present */
+ for(;; url_fseek(pb, size, SEEK_CUR)) {
+ tag = get_le32(pb);
+ size = get_be32(pb);
+ if(tag == MKTAG('A','t','s','q')) continue;
+ if(tag == MKTAG('A','s','p','I')) continue;
+ break;
+ }
+
+ /* Make sure it's followed by an Awa chunk, aka wave data */
+ if ((tag & 0xffffff) != MKTAG('A', 'w', 'a', 0)) {
+ av_log(s, AV_LOG_ERROR, "Unexpected SMAF chunk %08x\n", tag);
+ return -1;
+ }
+ mmf->data_size = size;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_ADPCM_YAMAHA;
+ st->codec->sample_rate = rate;
+ st->codec->channels = 1;
+ st->codec->bits_per_sample = 4;
+ st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_sample;
+
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+
+ return 0;
+}
+
+#define MAX_SIZE 4096
+
+static int mmf_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MMFContext *mmf = s->priv_data;
+ AVStream *st;
+ int ret, size;
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+ st = s->streams[0];
+
+ size = MAX_SIZE;
+ if(size > mmf->data_size)
+ size = mmf->data_size;
+
+ if(!size)
+ return AVERROR_IO;
+
+ if (av_new_packet(pkt, size))
+ return AVERROR_IO;
+ pkt->stream_index = 0;
+
+ ret = get_buffer(&s->pb, pkt->data, pkt->size);
+ if (ret < 0)
+ av_free_packet(pkt);
+
+ mmf->data_size -= ret;
+
+ pkt->size = ret;
+ return ret;
+}
+
+static int mmf_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int mmf_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ return pcm_read_seek(s, stream_index, timestamp, flags);
+}
+
+#ifdef CONFIG_MMF_DEMUXER
+AVInputFormat mmf_demuxer = {
+ "mmf",
+ "mmf format",
+ sizeof(MMFContext),
+ mmf_probe,
+ mmf_read_header,
+ mmf_read_packet,
+ mmf_read_close,
+ mmf_read_seek,
+};
+#endif
+#ifdef CONFIG_MMF_MUXER
+AVOutputFormat mmf_muxer = {
+ "mmf",
+ "mmf format",
+ "application/vnd.smaf",
+ "mmf",
+ sizeof(MMFContext),
+ CODEC_ID_ADPCM_YAMAHA,
+ CODEC_ID_NONE,
+ mmf_write_header,
+ mmf_write_packet,
+ mmf_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/mov.c b/contrib/ffmpeg/libavformat/mov.c
new file mode 100644
index 000000000..3ceac64b1
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mov.c
@@ -0,0 +1,1798 @@
+/*
+ * MOV demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <limits.h>
+
+//#define DEBUG
+
+#include "avformat.h"
+#include "riff.h"
+#include "isom.h"
+#include "dv.h"
+
+#ifdef CONFIG_ZLIB
+#include <zlib.h>
+#endif
+
+/*
+ * First version by Francois Revol revol@free.fr
+ * Seek function by Gael Chardon gael.dev@4now.net
+ *
+ * Features and limitations:
+ * - reads most of the QT files I have (at least the structure),
+ * the exceptions are .mov with zlib compressed headers ('cmov' section). It shouldn't be hard to implement.
+ * FIXED, Francois Revol, 07/17/2002
+ * - ffmpeg has nearly none of the usual QuickTime codecs,
+ *   although I successfully dumped raw and mp3 audio tracks off .mov files.
+ * Sample QuickTime files with mp3 audio can be found at: http://www.3ivx.com/showcase.html
+ * - .mp4 parsing is still hazardous, although the format really is QuickTime with some minor changes
+ * (to make .mov parser crash maybe ?), despite what they say in the MPEG FAQ at
+ * http://mpeg.telecomitalialab.com/faq.htm
+ * - the code is quite ugly... maybe I won't do it recursively next time :-)
+ * - seek is not supported with files that contain edit list
+ *
+ * Funny I didn't know about http://sourceforge.net/projects/qt-ffmpeg/
+ * when coding this :) (it's a writer anyway)
+ *
+ * Reference documents:
+ * http://www.geocities.com/xhelmboyx/quicktime/formats/qtm-layout.txt
+ * Apple:
+ * http://developer.apple.com/documentation/QuickTime/QTFF/
+ * http://developer.apple.com/documentation/QuickTime/QTFF/qtff.pdf
+ * QuickTime is a trademark of Apple (AFAIK :))
+ */
+
+#include "qtpalette.h"
+
+
+#undef NDEBUG
+#include <assert.h>
+
+static const CodecTag mov_video_tags[] = {
+/* { CODEC_ID_, MKTAG('c', 'v', 'i', 'd') }, *//* Cinepak */
+/* { CODEC_ID_H263, MKTAG('r', 'a', 'w', ' ') }, *//* Uncompressed RGB */
+/* { CODEC_ID_H263, MKTAG('Y', 'u', 'v', '2') }, *//* Uncompressed YUV422 */
+/* { CODEC_ID_RAWVIDEO, MKTAG('A', 'V', 'U', 'I') }, *//* YUV with alpha-channel (AVID Uncompressed) */
+/* Graphics */
+/* Animation */
+/* Apple video */
+/* Kodak Photo CD */
+ { CODEC_ID_MJPEG, MKTAG('j', 'p', 'e', 'g') }, /* PhotoJPEG */
+ { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'e', 'g') }, /* MPEG */
+ { CODEC_ID_MJPEG, MKTAG('m', 'j', 'p', 'a') }, /* Motion-JPEG (format A) */
+ { CODEC_ID_MJPEGB, MKTAG('m', 'j', 'p', 'b') }, /* Motion-JPEG (format B) */
+ { CODEC_ID_MJPEG, MKTAG('A', 'V', 'D', 'J') }, /* MJPEG with alpha-channel (AVID JFIF meridien compressed) */
+/* { CODEC_ID_MJPEG, MKTAG('A', 'V', 'R', 'n') }, *//* MJPEG with alpha-channel (AVID ABVB/Truevision NuVista) */
+ { CODEC_ID_GIF, MKTAG('g', 'i', 'f', ' ') }, /* embedded gif files as frames (usually one "click to play movie" frame) */
+/* Sorenson video */
+ { CODEC_ID_SVQ1, MKTAG('S', 'V', 'Q', '1') }, /* Sorenson Video v1 */
+ { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') }, /* Sorenson Video v1 */
+ { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', 'i') }, /* Sorenson Video v1 (from QT specs)*/
+ { CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') }, /* Sorenson Video v3 */
+ { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
+ { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X') }, /* OpenDiVX *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
+ { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D') },
+ { CODEC_ID_MPEG4, MKTAG('3', 'I', 'V', '2') }, /* experimental: 3IVX files before ivx D4 4.5.1 */
+/* { CODEC_ID_, MKTAG('I', 'V', '5', '0') }, *//* Indeo 5.0 */
+ { CODEC_ID_H263, MKTAG('h', '2', '6', '3') }, /* H263 */
+ { CODEC_ID_H263, MKTAG('s', '2', '6', '3') }, /* H263 ?? works */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') }, /* DV NTSC */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 'p') }, /* DV PAL */
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') }, /* On2 VP3 */
+ { CODEC_ID_RPZA, MKTAG('r', 'p', 'z', 'a') }, /* Apple Video (RPZA) */
+ { CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') }, /* Cinepak */
+ { CODEC_ID_8BPS, MKTAG('8', 'B', 'P', 'S') }, /* Planar RGB (8BPS) */
+ { CODEC_ID_SMC, MKTAG('s', 'm', 'c', ' ') }, /* Apple Graphics (SMC) */
+ { CODEC_ID_QTRLE, MKTAG('r', 'l', 'e', ' ') }, /* Apple Animation (RLE) */
+ { CODEC_ID_QDRAW, MKTAG('q', 'd', 'r', 'w') }, /* QuickDraw */
+ { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') }, /* AVC-1/H.264 */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '2') }, /* MPEG2 produced by Sony HD camera */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('h', 'd', 'v', '3') }, /* HDV produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'n') }, /* MPEG2 IMX NTSC 525/60 50mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '5', 'p') }, /* MPEG2 IMX PAL 625/50 50mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'n') }, /* MPEG2 IMX NTSC 525/60 30mb/s produced by FCP */
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'x', '3', 'p') }, /* MPEG2 IMX PAL 625/50 30mb/s produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'p', 'p') }, /* DVCPRO PAL produced by FCP */
+ //{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '5') }, /* DVCPRO HD 50i produced by FCP */
+ //{ CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', '6') }, /* DVCPRO HD 60i produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'p') }, /* DVCPRO50 PAL produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'n') }, /* DVCPRO50 NTSC produced by FCP */
+ { CODEC_ID_DVVIDEO, MKTAG('A', 'V', 'd', 'v') }, /* AVID DV */
+ //{ CODEC_ID_JPEG2000, MKTAG('m', 'j', 'p', '2') }, /* JPEG 2000 produced by FCP */
+ { CODEC_ID_TARGA, MKTAG('t', 'g', 'a', ' ') }, /* Truevision Targa */
+ { CODEC_ID_TIFF, MKTAG('t', 'i', 'f', 'f') }, /* TIFF embedded in MOV */
+ { CODEC_ID_RAWVIDEO, MKTAG('2', 'v', 'u', 'y') }, /* UNCOMPRESSED 8BIT 4:2:2 */
+ { CODEC_ID_NONE, 0 },
+};
+
+static const CodecTag mov_audio_tags[] = {
+ { CODEC_ID_PCM_S32BE, MKTAG('i', 'n', '3', '2') },
+ { CODEC_ID_PCM_S24BE, MKTAG('i', 'n', '2', '4') },
+ { CODEC_ID_PCM_S16BE, MKTAG('N', 'O', 'N', 'E') }, /* uncompressed */
+ { CODEC_ID_PCM_S16BE, MKTAG('t', 'w', 'o', 's') }, /* 16 bits */
+ { CODEC_ID_PCM_U8, MKTAG('r', 'a', 'w', ' ') }, /* 8 bits unsigned */
+ { CODEC_ID_PCM_S16LE, MKTAG('s', 'o', 'w', 't') }, /* */
+ { CODEC_ID_PCM_MULAW, MKTAG('u', 'l', 'a', 'w') }, /* */
+ { CODEC_ID_PCM_ALAW, MKTAG('a', 'l', 'a', 'w') }, /* */
+ { CODEC_ID_ADPCM_IMA_QT, MKTAG('i', 'm', 'a', '4') }, /* IMA-4 ADPCM */
+ { CODEC_ID_ADPCM_MS, MKTAG('m', 's', 0x00, 0x02) }, /* MS ADPCM */
+ { CODEC_ID_MACE3, MKTAG('M', 'A', 'C', '3') }, /* Macintosh Audio Compression and Expansion 3:1 */
+ { CODEC_ID_MACE6, MKTAG('M', 'A', 'C', '6') }, /* Macintosh Audio Compression and Expansion 6:1 */
+
+ { CODEC_ID_MP3, MKTAG('.', 'm', 'p', '3') }, /* MPEG layer 3 */ /* sample files at http://www.3ivx.com/showcase.html use this tag */
+ { CODEC_ID_MP2, 0x6D730055 }, /* MPEG layer 3 */
+ { CODEC_ID_MP2, 0x5500736D }, /* MPEG layer 3 *//* XXX: check endianness */
+/* { CODEC_ID_OGG_VORBIS, MKTAG('O', 'g', 'g', 'S') }, *//* sample files at http://heroinewarrior.com/xmovie.php3 use this tag */
+/* MP4 tags */
+ { CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') }, /* MPEG-4 AAC */
+ /* The standard for mpeg4 audio is still not normalised AFAIK anyway */
+ { CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') }, /* AMR-NB 3gp */
+ { CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') }, /* AMR-WB 3gp */
+ { CODEC_ID_AC3, MKTAG('m', 's', 0x20, 0x00) }, /* Dolby AC-3 */
+ { CODEC_ID_ALAC,MKTAG('a', 'l', 'a', 'c') }, /* Apple Lossless */
+ { CODEC_ID_QDM2,MKTAG('Q', 'D', 'M', '2') }, /* QDM2 */
+ { CODEC_ID_DVAUDIO, MKTAG('v', 'd', 'v', 'a') },
+ { CODEC_ID_DVAUDIO, MKTAG('d', 'v', 'c', 'a') },
+ { CODEC_ID_PCM_S16LE, MKTAG('l', 'p', 'c', 'm') },
+ { CODEC_ID_NONE, 0 },
+};
+
+/* the QuickTime file format is quite convoluted...
+ * it has lots of index tables, each indexing something in another one...
+ * Here we just use what is needed to read the chunks
+ */
+
+typedef struct MOV_sample_to_chunk_tbl {
+ long first;
+ long count;
+ long id;
+} MOV_sample_to_chunk_tbl;
+
+typedef struct {
+ uint32_t type;
+ int64_t offset;
+ int64_t size; /* total size (excluding the size and type fields) */
+} MOV_atom_t;
+
+typedef struct {
+ int seed;
+ int flags;
+ int size;
+ void* clrs;
+} MOV_ctab_t;
+
+typedef struct MOV_mdat_atom_s {
+ offset_t offset;
+ int64_t size;
+} MOV_mdat_atom_t;
+
+typedef struct {
+ uint8_t version;
+ uint32_t flags; // 24bit
+
+ /* 0x03 ESDescrTag */
+ uint16_t es_id;
+#define MP4ODescrTag 0x01
+#define MP4IODescrTag 0x02
+#define MP4ESDescrTag 0x03
+#define MP4DecConfigDescrTag 0x04
+#define MP4DecSpecificDescrTag 0x05
+#define MP4SLConfigDescrTag 0x06
+#define MP4ContentIdDescrTag 0x07
+#define MP4SupplContentIdDescrTag 0x08
+#define MP4IPIPtrDescrTag 0x09
+#define MP4IPMPPtrDescrTag 0x0A
+#define MP4IPMPDescrTag 0x0B
+#define MP4RegistrationDescrTag 0x0D
+#define MP4ESIDIncDescrTag 0x0E
+#define MP4ESIDRefDescrTag 0x0F
+#define MP4FileIODescrTag 0x10
+#define MP4FileODescrTag 0x11
+#define MP4ExtProfileLevelDescrTag 0x13
+#define MP4ExtDescrTagsStart 0x80
+#define MP4ExtDescrTagsEnd 0xFE
+ uint8_t stream_priority;
+
+ /* 0x04 DecConfigDescrTag */
+ uint8_t object_type_id;
+ uint8_t stream_type;
+ /* XXX: really streamType is
+ * only 6bit, followed by:
+ * 1bit upStream
+ * 1bit reserved
+ */
+ uint32_t buffer_size_db; // 24
+ uint32_t max_bitrate;
+ uint32_t avg_bitrate;
+
+ /* 0x05 DecSpecificDescrTag */
+ uint8_t decoder_cfg_len;
+ uint8_t *decoder_cfg;
+
+ /* 0x06 SLConfigDescrTag */
+ uint8_t sl_config_len;
+ uint8_t *sl_config;
+} MOV_esds_t;
+
+struct MOVParseTableEntry;
+
+typedef struct MOVStreamContext {
+ int ffindex; /* the ffmpeg stream id */
+ long next_chunk;
+ long chunk_count;
+ int64_t *chunk_offsets;
+ int stts_count;
+ Time2Sample *stts_data;
+ int ctts_count;
+ Time2Sample *ctts_data;
+ int edit_count; /* number of 'edit' (elst atom) */
+ long sample_to_chunk_sz;
+ MOV_sample_to_chunk_tbl *sample_to_chunk;
+ int sample_to_ctime_index;
+ int sample_to_ctime_sample;
+ long sample_size;
+ long sample_count;
+ long *sample_sizes;
+ long keyframe_count;
+ long *keyframes;
+ int time_scale;
+ int time_rate;
+ long current_sample;
+ MOV_esds_t esds;
+ AVRational sample_size_v1;
+ int dv_audio_container;
+} MOVStreamContext;
+
+typedef struct MOVContext {
+ AVFormatContext *fc;
+ int time_scale;
+ int64_t duration; /* duration of the longest track */
+    int found_moov; /* when both 'moov' and 'mdat' sections have been found */
+ int found_mdat; /* we suppose we have enough data to read the file */
+ int64_t mdat_size;
+ int64_t mdat_offset;
+ int total_streams;
+    /* some streams listed here aren't presented to the ffmpeg API, since they are neither video nor audio,
+ * but we need the info to be able to skip data from those streams in the 'mdat' section
+ */
+ MOVStreamContext *streams[MAX_STREAMS];
+
+ int ctab_size;
+ MOV_ctab_t **ctab; /* color tables */
+ const struct MOVParseTableEntry *parse_table; /* could be eventually used to change the table */
+    /* NOTE: for recursion, save to / restore from a local variable! */
+
+ AVPaletteControl palette_control;
+ MOV_mdat_atom_t *mdat_list;
+ int mdat_count;
+ DVDemuxContext *dv_demux;
+ AVFormatContext *dv_fctx;
+ int isom; /* 1 if file is ISO Media (mp4/3gp) */
+} MOVContext;
+
+
+/* XXX: it's the first time I make a recursive parser I think... sorry if it's ugly :P */
+
+/* those functions parse an atom */
+/* return code:
+ 1: found what I wanted, exit
+ 0: continue to parse next atom
+ -1: error occurred, exit
+ */
+typedef int (*mov_parse_function)(MOVContext *ctx, ByteIOContext *pb, MOV_atom_t atom);
+
+/* links atom IDs to parse functions */
+typedef struct MOVParseTableEntry {
+ uint32_t type;
+ mov_parse_function func;
+} MOVParseTableEntry;
+
+static int mov_read_default(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int64_t total_size = 0;
+ MOV_atom_t a;
+ int i;
+ int err = 0;
+
+ a.offset = atom.offset;
+
+ if (atom.size < 0)
+ atom.size = 0x7fffffffffffffffLL;
+ while(((total_size + 8) < atom.size) && !url_feof(pb) && !err) {
+ a.size = atom.size;
+ a.type=0L;
+ if(atom.size >= 8) {
+ a.size = get_be32(pb);
+ a.type = get_le32(pb);
+ }
+ total_size += 8;
+ a.offset += 8;
+ dprintf("type: %08x %.4s sz: %"PRIx64" %"PRIx64" %"PRIx64"\n", a.type, (char*)&a.type, a.size, atom.size, total_size);
+ if (a.size == 1) { /* 64 bit extended size */
+ a.size = get_be64(pb) - 8;
+ a.offset += 8;
+ total_size += 8;
+ }
+ if (a.size == 0) {
+ a.size = atom.size - total_size;
+ if (a.size <= 8)
+ break;
+ }
+ for (i = 0; c->parse_table[i].type != 0L
+ && c->parse_table[i].type != a.type; i++)
+ /* empty */;
+
+ a.size -= 8;
+
+ if(a.size < 0)
+ break;
+
+ if (c->parse_table[i].type == 0) { /* skip leaf atoms data */
+ url_fskip(pb, a.size);
+ } else {
+ offset_t start_pos = url_ftell(pb);
+ int64_t left;
+ err = (c->parse_table[i].func)(c, pb, a);
+ left = a.size - url_ftell(pb) + start_pos;
+ if (left > 0) /* skip garbage at atom end */
+ url_fskip(pb, left);
+ }
+
+ a.offset += a.size;
+ total_size += a.size;
+ }
+
+ if (!err && total_size < atom.size && atom.size < 0x7ffff) {
+ url_fskip(pb, atom.size - total_size);
+ }
+
+ return err;
+}
+
+static int mov_read_ctab(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+#if 1
+ url_fskip(pb, atom.size); // for now
+#else
+ VERY VERY BROKEN, NEVER execute this, needs rewrite
+ unsigned int len;
+ MOV_ctab_t *t;
+ c->ctab = av_realloc(c->ctab, ++c->ctab_size);
+ t = c->ctab[c->ctab_size];
+ t->seed = get_be32(pb);
+ t->flags = get_be16(pb);
+ t->size = get_be16(pb) + 1;
+ len = 2 * t->size * 4;
+ if (len > 0) {
+ t->clrs = av_malloc(len); // 16bit A R G B
+ if (t->clrs)
+ get_buffer(pb, t->clrs, len);
+ }
+#endif
+
+ return 0;
+}
+
+static int mov_read_hdlr(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ uint32_t type;
+ uint32_t ctype;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ /* component type */
+ ctype = get_le32(pb);
+ type = get_le32(pb); /* component subtype */
+
+ dprintf("ctype= %c%c%c%c (0x%08lx)\n", *((char *)&ctype), ((char *)&ctype)[1], ((char *)&ctype)[2], ((char *)&ctype)[3], (long) ctype);
+ dprintf("stype= %c%c%c%c\n", *((char *)&type), ((char *)&type)[1], ((char *)&type)[2], ((char *)&type)[3]);
+ if(!ctype)
+ c->isom = 1;
+ if(type == MKTAG('v', 'i', 'd', 'e'))
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ else if(type == MKTAG('s', 'o', 'u', 'n'))
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ else if(type == MKTAG('m', '1', 'a', ' '))
+ st->codec->codec_id = CODEC_ID_MP2;
+ else if(type == MKTAG('s', 'u', 'b', 'p')) {
+ st->codec->codec_type = CODEC_TYPE_SUBTITLE;
+ st->codec->codec_id = CODEC_ID_DVD_SUBTITLE;
+ }
+    get_be32(pb); /* component manufacturer */
+ get_be32(pb); /* component flags */
+ get_be32(pb); /* component flags mask */
+
+ if(atom.size <= 24)
+ return 0; /* nothing left to read */
+
+ url_fskip(pb, atom.size - (url_ftell(pb) - atom.offset));
+ return 0;
+}
+
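+/* MPEG-4 descriptor lengths are coded on up to four bytes, seven payload
+ * bits per byte; the top bit of each byte signals that another length byte
+ * follows. */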
+static int mov_mp4_read_descr_len(ByteIOContext *pb)
+{
+ int len = 0;
+ int count = 4;
+ while (count--) {
+ int c = get_byte(pb);
+ len = (len << 7) | (c & 0x7f);
+ if (!(c & 0x80))
+ break;
+ }
+ return len;
+}
+
+static int mov_mp4_read_descr(ByteIOContext *pb, int *tag)
+{
+ int len;
+ *tag = get_byte(pb);
+ len = mov_mp4_read_descr_len(pb);
+ dprintf("MPEG4 description: tag=0x%02x len=%d\n", *tag, len);
+ return len;
+}
+
+static int mov_read_esds(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ int tag, len;
+
+    /* Well, broken but sufficient for some MP4 streams */
+ get_be32(pb); /* version + flags */
+ len = mov_mp4_read_descr(pb, &tag);
+ if (tag == MP4ESDescrTag) {
+ get_be16(pb); /* ID */
+ get_byte(pb); /* priority */
+ } else
+ get_be16(pb); /* ID */
+
+ len = mov_mp4_read_descr(pb, &tag);
+ if (tag == MP4DecConfigDescrTag) {
+ sc->esds.object_type_id = get_byte(pb);
+ sc->esds.stream_type = get_byte(pb);
+ sc->esds.buffer_size_db = get_be24(pb);
+ sc->esds.max_bitrate = get_be32(pb);
+ sc->esds.avg_bitrate = get_be32(pb);
+
+ st->codec->codec_id= codec_get_id(ff_mov_obj_type, sc->esds.object_type_id);
+ dprintf("esds object type id %d\n", sc->esds.object_type_id);
+ len = mov_mp4_read_descr(pb, &tag);
+ if (tag == MP4DecSpecificDescrTag) {
+ dprintf("Specific MPEG4 header len=%d\n", len);
+ st->codec->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (st->codec->extradata) {
+ get_buffer(pb, st->codec->extradata, len);
+ st->codec->extradata_size = len;
+ /* from mplayer */
+ if ((*st->codec->extradata >> 3) == 29) {
+ st->codec->codec_id = CODEC_ID_MP3ON4;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/* this atom contains actual media data */
+static int mov_read_mdat(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ if(atom.size == 0) /* wrong one (MP4) */
+ return 0;
+ c->mdat_list = av_realloc(c->mdat_list, (c->mdat_count + 1) * sizeof(*c->mdat_list));
+ c->mdat_list[c->mdat_count].offset = atom.offset;
+ c->mdat_list[c->mdat_count].size = atom.size;
+ c->mdat_count++;
+ c->found_mdat=1;
+ c->mdat_offset = atom.offset;
+ c->mdat_size = atom.size;
+ if(c->found_moov)
+ return 1; /* found both, just go */
+ url_fskip(pb, atom.size);
+ return 0; /* now go for moov */
+}
+
+static int mov_read_ftyp(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ uint32_t type = get_le32(pb);
+
+ if (type != MKTAG('q','t',' ',' '))
+ c->isom = 1;
+ av_log(c->fc, AV_LOG_DEBUG, "ISO: File Type Major Brand: %.4s\n",(char *)&type);
+ get_be32(pb); /* minor version */
+ url_fskip(pb, atom.size - 8);
+ return 0;
+}
+
+/* this atom should contain all header atoms */
+static int mov_read_moov(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int err;
+
+ err = mov_read_default(c, pb, atom);
+ /* we parsed the 'moov' atom, we can terminate the parsing as soon as we find the 'mdat' */
+ /* so we don't parse the whole file if over a network */
+ c->found_moov=1;
+ if(c->found_mdat)
+ return 1; /* found both, just go */
+ return 0; /* now go for mdat */
+}
+
+
+static int mov_read_mdhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ int version = get_byte(pb);
+ int lang;
+
+ if (version > 1)
+ return 1; /* unsupported */
+
+ get_byte(pb); get_byte(pb);
+ get_byte(pb); /* flags */
+
+ if (version == 1) {
+ get_be64(pb);
+ get_be64(pb);
+ } else {
+ get_be32(pb); /* creation time */
+ get_be32(pb); /* modification time */
+ }
+
+ sc->time_scale = get_be32(pb);
+ st->duration = (version == 1) ? get_be64(pb) : get_be32(pb); /* duration */
+
+ lang = get_be16(pb); /* language */
+ ff_mov_lang_to_iso639(lang, st->language);
+ get_be16(pb); /* quality */
+
+ return 0;
+}
+
+static int mov_read_mvhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int version = get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ if (version == 1) {
+ get_be64(pb);
+ get_be64(pb);
+ } else {
+ get_be32(pb); /* creation time */
+ get_be32(pb); /* modification time */
+ }
+ c->time_scale = get_be32(pb); /* time scale */
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "time scale = %i\n", c->time_scale);
+#endif
+ c->duration = (version == 1) ? get_be64(pb) : get_be32(pb); /* duration */
+ get_be32(pb); /* preferred scale */
+
+ get_be16(pb); /* preferred volume */
+
+ url_fskip(pb, 10); /* reserved */
+
+ url_fskip(pb, 36); /* display matrix */
+
+ get_be32(pb); /* preview time */
+ get_be32(pb); /* preview duration */
+ get_be32(pb); /* poster time */
+ get_be32(pb); /* selection time */
+ get_be32(pb); /* selection duration */
+ get_be32(pb); /* current time */
+ get_be32(pb); /* next track ID */
+
+ return 0;
+}
+
+static int mov_read_smi(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+ if((uint64_t)atom.size > (1<<30))
+ return -1;
+
+    // currently the SVQ3 decoder expects the full STSD header - so let's fake it
+    // this should be fixed so that only the SMI header is passed
+ av_free(st->codec->extradata);
+ st->codec->extradata_size = 0x5a + atom.size;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (st->codec->extradata) {
+ strcpy(st->codec->extradata, "SVQ3"); // fake
+ get_buffer(pb, st->codec->extradata + 0x5a, atom.size);
+ dprintf("Reading SMI %"PRId64" %s\n", atom.size, st->codec->extradata + 0x5a);
+ } else
+ url_fskip(pb, atom.size);
+
+ return 0;
+}
+
+static int mov_read_enda(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ int little_endian = get_be16(pb);
+
+ if (little_endian) {
+ switch (st->codec->codec_id) {
+ case CODEC_ID_PCM_S24BE:
+ st->codec->codec_id = CODEC_ID_PCM_S24LE;
+ break;
+ case CODEC_ID_PCM_S32BE:
+ st->codec->codec_id = CODEC_ID_PCM_S32LE;
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int mov_read_alac(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+ // currently the ALAC decoder expects the full atom header - so let's fake it
+ // this should be fixed so that just the ALAC header is passed
+
+ av_free(st->codec->extradata);
+ st->codec->extradata_size = 36;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (st->codec->extradata) {
+ strcpy(st->codec->extradata + 4, "alac"); // fake
+ get_buffer(pb, st->codec->extradata + 8, 36 - 8);
+ dprintf("Reading alac %d %s\n", st->codec->extradata_size, st->codec->extradata);
+ } else
+ url_fskip(pb, atom.size);
+ return 0;
+}
+
+static int mov_read_wave(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+ if((uint64_t)atom.size > (1<<30))
+ return -1;
+
+ if (st->codec->codec_id == CODEC_ID_QDM2) {
+ // pass the whole frma atom to the codec, needed at least for QDM2
+ av_free(st->codec->extradata);
+ st->codec->extradata_size = atom.size;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (st->codec->extradata) {
+ get_buffer(pb, st->codec->extradata, atom.size);
+ } else
+ url_fskip(pb, atom.size);
+ } else if (atom.size > 8) { /* to read frma, esds atoms */
+ mov_read_default(c, pb, atom);
+ } else
+ url_fskip(pb, atom.size);
+ return 0;
+}
+
+static int mov_read_jp2h(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+ if((uint64_t)atom.size > (1<<30))
+ return -1;
+
+ av_free(st->codec->extradata);
+
+ st->codec->extradata_size = atom.size + 8;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ /* pass the whole jp2h atom to the codec */
+ if (st->codec->extradata) {
+ strcpy(st->codec->extradata + 4, "jp2h");
+ get_buffer(pb, st->codec->extradata + 8, atom.size);
+ } else
+ url_fskip(pb, atom.size);
+ return 0;
+}
+
+static int mov_read_avcC(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+
+ if((uint64_t)atom.size > (1<<30))
+ return -1;
+
+ av_free(st->codec->extradata);
+
+ st->codec->extradata_size = atom.size;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (st->codec->extradata) {
+ get_buffer(pb, st->codec->extradata, atom.size);
+ } else
+ url_fskip(pb, atom.size);
+
+ return 0;
+}
+
+static int mov_read_stco(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ entries = get_be32(pb);
+
+ if(entries >= UINT_MAX/sizeof(int64_t))
+ return -1;
+
+ sc->chunk_count = entries;
+ sc->chunk_offsets = av_malloc(entries * sizeof(int64_t));
+ if (!sc->chunk_offsets)
+ return -1;
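+ /* 'stco' stores 32-bit chunk offsets, 'co64' stores 64-bit ones */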
+ if (atom.type == MKTAG('s', 't', 'c', 'o')) {
+ for(i=0; i<entries; i++) {
+ sc->chunk_offsets[i] = get_be32(pb);
+ }
+ } else if (atom.type == MKTAG('c', 'o', '6', '4')) {
+ for(i=0; i<entries; i++) {
+ sc->chunk_offsets[i] = get_be64(pb);
+ }
+ } else
+ return -1;
+
+ return 0;
+}
+
+static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ int entries, frames_per_sample;
+ uint32_t format;
+ uint8_t codec_name[32];
+
+ /* for palette traversal */
+ int color_depth;
+ int color_start;
+ int color_count;
+ int color_end;
+ int color_index;
+ int color_dec;
+ int color_greyscale;
+ unsigned char *color_table;
+ int j;
+ unsigned char r, g, b;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ entries = get_be32(pb);
+
+ while(entries--) { //Parsing Sample description table
+ enum CodecID id;
+ MOV_atom_t a = { 0, 0, 0 };
+ offset_t start_pos = url_ftell(pb);
+ int size = get_be32(pb); /* size */
+ format = get_le32(pb); /* data format */
+
+ get_be32(pb); /* reserved */
+ get_be16(pb); /* reserved */
+ get_be16(pb); /* index */
+
+ if (st->codec->codec_tag) {
+ /* multiple fourcc, just skip for now */
+ url_fskip(pb, size - (url_ftell(pb) - start_pos));
+ continue;
+ }
+
+ st->codec->codec_tag = format;
+ id = codec_get_id(mov_audio_tags, format);
+ if (st->codec->codec_type != CODEC_TYPE_VIDEO && id > 0) {
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ } else if (st->codec->codec_type != CODEC_TYPE_AUDIO && /* do not overwrite codec type */
+ format && format != MKTAG('m', 'p', '4', 's')) { /* skip old asf mpeg4 tag */
+ id = codec_get_id(mov_video_tags, format);
+ if (id <= 0)
+ id = codec_get_id(codec_bmp_tags, format);
+ if (id > 0)
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ }
+
+ dprintf("size=%d 4CC= %c%c%c%c codec_type=%d\n",
+ size,
+ (format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff, (format >> 24) & 0xff,
+ st->codec->codec_type);
+
+ if(st->codec->codec_type==CODEC_TYPE_VIDEO) {
+ st->codec->codec_id = id;
+ get_be16(pb); /* version */
+ get_be16(pb); /* revision level */
+ get_be32(pb); /* vendor */
+ get_be32(pb); /* temporal quality */
+ get_be32(pb); /* spatial quality */
+
+ st->codec->width = get_be16(pb); /* width */
+ st->codec->height = get_be16(pb); /* height */
+
+ get_be32(pb); /* horiz resolution */
+ get_be32(pb); /* vert resolution */
+ get_be32(pb); /* data size, always 0 */
+ frames_per_sample = get_be16(pb); /* frames per sample */
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "frames/samples = %d\n", frames_per_sample);
+#endif
+ get_buffer(pb, codec_name, 32); /* codec name, pascal string (FIXME: true for mp4?) */
+ if (codec_name[0] <= 31) {
+ memcpy(st->codec->codec_name, &codec_name[1],codec_name[0]);
+ st->codec->codec_name[codec_name[0]] = 0;
+ }
+
+ st->codec->bits_per_sample = get_be16(pb); /* depth */
+ st->codec->color_table_id = get_be16(pb); /* colortable id */
+
+ /* figure out the palette situation */
+ color_depth = st->codec->bits_per_sample & 0x1F;
+ color_greyscale = st->codec->bits_per_sample & 0x20;
+
+ /* if the depth is 2, 4, or 8 bpp, file is palettized */
+ if ((color_depth == 2) || (color_depth == 4) ||
+ (color_depth == 8)) {
+
+ if (color_greyscale) {
+
+ /* compute the greyscale palette */
+ color_count = 1 << color_depth;
+ color_index = 255;
+ color_dec = 256 / (color_count - 1);
+ for (j = 0; j < color_count; j++) {
+ r = g = b = color_index;
+ c->palette_control.palette[j] =
+ (r << 16) | (g << 8) | (b);
+ color_index -= color_dec;
+ if (color_index < 0)
+ color_index = 0;
+ }
+
+ } else if (st->codec->color_table_id & 0x08) {
+
+ /* if flag bit 3 is set, use the default palette */
+ color_count = 1 << color_depth;
+ if (color_depth == 2)
+ color_table = ff_qt_default_palette_4;
+ else if (color_depth == 4)
+ color_table = ff_qt_default_palette_16;
+ else
+ color_table = ff_qt_default_palette_256;
+
+ for (j = 0; j < color_count; j++) {
+ r = color_table[j * 4 + 0];
+ g = color_table[j * 4 + 1];
+ b = color_table[j * 4 + 2];
+ c->palette_control.palette[j] =
+ (r << 16) | (g << 8) | (b);
+ }
+
+ } else {
+
+ /* load the palette from the file */
+ color_start = get_be32(pb);
+ color_count = get_be16(pb);
+ color_end = get_be16(pb);
+ for (j = color_start; j <= color_end; j++) {
+ /* each R, G, or B component is 16 bits;
+ * only use the top 8 bits; skip alpha bytes
+ * up front */
+ get_byte(pb);
+ get_byte(pb);
+ r = get_byte(pb);
+ get_byte(pb);
+ g = get_byte(pb);
+ get_byte(pb);
+ b = get_byte(pb);
+ get_byte(pb);
+ c->palette_control.palette[j] =
+ (r << 16) | (g << 8) | (b);
+ }
+ }
+
+ st->codec->palctrl = &c->palette_control;
+ st->codec->palctrl->palette_changed = 1;
+ } else
+ st->codec->palctrl = NULL;
+ } else if(st->codec->codec_type==CODEC_TYPE_AUDIO) {
+ int bits_per_sample;
+ uint16_t version = get_be16(pb);
+
+ st->codec->codec_id = id;
+ get_be16(pb); /* revision level */
+ get_be32(pb); /* vendor */
+
+ st->codec->channels = get_be16(pb); /* channel count */
+ dprintf("audio channels %d\n", st->codec->channels);
+ st->codec->bits_per_sample = get_be16(pb); /* sample size */
+ /* do we need to force to 16 for AMR ? */
+
+ /* handle specific s8 codec */
+ get_be16(pb); /* compression id = 0*/
+ get_be16(pb); /* packet size = 0 */
+
+ st->codec->sample_rate = ((get_be32(pb) >> 16));
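+ /* the sample rate is stored as 16.16 fixed point, keep only the integer part */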
+
+ switch (st->codec->codec_id) {
+ case CODEC_ID_PCM_S8:
+ case CODEC_ID_PCM_U8:
+ if (st->codec->bits_per_sample == 16)
+ st->codec->codec_id = CODEC_ID_PCM_S16BE;
+ break;
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S16BE:
+ if (st->codec->bits_per_sample == 8)
+ st->codec->codec_id = CODEC_ID_PCM_S8;
+ else if (st->codec->bits_per_sample == 24)
+ st->codec->codec_id = CODEC_ID_PCM_S24BE;
+ break;
+ default:
+ break;
+ }
+
+ //Read QT version 1 fields. In version 0 these don't exist.
+ dprintf("version =%d, isom =%d\n",version,c->isom);
+ if(!c->isom) {
+ if(version==1) {
+ sc->sample_size_v1.den = get_be32(pb); /* samples per packet */
+ get_be32(pb); /* bytes per packet */
+ sc->sample_size_v1.num = get_be32(pb); /* bytes per frame */
+ get_be32(pb); /* bytes per sample */
+ } else if(version==2) {
+ get_be32(pb); /* sizeof struct only */
+ st->codec->sample_rate = av_int2dbl(get_be64(pb)); /* float 64 */
+ st->codec->channels = get_be32(pb);
+ get_be32(pb); /* always 0x7F000000 */
+ get_be32(pb); /* bits per channel if sound is uncompressed */
+ get_be32(pb); /* lpcm format specific flag */
+ get_be32(pb); /* bytes per audio packet if constant */
+ get_be32(pb); /* lpcm frames per audio packet if constant */
+ }
+ }
+
+ bits_per_sample = av_get_bits_per_sample(st->codec->codec_id);
+ if (bits_per_sample) {
+ st->codec->bits_per_sample = bits_per_sample;
+ sc->sample_size = (bits_per_sample >> 3) * st->codec->channels;
+ }
+ } else {
+ /* other codec type, just skip (rtp, mp4s, tmcd ...) */
+ url_fskip(pb, size - (url_ftell(pb) - start_pos));
+ }
+ /* this will read extra atoms at the end (wave, alac, damr, avcC, SMI ...) */
+ a.size = size - (url_ftell(pb) - start_pos);
+ if (a.size > 8)
+ mov_read_default(c, pb, a);
+ else if (a.size > 0)
+ url_fskip(pb, a.size);
+ }
+
+ if(st->codec->codec_type==CODEC_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1) {
+ st->codec->sample_rate= sc->time_scale;
+ }
+
+ /* special codec parameters handling */
+ switch (st->codec->codec_id) {
+#ifdef CONFIG_H261_DECODER
+ case CODEC_ID_H261:
+#endif
+#ifdef CONFIG_H263_DECODER
+ case CODEC_ID_H263:
+#endif
+#ifdef CONFIG_MPEG4_DECODER
+ case CODEC_ID_MPEG4:
+#endif
+ st->codec->width= 0; /* let decoder init width/height */
+ st->codec->height= 0;
+ break;
+#ifdef CONFIG_FAAD
+ case CODEC_ID_AAC:
+#endif
+#ifdef CONFIG_VORBIS_DECODER
+ case CODEC_ID_VORBIS:
+#endif
+ case CODEC_ID_MP3ON4:
+ st->codec->sample_rate= 0; /* let decoder init parameters properly */
+ break;
+#ifdef CONFIG_DV_DEMUXER
+ case CODEC_ID_DVAUDIO:
+ c->dv_fctx = av_alloc_format_context();
+ c->dv_demux = dv_init_demux(c->dv_fctx);
+ if (!c->dv_demux) {
+ av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n");
+ return -1;
+ }
+ sc->dv_audio_container = 1;
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ break;
+#endif
+ /* no ifdef since these parameters are always needed */
+ case CODEC_ID_AMR_WB:
+ st->codec->sample_rate= 16000;
+ st->codec->channels= 1; /* really needed */
+ break;
+ case CODEC_ID_AMR_NB:
+ st->codec->sample_rate= 8000;
+ st->codec->channels= 1; /* really needed */
+ break;
+ case CODEC_ID_MP2:
+ st->codec->codec_type = CODEC_TYPE_AUDIO; /* force type after stsd for m1a hdlr */
+ st->need_parsing = 1;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mov_read_stsc(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ entries = get_be32(pb);
+
+ if(entries >= UINT_MAX / sizeof(MOV_sample_to_chunk_tbl))
+ return -1;
+
+#ifdef DEBUG
+av_log(NULL, AV_LOG_DEBUG, "track[%i].stsc.entries = %i\n", c->fc->nb_streams-1, entries);
+#endif
+ sc->sample_to_chunk_sz = entries;
+ sc->sample_to_chunk = av_malloc(entries * sizeof(MOV_sample_to_chunk_tbl));
+ if (!sc->sample_to_chunk)
+ return -1;
+ for(i=0; i<entries; i++) {
+ sc->sample_to_chunk[i].first = get_be32(pb);
+ sc->sample_to_chunk[i].count = get_be32(pb);
+ sc->sample_to_chunk[i].id = get_be32(pb);
+ }
+ return 0;
+}
+
+static int mov_read_stss(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ entries = get_be32(pb);
+
+ if(entries >= UINT_MAX / sizeof(long))
+ return -1;
+
+ sc->keyframe_count = entries;
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "keyframe_count = %ld\n", sc->keyframe_count);
+#endif
+ sc->keyframes = av_malloc(entries * sizeof(long));
+ if (!sc->keyframes)
+ return -1;
+ for(i=0; i<entries; i++) {
+ sc->keyframes[i] = get_be32(pb);
+#ifdef DEBUG
+/* av_log(NULL, AV_LOG_DEBUG, "keyframes[]=%ld\n", sc->keyframes[i]); */
+#endif
+ }
+ return 0;
+}
+
+static int mov_read_stsz(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries, sample_size;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+
+ sample_size = get_be32(pb);
+ if (!sc->sample_size) /* do not overwrite value computed in stsd */
+ sc->sample_size = sample_size;
+ entries = get_be32(pb);
+ if(entries >= UINT_MAX / sizeof(long))
+ return -1;
+
+ sc->sample_count = entries;
+ if (sample_size)
+ return 0;
+
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "sample_size = %ld sample_count = %ld\n", sc->sample_size, sc->sample_count);
+#endif
+ sc->sample_sizes = av_malloc(entries * sizeof(long));
+ if (!sc->sample_sizes)
+ return -1;
+ for(i=0; i<entries; i++) {
+ sc->sample_sizes[i] = get_be32(pb);
+#ifdef DEBUG
+ av_log(NULL, AV_LOG_DEBUG, "sample_sizes[]=%ld\n", sc->sample_sizes[i]);
+#endif
+ }
+ return 0;
+}
+
+static int mov_read_stts(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+ int64_t duration=0;
+ int64_t total_sample_count=0;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+ entries = get_be32(pb);
+ if(entries >= UINT_MAX / sizeof(Time2Sample))
+ return -1;
+
+ sc->stts_count = entries;
+ sc->stts_data = av_malloc(entries * sizeof(Time2Sample));
+
+#ifdef DEBUG
+av_log(NULL, AV_LOG_DEBUG, "track[%i].stts.entries = %i\n", c->fc->nb_streams-1, entries);
+#endif
+
+ sc->time_rate=0;
+
+ for(i=0; i<entries; i++) {
+ int sample_duration;
+ int sample_count;
+
+ sample_count=get_be32(pb);
+ sample_duration = get_be32(pb);
+ sc->stts_data[i].count= sample_count;
+ sc->stts_data[i].duration= sample_duration;
+
+ sc->time_rate= ff_gcd(sc->time_rate, sample_duration);
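+ /* time_rate accumulates the gcd of all sample durations so timestamps can later be rescaled without remainder */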
+
+ dprintf("sample_count=%d, sample_duration=%d\n",sample_count,sample_duration);
+
+ duration+=(int64_t)sample_duration*sample_count;
+ total_sample_count+=sample_count;
+ }
+
+ st->nb_frames= total_sample_count;
+ if(duration)
+ st->duration= duration;
+ return 0;
+}
+
+static int mov_read_ctts(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ MOVStreamContext *sc = (MOVStreamContext *)st->priv_data;
+ unsigned int i, entries;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+ entries = get_be32(pb);
+ if(entries >= UINT_MAX / sizeof(Time2Sample))
+ return -1;
+
+ sc->ctts_count = entries;
+ sc->ctts_data = av_malloc(entries * sizeof(Time2Sample));
+
+ dprintf("track[%i].ctts.entries = %i\n", c->fc->nb_streams-1, entries);
+
+ for(i=0; i<entries; i++) {
+ int count =get_be32(pb);
+ int duration =get_be32(pb);
+
+ if (duration < 0) {
+ av_log(c->fc, AV_LOG_ERROR, "negative ctts, ignoring\n");
+ sc->ctts_count = 0;
+ url_fskip(pb, 8 * (entries - i - 1));
+ break;
+ }
+ sc->ctts_data[i].count = count;
+ sc->ctts_data[i].duration= duration;
+
+ sc->time_rate= ff_gcd(sc->time_rate, duration);
+ }
+ return 0;
+}
+
+static int mov_read_trak(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st;
+ MOVStreamContext *sc;
+
+ st = av_new_stream(c->fc, c->fc->nb_streams);
+ if (!st) return -2;
+ sc = av_mallocz(sizeof(MOVStreamContext));
+ if (!sc) {
+ av_free(st);
+ return -1;
+ }
+
+ st->priv_data = sc;
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ st->start_time = 0; /* XXX: check */
+ c->streams[c->fc->nb_streams-1] = sc;
+
+ return mov_read_default(c, pb, atom);
+}
+
+static int mov_read_tkhd(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ AVStream *st = c->fc->streams[c->fc->nb_streams-1];
+ int version = get_byte(pb);
+
+ get_byte(pb); get_byte(pb);
+ get_byte(pb); /* flags */
+ /*
+ MOV_TRACK_ENABLED 0x0001
+ MOV_TRACK_IN_MOVIE 0x0002
+ MOV_TRACK_IN_PREVIEW 0x0004
+ MOV_TRACK_IN_POSTER 0x0008
+ */
+
+ if (version == 1) {
+ get_be64(pb);
+ get_be64(pb);
+ } else {
+ get_be32(pb); /* creation time */
+ get_be32(pb); /* modification time */
+ }
+ st->id = (int)get_be32(pb); /* track id (NOT 0 !)*/
+ get_be32(pb); /* reserved */
+ st->start_time = 0; /* check */
+ (version == 1) ? get_be64(pb) : get_be32(pb); /* highlevel (considering edits) duration in movie timebase */
+ get_be32(pb); /* reserved */
+ get_be32(pb); /* reserved */
+
+ get_be16(pb); /* layer */
+ get_be16(pb); /* alternate group */
+ get_be16(pb); /* volume */
+ get_be16(pb); /* reserved */
+
+ url_fskip(pb, 36); /* display matrix */
+
+ /* those are fixed-point */
+ get_be32(pb); /* track width */
+ get_be32(pb); /* track height */
+
+ return 0;
+}
+
+/* this atom should be null (from specs), but some buggy files put the 'moov' atom inside it... */
+/* like the files created with Adobe Premiere 5.0, for samples see */
+/* http://graphics.tudelft.nl/~wouter/publications/soundtests/ */
+static int mov_read_wide(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int err;
+
+ if (atom.size < 8)
+ return 0; /* continue */
+ if (get_be32(pb) != 0) { /* 0 sized mdat atom... use the 'wide' atom size */
+ url_fskip(pb, atom.size - 4);
+ return 0;
+ }
+ atom.type = get_le32(pb);
+ atom.offset += 8;
+ atom.size -= 8;
+ if (atom.type != MKTAG('m', 'd', 'a', 't')) {
+ url_fskip(pb, atom.size);
+ return 0;
+ }
+ err = mov_read_mdat(c, pb, atom);
+ return err;
+}
+
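+/* 'cmov': a zlib-compressed moov atom; inflate it and parse the result as a regular moov */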
+static int mov_read_cmov(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+#ifdef CONFIG_ZLIB
+ ByteIOContext ctx;
+ uint8_t *cmov_data;
+ uint8_t *moov_data; /* uncompressed data */
+ long cmov_len, moov_len;
+ int ret;
+
+ get_be32(pb); /* dcom atom */
+ if (get_le32(pb) != MKTAG( 'd', 'c', 'o', 'm' ))
+ return -1;
+ if (get_le32(pb) != MKTAG( 'z', 'l', 'i', 'b' )) {
+ av_log(NULL, AV_LOG_ERROR, "unknown compression for cmov atom !");
+ return -1;
+ }
+ get_be32(pb); /* cmvd atom */
+ if (get_le32(pb) != MKTAG( 'c', 'm', 'v', 'd' ))
+ return -1;
+ moov_len = get_be32(pb); /* uncompressed size */
+ cmov_len = atom.size - 6 * 4;
+
+ cmov_data = av_malloc(cmov_len);
+ if (!cmov_data)
+ return -1;
+ moov_data = av_malloc(moov_len);
+ if (!moov_data) {
+ av_free(cmov_data);
+ return -1;
+ }
+ get_buffer(pb, cmov_data, cmov_len);
+ if(uncompress (moov_data, (uLongf *) &moov_len, (const Bytef *)cmov_data, cmov_len) != Z_OK)
+ return -1;
+ if(init_put_byte(&ctx, moov_data, moov_len, 0, NULL, NULL, NULL, NULL) != 0)
+ return -1;
+ atom.type = MKTAG( 'm', 'o', 'o', 'v' );
+ atom.offset = 0;
+ atom.size = moov_len;
+#ifdef DEBUG
+// { int fd = open("/tmp/uncompheader.mov", O_WRONLY | O_CREAT); write(fd, moov_data, moov_len); close(fd); }
+#endif
+ ret = mov_read_default(c, &ctx, atom);
+ av_free(moov_data);
+ av_free(cmov_data);
+ return ret;
+#else
+ av_log(c->fc, AV_LOG_ERROR, "this file requires zlib support compiled in\n");
+ return -1;
+#endif
+}
+
+/* edit list atom */
+static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOV_atom_t atom)
+{
+ int i, edit_count;
+
+ get_byte(pb); /* version */
+ get_byte(pb); get_byte(pb); get_byte(pb); /* flags */
+ edit_count= c->streams[c->fc->nb_streams-1]->edit_count = get_be32(pb); /* entries */
+
+ for(i=0; i<edit_count; i++){
+ get_be32(pb); /* Track duration */
+ get_be32(pb); /* Media time */
+ get_be32(pb); /* Media rate */
+ }
+ dprintf("track[%i].edit_count = %i\n", c->fc->nb_streams-1, c->streams[c->fc->nb_streams-1]->edit_count);
+ return 0;
+}
+
+static const MOVParseTableEntry mov_default_parse_table[] = {
+/* mp4 atoms */
+{ MKTAG( 'c', 'o', '6', '4' ), mov_read_stco },
+{ MKTAG( 'c', 't', 't', 's' ), mov_read_ctts }, /* composition time to sample */
+{ MKTAG( 'e', 'd', 't', 's' ), mov_read_default },
+{ MKTAG( 'e', 'l', 's', 't' ), mov_read_elst },
+{ MKTAG( 'e', 'n', 'd', 'a' ), mov_read_enda },
+{ MKTAG( 'f', 't', 'y', 'p' ), mov_read_ftyp },
+{ MKTAG( 'h', 'd', 'l', 'r' ), mov_read_hdlr },
+{ MKTAG( 'j', 'p', '2', 'h' ), mov_read_jp2h },
+{ MKTAG( 'm', 'd', 'a', 't' ), mov_read_mdat },
+{ MKTAG( 'm', 'd', 'h', 'd' ), mov_read_mdhd },
+{ MKTAG( 'm', 'd', 'i', 'a' ), mov_read_default },
+{ MKTAG( 'm', 'i', 'n', 'f' ), mov_read_default },
+{ MKTAG( 'm', 'o', 'o', 'v' ), mov_read_moov },
+{ MKTAG( 'm', 'v', 'h', 'd' ), mov_read_mvhd },
+{ MKTAG( 'S', 'M', 'I', ' ' ), mov_read_smi }, /* Sorenson extension ??? */
+{ MKTAG( 'a', 'l', 'a', 'c' ), mov_read_alac }, /* alac specific atom */
+{ MKTAG( 'a', 'v', 'c', 'C' ), mov_read_avcC },
+{ MKTAG( 's', 't', 'b', 'l' ), mov_read_default },
+{ MKTAG( 's', 't', 'c', 'o' ), mov_read_stco },
+{ MKTAG( 's', 't', 's', 'c' ), mov_read_stsc },
+{ MKTAG( 's', 't', 's', 'd' ), mov_read_stsd }, /* sample description */
+{ MKTAG( 's', 't', 's', 's' ), mov_read_stss }, /* sync sample */
+{ MKTAG( 's', 't', 's', 'z' ), mov_read_stsz }, /* sample size */
+{ MKTAG( 's', 't', 't', 's' ), mov_read_stts },
+{ MKTAG( 't', 'k', 'h', 'd' ), mov_read_tkhd }, /* track header */
+{ MKTAG( 't', 'r', 'a', 'k' ), mov_read_trak },
+{ MKTAG( 'w', 'a', 'v', 'e' ), mov_read_wave },
+{ MKTAG( 'c', 't', 'a', 'b' ), mov_read_ctab },
+{ MKTAG( 'e', 's', 'd', 's' ), mov_read_esds },
+{ MKTAG( 'w', 'i', 'd', 'e' ), mov_read_wide }, /* placeholder */
+{ MKTAG( 'c', 'm', 'o', 'v' ), mov_read_cmov },
+{ 0L, NULL }
+};
+
+static void mov_free_stream_context(MOVStreamContext *sc)
+{
+ if(sc) {
+ av_freep(&sc->ctts_data);
+ av_freep(&sc);
+ }
+}
+
+/* XXX: is it sufficient ? */
+static int mov_probe(AVProbeData *p)
+{
+ unsigned int offset;
+ uint32_t tag;
+ int score = 0;
+
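+ /* walk the top-level atoms and score the buffer by how QuickTime-like its tags look */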
+ /* check file header */
+ if (p->buf_size <= 12)
+ return 0;
+ offset = 0;
+ for(;;) {
+ /* ignore invalid offset */
+ if ((offset + 8) > (unsigned int)p->buf_size)
+ return score;
+ tag = LE_32(p->buf + offset + 4);
+ switch(tag) {
+ /* check for obvious tags */
+ case MKTAG( 'j', 'P', ' ', ' ' ): /* jpeg 2000 signature */
+ case MKTAG( 'm', 'o', 'o', 'v' ):
+ case MKTAG( 'm', 'd', 'a', 't' ):
+ case MKTAG( 'p', 'n', 'o', 't' ): /* detect movs with preview pics like ew.mov and april.mov */
+ case MKTAG( 'u', 'd', 't', 'a' ): /* Packet Video PVAuthor adds this and a lot more junk */
+ return AVPROBE_SCORE_MAX;
+ /* these are more common words, so rate them a bit less */
+ case MKTAG( 'w', 'i', 'd', 'e' ):
+ case MKTAG( 'f', 'r', 'e', 'e' ):
+ case MKTAG( 'j', 'u', 'n', 'k' ):
+ case MKTAG( 'p', 'i', 'c', 't' ):
+ return AVPROBE_SCORE_MAX - 5;
+ case MKTAG( 'f', 't', 'y', 'p' ):
+ case MKTAG( 's', 'k', 'i', 'p' ):
+ case MKTAG( 'u', 'u', 'i', 'd' ):
+ offset = BE_32(p->buf+offset) + offset;
+ /* if we only find these because the probe data is too small, at least rate them */
+ score = AVPROBE_SCORE_MAX - 50;
+ break;
+ default:
+ /* unrecognized tag */
+ return score;
+ }
+ }
+ return score;
+}
+
+static void mov_build_index(MOVContext *mov, AVStream *st)
+{
+ MOVStreamContext *sc = st->priv_data;
+ offset_t current_offset;
+ int64_t current_dts = 0;
+ int stts_index = 0;
+ int stsc_index = 0;
+ int stss_index = 0;
+ int i, j, k;
+
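+ /* video and variable-size audio are indexed one entry per sample; constant-size audio is indexed per chunk and whole chunks are read at once */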
+ if (sc->sample_sizes || st->codec->codec_type == CODEC_TYPE_VIDEO || sc->dv_audio_container) {
+ int keyframe, sample_size;
+ int current_sample = 0;
+ int stts_sample = 0;
+ int distance = 0;
+
+ st->nb_frames = sc->sample_count;
+ for (i = 0; i < sc->chunk_count; i++) {
+ current_offset = sc->chunk_offsets[i];
+ if (stsc_index + 1 < sc->sample_to_chunk_sz && i + 1 == sc->sample_to_chunk[stsc_index + 1].first)
+ stsc_index++;
+ for (j = 0; j < sc->sample_to_chunk[stsc_index].count; j++) {
+ keyframe = !sc->keyframe_count || current_sample + 1 == sc->keyframes[stss_index];
+ if (keyframe) {
+ distance = 0;
+ if (stss_index + 1 < sc->keyframe_count)
+ stss_index++;
+ }
+ sample_size = sc->sample_size > 0 ? sc->sample_size : sc->sample_sizes[current_sample];
+ dprintf("AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", size %d, distance %d, keyframe %d\n",
+ st->index, current_sample, current_offset, current_dts, sample_size, distance, keyframe);
+ av_add_index_entry(st, current_offset, current_dts, sample_size, distance, keyframe ? AVINDEX_KEYFRAME : 0);
+ current_offset += sample_size;
+ assert(sc->stts_data[stts_index].duration % sc->time_rate == 0);
+ current_dts += sc->stts_data[stts_index].duration / sc->time_rate;
+ distance++;
+ stts_sample++;
+ if (current_sample + 1 < sc->sample_count)
+ current_sample++;
+ if (stts_index + 1 < sc->stts_count && stts_sample == sc->stts_data[stts_index].count) {
+ stts_sample = 0;
+ stts_index++;
+ }
+ }
+ }
+ } else { /* read whole chunk */
+ int chunk_samples, chunk_size, chunk_duration;
+
+ for (i = 0; i < sc->chunk_count; i++) {
+ current_offset = sc->chunk_offsets[i];
+ if (stsc_index + 1 < sc->sample_to_chunk_sz && i + 1 == sc->sample_to_chunk[stsc_index + 1].first)
+ stsc_index++;
+ chunk_samples = sc->sample_to_chunk[stsc_index].count;
+ /* get chunk size */
+ if (sc->sample_size > 1 || st->codec->codec_id == CODEC_ID_PCM_U8 || st->codec->codec_id == CODEC_ID_PCM_S8)
+ chunk_size = chunk_samples * sc->sample_size;
+ else if (sc->sample_size_v1.den > 0 && (chunk_samples * sc->sample_size_v1.num % sc->sample_size_v1.den == 0))
+ chunk_size = chunk_samples * sc->sample_size_v1.num / sc->sample_size_v1.den;
+ else { /* workaround to find nearest next chunk offset */
+ chunk_size = INT_MAX;
+ for (j = 0; j < mov->total_streams; j++) {
+ MOVStreamContext *msc = mov->streams[j];
+
+ for (k = msc->next_chunk; k < msc->chunk_count; k++) {
+ if (msc->chunk_offsets[k] > current_offset && msc->chunk_offsets[k] - current_offset < chunk_size) {
+ chunk_size = msc->chunk_offsets[k] - current_offset;
+ msc->next_chunk = k;
+ break;
+ }
+ }
+ }
+ /* check for last chunk */
+ if (chunk_size == INT_MAX)
+ for (j = 0; j < mov->mdat_count; j++) {
+ dprintf("mdat %d, offset %"PRIx64", size %"PRId64", current offset %"PRIx64"\n",
+ j, mov->mdat_list[j].offset, mov->mdat_list[j].size, current_offset);
+ if (mov->mdat_list[j].offset <= current_offset && mov->mdat_list[j].offset + mov->mdat_list[j].size > current_offset)
+ chunk_size = mov->mdat_list[j].offset + mov->mdat_list[j].size - current_offset;
+ }
+ assert(chunk_size != INT_MAX);
+ for (j = 0; j < mov->total_streams; j++) {
+ mov->streams[j]->next_chunk = 0;
+ }
+ }
+ av_add_index_entry(st, current_offset, current_dts, chunk_size, 0, AVINDEX_KEYFRAME);
+ /* get chunk duration */
+ chunk_duration = 0;
+ while (chunk_samples > 0) {
+ if (chunk_samples < sc->stts_data[stts_index].count) {
+ chunk_duration += sc->stts_data[stts_index].duration * chunk_samples;
+ sc->stts_data[stts_index].count -= chunk_samples;
+ break;
+ } else {
+ chunk_duration += sc->stts_data[stts_index].duration * sc->stts_data[stts_index].count; /* only this entry's remaining samples belong to the chunk here */
+ chunk_samples -= sc->stts_data[stts_index].count;
+ if (stts_index + 1 < sc->stts_count) {
+ stts_index++;
+ }
+ }
+ }
+ dprintf("AVIndex stream %d, chunk %d, offset %"PRIx64", dts %"PRId64", size %d, duration %d\n",
+ st->index, i, current_offset, current_dts, chunk_size, chunk_duration);
+ assert(chunk_duration % sc->time_rate == 0);
+ current_dts += chunk_duration / sc->time_rate;
+ }
+ }
+ /* adjust sample count to avindex entries */
+ sc->sample_count = st->nb_index_entries;
+}
+
+static int mov_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ MOVContext *mov = (MOVContext *) s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int i, err;
+ MOV_atom_t atom = { 0, 0, 0 };
+
+ mov->fc = s;
+ mov->parse_table = mov_default_parse_table;
+
+ if(!url_is_streamed(pb)) /* .mov and .mp4 aren't streamable anyway (only progressive download if moov is before mdat) */
+ atom.size = url_fsize(pb);
+ else
+ atom.size = 0x7FFFFFFFFFFFFFFFLL;
+
+ /* check MOV header */
+ err = mov_read_default(mov, pb, atom);
+ if (err<0 || (!mov->found_moov && !mov->found_mdat)) {
+ av_log(s, AV_LOG_ERROR, "mov: header not found !!! (err:%d, moov:%d, mdat:%d) pos:%"PRId64"\n",
+ err, mov->found_moov, mov->found_mdat, url_ftell(pb));
+ return -1;
+ }
+ dprintf("on_parse_exit_offset=%d\n", (int) url_ftell(pb));
+
+ /* some cleanup : make sure we are on the mdat atom */
+ if(!url_is_streamed(pb) && (url_ftell(pb) != mov->mdat_offset))
+ url_fseek(pb, mov->mdat_offset, SEEK_SET);
+
+ mov->total_streams = s->nb_streams;
+
+ for(i=0; i<mov->total_streams; i++) {
+ MOVStreamContext *sc = mov->streams[i];
+
+ if(!sc->time_rate)
+ sc->time_rate=1;
+ if(!sc->time_scale)
+ sc->time_scale= mov->time_scale;
+ av_set_pts_info(s->streams[i], 64, sc->time_rate, sc->time_scale);
+
+ if(s->streams[i]->duration != AV_NOPTS_VALUE){
+ assert(s->streams[i]->duration % sc->time_rate == 0);
+ s->streams[i]->duration /= sc->time_rate;
+ }
+ sc->ffindex = i;
+ mov_build_index(mov, s->streams[i]);
+ }
+
+ for(i=0; i<mov->total_streams; i++) {
+ /* don't need these anymore */
+ av_freep(&mov->streams[i]->chunk_offsets);
+ av_freep(&mov->streams[i]->sample_to_chunk);
+ av_freep(&mov->streams[i]->sample_sizes);
+ av_freep(&mov->streams[i]->keyframes);
+ av_freep(&mov->streams[i]->stts_data);
+ }
+ av_freep(&mov->mdat_list);
+ return 0;
+}
+
+static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ MOVContext *mov = s->priv_data;
+ MOVStreamContext *sc = 0;
+ AVIndexEntry *sample = 0;
+ int64_t best_dts = INT64_MAX;
+ int i;
+
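+ /* pick the stream whose next sample has the smallest dts so that packets come out roughly interleaved */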
+ for (i = 0; i < mov->total_streams; i++) {
+ MOVStreamContext *msc = mov->streams[i];
+
+ if (s->streams[i]->discard != AVDISCARD_ALL && msc->current_sample < msc->sample_count) {
+ AVIndexEntry *current_sample = &s->streams[i]->index_entries[msc->current_sample];
+ int64_t dts = av_rescale(current_sample->timestamp * (int64_t)msc->time_rate, AV_TIME_BASE, msc->time_scale);
+
+ dprintf("stream %d, sample %ld, dts %"PRId64"\n", i, msc->current_sample, dts);
+ if (dts < best_dts) {
+ sample = current_sample;
+ best_dts = dts;
+ sc = msc;
+ }
+ }
+ }
+ if (!sample)
+ return -1;
+ /* must be done just before reading, to avoid infinite loop on sample */
+ sc->current_sample++;
+ if (sample->pos >= url_fsize(&s->pb)) {
+ av_log(mov->fc, AV_LOG_ERROR, "stream %d, offset 0x%"PRIx64": partial file\n", sc->ffindex, sample->pos);
+ return -1;
+ }
+#ifdef CONFIG_DV_DEMUXER
+ if (sc->dv_audio_container) {
+ dv_get_packet(mov->dv_demux, pkt);
+ dprintf("dv audio pkt size %d\n", pkt->size);
+ } else {
+#endif
+ url_fseek(&s->pb, sample->pos, SEEK_SET);
+ av_get_packet(&s->pb, pkt, sample->size);
+#ifdef CONFIG_DV_DEMUXER
+ if (mov->dv_demux) {
+ void *pkt_destruct_func = pkt->destruct;
+ dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size);
+ pkt->destruct = pkt_destruct_func;
+ }
+ }
+#endif
+ pkt->stream_index = sc->ffindex;
+ pkt->dts = sample->timestamp;
+ if (sc->ctts_data) {
+ assert(sc->ctts_data[sc->sample_to_ctime_index].duration % sc->time_rate == 0);
+ pkt->pts = pkt->dts + sc->ctts_data[sc->sample_to_ctime_index].duration / sc->time_rate;
+ /* update ctts context */
+ sc->sample_to_ctime_sample++;
+ if (sc->sample_to_ctime_index < sc->ctts_count && sc->ctts_data[sc->sample_to_ctime_index].count == sc->sample_to_ctime_sample) {
+ sc->sample_to_ctime_index++;
+ sc->sample_to_ctime_sample = 0;
+ }
+ } else {
+ pkt->pts = pkt->dts;
+ }
+ pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? PKT_FLAG_KEY : 0;
+ pkt->pos = sample->pos;
+ dprintf("stream %d, pts %"PRId64", dts %"PRId64", pos 0x%"PRIx64", duration %d\n", pkt->stream_index, pkt->pts, pkt->dts, pkt->pos, pkt->duration);
+ return 0;
+}
+
+static int mov_seek_stream(AVStream *st, int64_t timestamp, int flags)
+{
+ MOVStreamContext *sc = st->priv_data;
+ int sample, time_sample;
+ int i;
+
+ sample = av_index_search_timestamp(st, timestamp, flags);
+ dprintf("stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample);
+ if (sample < 0) /* not sure what to do */
+ return -1;
+ sc->current_sample = sample;
+ dprintf("stream %d, found sample %ld\n", st->index, sc->current_sample);
+ /* adjust ctts index */
+ if (sc->ctts_data) {
+ time_sample = 0;
+ for (i = 0; i < sc->ctts_count; i++) {
+ time_sample += sc->ctts_data[i].count;
+ if (time_sample >= sc->current_sample) {
+ sc->sample_to_ctime_index = i;
+ sc->sample_to_ctime_sample = time_sample - sc->current_sample;
+ break;
+ }
+ }
+ }
+ return sample;
+}
+
+static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
+{
+ AVStream *st;
+ int64_t seek_timestamp, timestamp;
+ int sample;
+ int i;
+
+ if (stream_index >= s->nb_streams)
+ return -1;
+
+ st = s->streams[stream_index];
+ sample = mov_seek_stream(st, sample_time, flags);
+ if (sample < 0)
+ return -1;
+
+ /* adjust seek timestamp to found sample timestamp */
+ seek_timestamp = st->index_entries[sample].timestamp;
+
+ for (i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ if (stream_index == i || st->discard == AVDISCARD_ALL)
+ continue;
+
+ timestamp = av_rescale_q(seek_timestamp, s->streams[stream_index]->time_base, st->time_base);
+ mov_seek_stream(st, timestamp, flags);
+ }
+ return 0;
+}
+
+static int mov_read_close(AVFormatContext *s)
+{
+ int i;
+ MOVContext *mov = (MOVContext *) s->priv_data;
+ for(i=0; i<mov->total_streams; i++)
+ mov_free_stream_context(mov->streams[i]);
+ /* free color tabs */
+ for(i=0; i<mov->ctab_size; i++)
+ av_freep(&mov->ctab[i]);
+ if(mov->dv_demux){
+ for(i=0; i<mov->dv_fctx->nb_streams; i++){
+ av_freep(&mov->dv_fctx->streams[i]->codec);
+ av_freep(&mov->dv_fctx->streams[i]);
+ }
+ av_freep(&mov->dv_fctx);
+ av_freep(&mov->dv_demux);
+ }
+ av_freep(&mov->ctab);
+ return 0;
+}
+
+AVInputFormat mov_demuxer = {
+ "mov,mp4,m4a,3gp,3g2,mj2",
+ "QuickTime/MPEG4/Motion JPEG 2000 format",
+ sizeof(MOVContext),
+ mov_probe,
+ mov_read_header,
+ mov_read_packet,
+ mov_read_close,
+ mov_read_seek,
+};
diff --git a/contrib/ffmpeg/libavformat/movenc.c b/contrib/ffmpeg/libavformat/movenc.c
new file mode 100644
index 000000000..736d1594a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/movenc.c
@@ -0,0 +1,1724 @@
+/*
+ * MOV, 3GP, MP4 muxer
+ * Copyright (c) 2003 Thomas Raivio.
+ * Copyright (c) 2004 Gildas Bazin <gbazin at videolan dot org>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+#include "avio.h"
+#include "isom.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#define MOV_INDEX_CLUSTER_SIZE 16384
+#define globalTimescale 1000
+
+#define MODE_MP4 0
+#define MODE_MOV 1
+#define MODE_3GP 2
+#define MODE_PSP 3 // example working PSP command line:
+// ffmpeg -i testinput.avi -f psp -r 14.985 -s 320x240 -b 768 -ar 24000 -ab 32 M4V00001.MP4
+#define MODE_3G2 4
+
+typedef struct MOVIentry {
+ unsigned int flags, size;
+ uint64_t pos;
+ unsigned int samplesInChunk;
+ char key_frame;
+ unsigned int entries;
+ int64_t cts;
+ int64_t dts;
+} MOVIentry;
+
+typedef struct MOVIndex {
+ int mode;
+ int entry;
+ long timescale;
+ long time;
+ int64_t trackDuration;
+ long sampleCount;
+ long sampleDuration;
+ long sampleSize;
+ int hasKeyframes;
+ int hasBframes;
+ int language;
+ int trackID;
+ int tag;
+ AVCodecContext *enc;
+
+ int vosLen;
+ uint8_t *vosData;
+ MOVIentry *cluster;
+ int audio_vbr;
+} MOVTrack;
+
+typedef struct MOVContext {
+ int mode;
+ int64_t time;
+ int nb_streams;
+ offset_t mdat_pos;
+ uint64_t mdat_size;
+ long timescale;
+ MOVTrack tracks[MAX_STREAMS];
+} MOVContext;
+
+//FIXME support 64-bit variant with wide placeholders
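+/* seek back to 'pos', patch in the atom size now that its end is known, and return that size */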
+static offset_t updateSize (ByteIOContext *pb, offset_t pos)
+{
+ offset_t curpos = url_ftell(pb);
+ url_fseek(pb, pos, SEEK_SET);
+ put_be32(pb, curpos - pos); /* rewrite size */
+ url_fseek(pb, curpos, SEEK_SET);
+
+ return curpos - pos;
+}
+
+/* Chunk offset atom */
+static int mov_write_stco_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int i;
+ int mode64 = 0; // use 32 bit size variant if possible
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ if (pos > UINT32_MAX) {
+ mode64 = 1;
+ put_tag(pb, "co64");
+ } else
+ put_tag(pb, "stco");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, track->entry); /* entry count */
+ for (i=0; i<track->entry; i++) {
+ if(mode64 == 1)
+ put_be64(pb, track->cluster[i].pos);
+ else
+ put_be32(pb, track->cluster[i].pos);
+ }
+ return updateSize (pb, pos);
+}
+
+/* Sample size atom */
+static int mov_write_stsz_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int equalChunks = 1;
+ int i, j, entries = 0, tst = -1, oldtst = -1;
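+ /* if every sample has the same size, write the compact form: one fixed sample size and no per-sample table */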
+
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "stsz");
+ put_be32(pb, 0); /* version & flags */
+
+ for (i=0; i<track->entry; i++) {
+ tst = track->cluster[i].size/track->cluster[i].entries;
+ if(oldtst != -1 && tst != oldtst) {
+ equalChunks = 0;
+ }
+ oldtst = tst;
+ entries += track->cluster[i].entries;
+ }
+ if (equalChunks) {
+ int sSize = track->cluster[0].size/track->cluster[0].entries;
+ put_be32(pb, sSize); // sample size
+ put_be32(pb, entries); // sample count
+ }
+ else {
+ put_be32(pb, 0); // sample size
+ put_be32(pb, entries); // sample count
+ for (i=0; i<track->entry; i++) {
+ for ( j=0; j<track->cluster[i].entries; j++) {
+ put_be32(pb, track->cluster[i].size /
+ track->cluster[i].entries);
+ }
+ }
+ }
+ return updateSize (pb, pos);
+}
+
+/* Sample to chunk atom */
+static int mov_write_stsc_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int index = 0, oldval = -1, i;
+ offset_t entryPos, curpos;
+
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "stsc");
+ put_be32(pb, 0); // version & flags
+ entryPos = url_ftell(pb);
+ put_be32(pb, track->entry); // entry count
+ for (i=0; i<track->entry; i++) {
+ if(oldval != track->cluster[i].samplesInChunk)
+ {
+ put_be32(pb, i+1); // first chunk
+ put_be32(pb, track->cluster[i].samplesInChunk); // samples per chunk
+ put_be32(pb, 0x1); // sample description index
+ oldval = track->cluster[i].samplesInChunk;
+ index++;
+ }
+ }
+ curpos = url_ftell(pb);
+ url_fseek(pb, entryPos, SEEK_SET);
+ put_be32(pb, index); // rewrite size
+ url_fseek(pb, curpos, SEEK_SET);
+
+ return updateSize (pb, pos);
+}
+
+/* Sync sample atom */
+static int mov_write_stss_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t curpos, entryPos;
+ int i, index = 0;
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); // size
+ put_tag(pb, "stss");
+ put_be32(pb, 0); // version & flags
+ entryPos = url_ftell(pb);
+ put_be32(pb, track->entry); // entry count
+ for (i=0; i<track->entry; i++) {
+ if(track->cluster[i].key_frame == 1) {
+ put_be32(pb, i+1);
+ index++;
+ }
+ }
+ curpos = url_ftell(pb);
+ url_fseek(pb, entryPos, SEEK_SET);
+ put_be32(pb, index); // rewrite size
+ url_fseek(pb, curpos, SEEK_SET);
+ return updateSize (pb, pos);
+}
+
+static int mov_write_amr_tag(ByteIOContext *pb, MOVTrack *track)
+{
+ put_be32(pb, 0x11); /* size */
+ if (track->mode == MODE_MOV) put_tag(pb, "samr");
+ else put_tag(pb, "damr");
+ put_tag(pb, "FFMP");
+ put_byte(pb, 0); /* decoder version */
+
+ put_be16(pb, 0x81FF); /* Mode set (all modes for AMR_NB) */
+ put_byte(pb, 0x00); /* Mode change period (no restriction) */
+ put_byte(pb, 0x01); /* Frames per sample */
+ return 0x11;
+}
+
+static int mov_write_enda_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 10);
+ put_tag(pb, "enda");
+ put_be16(pb, 1); /* little endian */
+ return 10;
+}
+
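+/* MPEG-4 descriptor lengths are coded in 7-bit groups, the top bit of each byte flagging a continuation */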
+static unsigned int descrLength(unsigned int len)
+{
+ int i;
+ for(i=1; len>>(7*i); i++);
+ return len + 1 + i;
+}
+
+static void putDescr(ByteIOContext *pb, int tag, unsigned int size)
+{
+ int i= descrLength(size) - size - 2;
+ put_byte(pb, tag);
+ for(; i>0; i--)
+ put_byte(pb, (size>>(7*i)) | 0x80);
+ put_byte(pb, size & 0x7F);
+}
+
+static int mov_write_esds_tag(ByteIOContext *pb, MOVTrack* track) // Basic
+{
+ offset_t pos = url_ftell(pb);
+ int decoderSpecificInfoLen = track->vosLen ? descrLength(track->vosLen):0;
+
+ put_be32(pb, 0); // size
+ put_tag(pb, "esds");
+ put_be32(pb, 0); // Version
+
+ // ES descriptor
+ putDescr(pb, 0x03, 3 + descrLength(13 + decoderSpecificInfoLen) +
+ descrLength(1));
+ put_be16(pb, track->trackID);
+ put_byte(pb, 0x00); // flags (= no flags)
+
+ // DecoderConfig descriptor
+ putDescr(pb, 0x04, 13 + decoderSpecificInfoLen);
+
+ // Object type indication
+ put_byte(pb, codec_get_tag(ff_mov_obj_type, track->enc->codec_id));
+
+ // the following field is made of 6 bits identifying the stream type (4 for video, 5 for audio)
+ // plus 1 bit to indicate upstream and 1 bit set to 1 (reserved)
+ if(track->enc->codec_type == CODEC_TYPE_AUDIO)
+ put_byte(pb, 0x15); // flags (= Audiostream)
+ else
+ put_byte(pb, 0x11); // flags (= Visualstream)
+
+ put_byte(pb, track->enc->rc_buffer_size>>(3+16)); // Buffersize DB (24 bits)
+ put_be16(pb, (track->enc->rc_buffer_size>>3)&0xFFFF); // Buffersize DB
+
+ put_be32(pb, FFMAX(track->enc->bit_rate, track->enc->rc_max_rate)); // maxbitrate (FIXME should be max rate in any 1 sec window)
+ if(track->enc->rc_max_rate != track->enc->rc_min_rate || track->enc->rc_min_rate==0)
+ put_be32(pb, 0); // vbr
+ else
+ put_be32(pb, track->enc->rc_max_rate); // avg bitrate
+
+ if (track->vosLen)
+ {
+ // DecoderSpecific info descriptor
+ putDescr(pb, 0x05, track->vosLen);
+ put_buffer(pb, track->vosData, track->vosLen);
+ }
+
+
+ // SL descriptor
+ putDescr(pb, 0x06, 1);
+ put_byte(pb, 0x02);
+ return updateSize (pb, pos);
+}
+
+static int mov_write_wave_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "wave");
+
+ put_be32(pb, 12); /* size */
+ put_tag(pb, "frma");
+ put_le32(pb, track->tag);
+
+ if (track->enc->codec_id == CODEC_ID_AAC) {
+ /* useless atom needed by mplayer, ipod, not needed by quicktime */
+ put_be32(pb, 12); /* size */
+ put_tag(pb, "mp4a");
+ put_be32(pb, 0);
+ mov_write_esds_tag(pb, track);
+ } else if (track->enc->codec_id == CODEC_ID_PCM_S24LE ||
+ track->enc->codec_id == CODEC_ID_PCM_S32LE) {
+ mov_write_enda_tag(pb);
+ } else if (track->enc->codec_id == CODEC_ID_AMR_NB) {
+ mov_write_amr_tag(pb, track);
+ }
+
+ put_be32(pb, 8); /* size */
+ put_be32(pb, 0); /* null tag */
+
+ return updateSize (pb, pos);
+}
+
+static const CodecTag codec_movaudio_tags[] = {
+ { CODEC_ID_PCM_MULAW, MKTAG('u', 'l', 'a', 'w') },
+ { CODEC_ID_PCM_ALAW, MKTAG('a', 'l', 'a', 'w') },
+ { CODEC_ID_ADPCM_IMA_QT, MKTAG('i', 'm', 'a', '4') },
+ { CODEC_ID_MACE3, MKTAG('M', 'A', 'C', '3') },
+ { CODEC_ID_MACE6, MKTAG('M', 'A', 'C', '6') },
+ { CODEC_ID_AAC, MKTAG('m', 'p', '4', 'a') },
+ { CODEC_ID_AMR_NB, MKTAG('s', 'a', 'm', 'r') },
+ { CODEC_ID_AMR_WB, MKTAG('s', 'a', 'w', 'b') },
+ { CODEC_ID_PCM_S16BE, MKTAG('t', 'w', 'o', 's') },
+ { CODEC_ID_PCM_S16LE, MKTAG('s', 'o', 'w', 't') },
+ { CODEC_ID_PCM_S24BE, MKTAG('i', 'n', '2', '4') },
+ { CODEC_ID_PCM_S24LE, MKTAG('i', 'n', '2', '4') },
+ { CODEC_ID_PCM_S32BE, MKTAG('i', 'n', '3', '2') },
+ { CODEC_ID_PCM_S32LE, MKTAG('i', 'n', '3', '2') },
+ { CODEC_ID_MP3, MKTAG('.', 'm', 'p', '3') },
+ { CODEC_ID_NONE, 0 },
+};
+
+static int mov_write_audio_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ int version = track->mode == MODE_MOV &&
+ (track->audio_vbr ||
+ track->enc->codec_id == CODEC_ID_PCM_S32LE ||
+ track->enc->codec_id == CODEC_ID_PCM_S24LE);
+
+ put_be32(pb, 0); /* size */
+ put_le32(pb, track->tag); // store it byteswapped
+ put_be32(pb, 0); /* Reserved */
+ put_be16(pb, 0); /* Reserved */
+ put_be16(pb, 1); /* Data-reference index, XXX == 1 */
+
+ /* SoundDescription */
+ put_be16(pb, version); /* Version */
+ put_be16(pb, 0); /* Revision level */
+ put_be32(pb, 0); /* Reserved */
+
+ put_be16(pb, track->mode == MODE_MOV ? track->enc->channels : 2); /* Number of channels */
+ /* FIXME 8 bit for 'raw ' in mov */
+ put_be16(pb, 16); /* Reserved */
+
+ put_be16(pb, track->mode == MODE_MOV && track->audio_vbr ? -2 : 0); /* compression ID */
+ put_be16(pb, 0); /* packet size (= 0) */
+ put_be16(pb, track->timescale); /* Time scale */
+ put_be16(pb, 0); /* Reserved */
+
+ if(version == 1) { /* SoundDescription V1 extended info */
+ put_be32(pb, track->enc->frame_size); /* Samples per packet */
+ put_be32(pb, track->sampleSize / track->enc->channels); /* Bytes per packet */
+ put_be32(pb, track->sampleSize); /* Bytes per frame */
+ put_be32(pb, 2); /* Bytes per sample */
+ }
+
+ if(track->mode == MODE_MOV &&
+ (track->enc->codec_id == CODEC_ID_AAC ||
+ track->enc->codec_id == CODEC_ID_AMR_NB ||
+ track->enc->codec_id == CODEC_ID_PCM_S24LE ||
+ track->enc->codec_id == CODEC_ID_PCM_S32LE))
+ mov_write_wave_tag(pb, track);
+ else if(track->enc->codec_id == CODEC_ID_AAC)
+ mov_write_esds_tag(pb, track);
+ else if(track->enc->codec_id == CODEC_ID_AMR_NB)
+ mov_write_amr_tag(pb, track);
+
+ return updateSize (pb, pos);
+}
+
+static int mov_write_d263_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 0xf); /* size */
+ put_tag(pb, "d263");
+ put_tag(pb, "FFMP");
+ put_byte(pb, 0); /* decoder version */
+ /* FIXME use AVCodecContext level/profile, when encoder will set values */
+ put_byte(pb, 0xa); /* level */
+ put_byte(pb, 0); /* profile */
+ return 0xf;
+}
+
+/* TODO: No idea about these values */
+static int mov_write_svq3_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 0x15);
+ put_tag(pb, "SMI ");
+ put_tag(pb, "SEQH");
+ put_be32(pb, 0x5);
+ put_be32(pb, 0xe2c0211d);
+ put_be32(pb, 0xc0000000);
+ put_byte(pb, 0);
+ return 0x15;
+}
+
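+/* find the next 00 00 01 start code; the middle loop tests one aligned 32-bit word at a time for speed */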
+static uint8_t *avc_find_startcode( uint8_t *p, uint8_t *end )
+{
+ uint8_t *a = p + 4 - ((int)p & 3);
+
+ for( end -= 3; p < a && p < end; p++ ) {
+ if( p[0] == 0 && p[1] == 0 && p[2] == 1 )
+ return p;
+ }
+
+ for( end -= 3; p < end; p += 4 ) {
+ uint32_t x = *(uint32_t*)p;
+// if( (x - 0x01000100) & (~x) & 0x80008000 ) // little endian
+// if( (x - 0x00010001) & (~x) & 0x00800080 ) // big endian
+ if( (x - 0x01010101) & (~x) & 0x80808080 ) { // generic
+ if( p[1] == 0 ) {
+ if( p[0] == 0 && p[2] == 1 )
+ return p-1;
+ if( p[2] == 0 && p[3] == 1 )
+ return p;
+ }
+ if( p[3] == 0 ) {
+ if( p[2] == 0 && p[4] == 1 )
+ return p+1;
+ if( p[4] == 0 && p[5] == 1 )
+ return p+2;
+ }
+ }
+ }
+
+ for( end += 3; p < end; p++ ) {
+ if( p[0] == 0 && p[1] == 0 && p[2] == 1 )
+ return p;
+ }
+
+ return end + 3;
+}
+
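+/* convert an Annex B bitstream (start-code delimited) into the length-prefixed NAL units required inside MP4/MOV */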
+static void avc_parse_nal_units(uint8_t **buf, int *size)
+{
+ ByteIOContext pb;
+ uint8_t *p = *buf;
+ uint8_t *end = p + *size;
+ uint8_t *nal_start, *nal_end;
+
+ url_open_dyn_buf(&pb);
+ nal_start = avc_find_startcode(p, end);
+ while (nal_start < end) {
+ while(!*(nal_start++));
+ nal_end = avc_find_startcode(nal_start, end);
+ put_be32(&pb, nal_end - nal_start);
+ put_buffer(&pb, nal_start, nal_end - nal_start);
+ nal_start = nal_end;
+ }
+ av_freep(buf);
+ *size = url_close_dyn_buf(&pb, buf);
+}
+
+static int mov_write_avcc_tag(ByteIOContext *pb, MOVTrack *track)
+{
+ offset_t pos = url_ftell(pb);
+
+ put_be32(pb, 0);
+ put_tag(pb, "avcC");
+ if (track->vosLen > 6) {
+ /* check for h264 start code */
+ if (BE_32(track->vosData) == 0x00000001) {
+ uint8_t *buf, *end;
+ uint32_t sps_size=0, pps_size=0;
+ uint8_t *sps=0, *pps=0;
+
+ avc_parse_nal_units(&track->vosData, &track->vosLen);
+ buf = track->vosData;
+ end = track->vosData + track->vosLen;
+
+ /* look for sps and pps */
+ while (buf < end) {
+ unsigned int size;
+ uint8_t nal_type;
+ size = BE_32(buf);
+ nal_type = buf[4] & 0x1f;
+ if (nal_type == 7) { /* SPS */
+ sps = buf + 4;
+ sps_size = size;
+ } else if (nal_type == 8) { /* PPS */
+ pps = buf + 4;
+ pps_size = size;
+ }
+ buf += size + 4;
+ }
+ assert(sps);
+ assert(pps);
+
+ put_byte(pb, 1); /* version */
+ put_byte(pb, sps[1]); /* profile */
+ put_byte(pb, sps[2]); /* profile compat */
+ put_byte(pb, sps[3]); /* level */
+ put_byte(pb, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 1 (11) */
+ put_byte(pb, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
+
+ put_be16(pb, sps_size);
+ put_buffer(pb, sps, sps_size);
+ put_byte(pb, 1); /* number of pps */
+ put_be16(pb, pps_size);
+ put_buffer(pb, pps, pps_size);
+ } else {
+ put_buffer(pb, track->vosData, track->vosLen);
+ }
+ }
+ return updateSize(pb, pos);
+}
+
+static const CodecTag codec_movvideo_tags[] = {
+ { CODEC_ID_SVQ1, MKTAG('S', 'V', 'Q', '1') },
+ { CODEC_ID_SVQ3, MKTAG('S', 'V', 'Q', '3') },
+ { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
+ { CODEC_ID_H263, MKTAG('h', '2', '6', '3') },
+ { CODEC_ID_H263, MKTAG('s', '2', '6', '3') },
+ { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
+ /* special handling in mov_find_video_codec_tag */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', ' ') }, /* DV NTSC */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'c', 'p') }, /* DV PAL */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'p', 'p') }, /* DVCPRO PAL */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'n') }, /* DVCPRO50 NTSC */
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '5', 'p') }, /* DVCPRO50 PAL */
+ { CODEC_ID_NONE, 0 },
+};
+
+static int mov_find_video_codec_tag(AVFormatContext *s, MOVTrack *track)
+{
+ int tag = track->enc->codec_tag;
+ if (!tag) {
+ if (track->enc->codec_id == CODEC_ID_DVVIDEO) {
+ if (track->enc->height == 480) { /* NTSC */
+ if (track->enc->pix_fmt == PIX_FMT_YUV422P)
+ tag = MKTAG('d', 'v', '5', 'n');
+ else
+ tag = MKTAG('d', 'v', 'c', ' ');
+ } else { /* assume PAL */
+ if (track->enc->pix_fmt == PIX_FMT_YUV422P)
+ tag = MKTAG('d', 'v', '5', 'p');
+ else if (track->enc->pix_fmt == PIX_FMT_YUV420P)
+ tag = MKTAG('d', 'v', 'c', 'p');
+ else
+ tag = MKTAG('d', 'v', 'p', 'p');
+ }
+ } else if (track->enc->codec_id == CODEC_ID_H263) {
+ if (track->mode == MODE_MOV)
+ tag = MKTAG('h', '2', '6', '3');
+ else
+ tag = MKTAG('s', '2', '6', '3');
+ } else {
+ tag = codec_get_tag(codec_movvideo_tags, track->enc->codec_id);
+ }
+ }
+ // if no mac fcc found, try with Microsoft tags
+ if (!tag) {
+ tag = codec_get_tag(codec_bmp_tags, track->enc->codec_id);
+ if (tag) {
+ av_log(s, AV_LOG_INFO, "Warning, using MS style video codec tag, the file may be unplayable!\n");
+ }
+ }
+ assert(tag);
+ return tag;
+}
+
+static int mov_find_audio_codec_tag(AVFormatContext *s, MOVTrack *track)
+{
+ int tag = track->enc->codec_tag;
+ if (!tag) {
+ tag = codec_get_tag(codec_movaudio_tags, track->enc->codec_id);
+ }
+ // if no mac fcc found, try with Microsoft tags
+ if (!tag) {
+ int ms_tag = codec_get_tag(codec_wav_tags, track->enc->codec_id);
+ if (ms_tag) {
+ tag = MKTAG('m', 's', ((ms_tag >> 8) & 0xff), (ms_tag & 0xff));
+ av_log(s, AV_LOG_INFO, "Warning, using MS style audio codec tag, the file may be unplayable!\n");
+ }
+ }
+ assert(tag);
+ return tag;
+}
+
+static int mov_write_video_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ char compressor_name[32];
+
+ put_be32(pb, 0); /* size */
+ put_le32(pb, track->tag); // store it byteswapped
+ put_be32(pb, 0); /* Reserved */
+ put_be16(pb, 0); /* Reserved */
+ put_be16(pb, 1); /* Data-reference index */
+
+ put_be16(pb, 0); /* Codec stream version */
+ put_be16(pb, 0); /* Codec stream revision (=0) */
+ if (track->mode == MODE_MOV) {
+ put_tag(pb, "FFMP"); /* Vendor */
+ if(track->enc->codec_id == CODEC_ID_RAWVIDEO) {
+ put_be32(pb, 0); /* Temporal Quality */
+ put_be32(pb, 0x400); /* Spatial Quality = lossless*/
+ } else {
+ put_be32(pb, 0x200); /* Temporal Quality = normal */
+ put_be32(pb, 0x200); /* Spatial Quality = normal */
+ }
+ } else {
+ put_be32(pb, 0); /* Reserved */
+ put_be32(pb, 0); /* Reserved */
+ put_be32(pb, 0); /* Reserved */
+ }
+ put_be16(pb, track->enc->width); /* Video width */
+ put_be16(pb, track->enc->height); /* Video height */
+ put_be32(pb, 0x00480000); /* Horizontal resolution 72dpi */
+ put_be32(pb, 0x00480000); /* Vertical resolution 72dpi */
+ put_be32(pb, 0); /* Data size (= 0) */
+ put_be16(pb, 1); /* Frame count (= 1) */
+
+ memset(compressor_name,0,32);
+ /* FIXME not sure; the ISO 14496-1 draft says it shall be set to 0 */
+ if (track->mode == MODE_MOV && track->enc->codec && track->enc->codec->name)
+ strncpy(compressor_name,track->enc->codec->name,31);
+ put_byte(pb, strlen(compressor_name));
+ put_buffer(pb, compressor_name, 31);
+
+ put_be16(pb, 0x18); /* Reserved */
+ put_be16(pb, 0xffff); /* Reserved */
+ if(track->enc->codec_id == CODEC_ID_MPEG4)
+ mov_write_esds_tag(pb, track);
+ else if(track->enc->codec_id == CODEC_ID_H263)
+ mov_write_d263_tag(pb);
+ else if(track->enc->codec_id == CODEC_ID_SVQ3)
+ mov_write_svq3_tag(pb);
+ else if(track->enc->codec_id == CODEC_ID_H264)
+ mov_write_avcc_tag(pb, track);
+
+ return updateSize (pb, pos);
+}
+
+static int mov_write_stsd_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "stsd");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, 1); /* entry count */
+ if (track->enc->codec_type == CODEC_TYPE_VIDEO)
+ mov_write_video_tag(pb, track);
+ else if (track->enc->codec_type == CODEC_TYPE_AUDIO)
+ mov_write_audio_tag(pb, track);
+ return updateSize(pb, pos);
+}
+
+static int mov_write_ctts_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ Time2Sample *ctts_entries;
+ uint32_t entries = 0;
+ uint32_t atom_size;
+ int i;
+
+ ctts_entries = av_malloc((track->entry + 1) * sizeof(*ctts_entries)); /* worst case */
+ ctts_entries[0].count = 1;
+ ctts_entries[0].duration = track->cluster[0].cts;
+ for (i=1; i<track->entry; i++) {
+ if (track->cluster[i].cts == ctts_entries[entries].duration) {
+ ctts_entries[entries].count++; /* compress */
+ } else {
+ entries++;
+ ctts_entries[entries].duration = track->cluster[i].cts;
+ ctts_entries[entries].count = 1;
+ }
+ }
+ entries++; /* last one */
+ atom_size = 16 + (entries * 8);
+ put_be32(pb, atom_size); /* size */
+ put_tag(pb, "ctts");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, entries); /* entry count */
+ for (i=0; i<entries; i++) {
+ put_be32(pb, ctts_entries[i].count);
+ put_be32(pb, ctts_entries[i].duration);
+ }
+ av_free(ctts_entries);
+ return atom_size;
+}
+
+/* Time to sample atom */
+static int mov_write_stts_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ Time2Sample *stts_entries;
+ uint32_t entries = -1;
+ uint32_t atom_size;
+ int i;
+
+ if (track->enc->codec_type == CODEC_TYPE_AUDIO && !track->audio_vbr) {
+ stts_entries = av_malloc(sizeof(*stts_entries)); /* one entry */
+ stts_entries[0].count = track->sampleCount;
+ stts_entries[0].duration = 1;
+ entries = 1;
+ } else {
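+ /* entries is -1 (UINT32_MAX) here, so the first entries++ below wraps it to index 0 */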
+ stts_entries = av_malloc(track->entry * sizeof(*stts_entries)); /* worst case */
+ for (i=0; i<track->entry; i++) {
+ int64_t duration = i + 1 == track->entry ?
+ track->trackDuration - track->cluster[i].dts + track->cluster[0].dts : /* readjusting */
+ track->cluster[i+1].dts - track->cluster[i].dts;
+ if (i && duration == stts_entries[entries].duration) {
+ stts_entries[entries].count++; /* compress */
+ } else {
+ entries++;
+ stts_entries[entries].duration = duration;
+ stts_entries[entries].count = 1;
+ }
+ }
+ entries++; /* last one */
+ }
+ atom_size = 16 + (entries * 8);
+ put_be32(pb, atom_size); /* size */
+ put_tag(pb, "stts");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, entries); /* entry count */
+ for (i=0; i<entries; i++) {
+ put_be32(pb, stts_entries[i].count);
+ put_be32(pb, stts_entries[i].duration);
+ }
+ av_free(stts_entries);
+ return atom_size;
+}
+
+static int mov_write_dref_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 28); /* size */
+ put_tag(pb, "dref");
+ put_be32(pb, 0); /* version & flags */
+ put_be32(pb, 1); /* entry count */
+
+ put_be32(pb, 0xc); /* size */
+ put_tag(pb, "url ");
+ put_be32(pb, 1); /* version & flags */
+
+ return 28;
+}
+
+static int mov_write_stbl_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "stbl");
+ mov_write_stsd_tag(pb, track);
+ mov_write_stts_tag(pb, track);
+ if (track->enc->codec_type == CODEC_TYPE_VIDEO &&
+ track->hasKeyframes < track->entry)
+ mov_write_stss_tag(pb, track);
+ if (track->enc->codec_type == CODEC_TYPE_VIDEO &&
+ track->hasBframes)
+ mov_write_ctts_tag(pb, track);
+ mov_write_stsc_tag(pb, track);
+ mov_write_stsz_tag(pb, track);
+ mov_write_stco_tag(pb, track);
+ return updateSize(pb, pos);
+}
+
+static int mov_write_dinf_tag(ByteIOContext *pb)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "dinf");
+ mov_write_dref_tag(pb);
+ return updateSize(pb, pos);
+}
+
+static int mov_write_smhd_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 16); /* size */
+ put_tag(pb, "smhd");
+ put_be32(pb, 0); /* version & flags */
+ put_be16(pb, 0); /* reserved (balance, normally = 0) */
+ put_be16(pb, 0); /* reserved */
+ return 16;
+}
+
+static int mov_write_vmhd_tag(ByteIOContext *pb)
+{
+ put_be32(pb, 0x14); /* size (always 0x14) */
+ put_tag(pb, "vmhd");
+ put_be32(pb, 0x01); /* version & flags */
+ put_be64(pb, 0); /* reserved (graphics mode = copy) */
+ return 0x14;
+}
+
+static int mov_write_hdlr_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ const char *descr, *hdlr, *hdlr_type;
+ offset_t pos = url_ftell(pb);
+
+ if (!track) { /* no media --> data handler */
+ hdlr = "dhlr";
+ hdlr_type = "url ";
+ descr = "DataHandler";
+ } else {
+ hdlr = (track->mode == MODE_MOV) ? "mhlr" : "\0\0\0\0";
+ if (track->enc->codec_type == CODEC_TYPE_VIDEO) {
+ hdlr_type = "vide";
+ descr = "VideoHandler";
+ } else {
+ hdlr_type = "soun";
+ descr = "SoundHandler";
+ }
+ }
+
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "hdlr");
+ put_be32(pb, 0); /* Version & flags */
+ put_buffer(pb, hdlr, 4); /* handler */
+ put_tag(pb, hdlr_type); /* handler type */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_byte(pb, strlen(descr)); /* string counter */
+ put_buffer(pb, descr, strlen(descr)); /* handler description */
+ return updateSize(pb, pos);
+}
+
+static int mov_write_minf_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "minf");
+ if(track->enc->codec_type == CODEC_TYPE_VIDEO)
+ mov_write_vmhd_tag(pb);
+ else
+ mov_write_smhd_tag(pb);
+ if (track->mode == MODE_MOV) /* FIXME: Why do it for MODE_MOV only ? */
+ mov_write_hdlr_tag(pb, NULL);
+ mov_write_dinf_tag(pb);
+ mov_write_stbl_tag(pb, track);
+ return updateSize(pb, pos);
+}
+
+static int mov_write_mdhd_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int version = track->trackDuration < INT32_MAX ? 0 : 1;
+
+ (version == 1) ? put_be32(pb, 44) : put_be32(pb, 32); /* size */
+ put_tag(pb, "mdhd");
+ put_byte(pb, version);
+ put_be24(pb, 0); /* flags */
+ if (version == 1) {
+ put_be64(pb, track->time);
+ put_be64(pb, track->time);
+ } else {
+ put_be32(pb, track->time); /* creation time */
+ put_be32(pb, track->time); /* modification time */
+ }
+ put_be32(pb, track->timescale); /* time scale (sample rate for audio) */
+ (version == 1) ? put_be64(pb, track->trackDuration) : put_be32(pb, track->trackDuration); /* duration */
+ put_be16(pb, track->language); /* language */
+ put_be16(pb, 0); /* reserved (quality) */
+ return (version == 1) ? 44 : 32;
+}
+
+static int mov_write_mdia_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "mdia");
+ mov_write_mdhd_tag(pb, track);
+ mov_write_hdlr_tag(pb, track);
+ mov_write_minf_tag(pb, track);
+ return updateSize(pb, pos);
+}
+
+static int mov_write_tkhd_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ int64_t duration = av_rescale_rnd(track->trackDuration, globalTimescale, track->timescale, AV_ROUND_UP);
+ int version = duration < INT32_MAX ? 0 : 1;
+
+ (version == 1) ? put_be32(pb, 104) : put_be32(pb, 92); /* size */
+ put_tag(pb, "tkhd");
+ put_byte(pb, version);
+ put_be24(pb, 0xf); /* flags (track enabled) */
+ if (version == 1) {
+ put_be64(pb, track->time);
+ put_be64(pb, track->time);
+ } else {
+ put_be32(pb, track->time); /* creation time */
+ put_be32(pb, track->time); /* modification time */
+ }
+ put_be32(pb, track->trackID); /* track-id */
+ put_be32(pb, 0); /* reserved */
+ (version == 1) ? put_be64(pb, duration) : put_be32(pb, duration);
+
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0x0); /* reserved (Layer & Alternate group) */
+ /* Volume, only for audio */
+ if(track->enc->codec_type == CODEC_TYPE_AUDIO)
+ put_be16(pb, 0x0100);
+ else
+ put_be16(pb, 0);
+ put_be16(pb, 0); /* reserved */
+
+ /* Matrix structure */
+ put_be32(pb, 0x00010000); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x00010000); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x40000000); /* reserved */
+
+ /* Track width and height, for visual only */
+ if(track->enc->codec_type == CODEC_TYPE_VIDEO) {
+ double sample_aspect_ratio = av_q2d(track->enc->sample_aspect_ratio);
+ if( !sample_aspect_ratio ) sample_aspect_ratio = 1;
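+ /* 16.16 fixed point; the stored width is scaled by the sample aspect ratio to give the display width */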
+ put_be32(pb, sample_aspect_ratio * track->enc->width*0x10000);
+ put_be32(pb, track->enc->height*0x10000);
+ }
+ else {
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+ }
+ return (version == 1) ? 104 : 0x5c;
+}
+
+// This box seems important for PSP playback; without it the movie seems to hang
+static int mov_write_edts_tag(ByteIOContext *pb, MOVTrack *track)
+{
+ put_be32(pb, 0x24); /* size */
+ put_tag(pb, "edts");
+ put_be32(pb, 0x1c); /* size */
+ put_tag(pb, "elst");
+ put_be32(pb, 0x0);
+ put_be32(pb, 0x1);
+
+ put_be32(pb, av_rescale_rnd(track->trackDuration, globalTimescale, track->timescale, AV_ROUND_UP)); /* duration ... doesn't seem to affect the PSP */
+
+ put_be32(pb, track->cluster[0].cts); /* first pts is cts since dts is 0 */
+ put_be32(pb, 0x00010000);
+ return 0x24;
+}
+
+// goes at the end of each track! ... Critical for PSP playback ("Incompatible data" without it)
+static int mov_write_uuid_tag_psp(ByteIOContext *pb, MOVTrack *mov)
+{
+ put_be32(pb, 0x34); /* size ... reports as 28 in mp4box! */
+ put_tag(pb, "uuid");
+ put_tag(pb, "USMT");
+ put_be32(pb, 0x21d24fce);
+ put_be32(pb, 0xbb88695c);
+ put_be32(pb, 0xfac9c740);
+ put_be32(pb, 0x1c); // another size here!
+ put_tag(pb, "MTDT");
+ put_be32(pb, 0x00010012);
+ put_be32(pb, 0x0a);
+ put_be32(pb, 0x55c40000);
+ put_be32(pb, 0x1);
+ put_be32(pb, 0x0);
+ return 0x34;
+}
+
+static int mov_write_trak_tag(ByteIOContext *pb, MOVTrack* track)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "trak");
+ mov_write_tkhd_tag(pb, track);
+ if (track->mode == MODE_PSP || track->hasBframes)
+ mov_write_edts_tag(pb, track); // PSP Movies require edts box
+ mov_write_mdia_tag(pb, track);
+ if (track->mode == MODE_PSP)
+ mov_write_uuid_tag_psp(pb,track); // PSP Movies require this uuid box
+ return updateSize(pb, pos);
+}
+
+#if 0
+/* TODO: Not sorted out, but not necessary either */
+static int mov_write_iods_tag(ByteIOContext *pb, MOVContext *mov)
+{
+ put_be32(pb, 0x15); /* size */
+ put_tag(pb, "iods");
+ put_be32(pb, 0); /* version & flags */
+ put_be16(pb, 0x1007);
+ put_byte(pb, 0);
+ put_be16(pb, 0x4fff);
+ put_be16(pb, 0xfffe);
+ put_be16(pb, 0x01ff);
+ return 0x15;
+}
+#endif
+
+static int mov_write_mvhd_tag(ByteIOContext *pb, MOVContext *mov)
+{
+ int maxTrackID = 1, i;
+ int64_t maxTrackLenTemp, maxTrackLen = 0;
+ int version;
+
+ for (i=0; i<mov->nb_streams; i++) {
+ if(mov->tracks[i].entry > 0) {
+ maxTrackLenTemp = av_rescale_rnd(mov->tracks[i].trackDuration, globalTimescale, mov->tracks[i].timescale, AV_ROUND_UP);
+ if(maxTrackLen < maxTrackLenTemp)
+ maxTrackLen = maxTrackLenTemp;
+ if(maxTrackID < mov->tracks[i].trackID)
+ maxTrackID = mov->tracks[i].trackID;
+ }
+ }
+
+ version = maxTrackLen < UINT32_MAX ? 0 : 1;
+ (version == 1) ? put_be32(pb, 120) : put_be32(pb, 108); /* size */
+ put_tag(pb, "mvhd");
+ put_byte(pb, version);
+ put_be24(pb, 0); /* flags */
+ if (version == 1) {
+ put_be64(pb, mov->time);
+ put_be64(pb, mov->time);
+ } else {
+ put_be32(pb, mov->time); /* creation time */
+ put_be32(pb, mov->time); /* modification time */
+ }
+ put_be32(pb, mov->timescale); /* timescale */
+ (version == 1) ? put_be64(pb, maxTrackLen) : put_be32(pb, maxTrackLen); /* duration of longest track */
+
+ put_be32(pb, 0x00010000); /* reserved (preferred rate) 1.0 = normal */
+ put_be16(pb, 0x0100); /* reserved (preferred volume) 1.0 = normal */
+ put_be16(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+ put_be32(pb, 0); /* reserved */
+
+ /* Matrix structure */
+ put_be32(pb, 0x00010000); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x00010000); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x0); /* reserved */
+ put_be32(pb, 0x40000000); /* reserved */
+
+ put_be32(pb, 0); /* reserved (preview time) */
+ put_be32(pb, 0); /* reserved (preview duration) */
+ put_be32(pb, 0); /* reserved (poster time) */
+ put_be32(pb, 0); /* reserved (selection time) */
+ put_be32(pb, 0); /* reserved (selection duration) */
+ put_be32(pb, 0); /* reserved (current time) */
+ put_be32(pb, maxTrackID+1); /* Next track id */
+ return (version == 1) ? 120 : 0x6c;
+}
+
+static int mov_write_itunes_hdlr_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "hdlr");
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+ put_tag(pb, "mdir");
+ put_tag(pb, "appl");
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+ put_be16(pb, 0);
+ return updateSize(pb, pos);
+}
+
+/* helper function to write a data tag with the specified string as data */
+static int mov_write_string_data_tag(ByteIOContext *pb, const char *data, int long_style)
+{
+ if(long_style){
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "data");
+ put_be32(pb, 1);
+ put_be32(pb, 0);
+ put_buffer(pb, data, strlen(data));
+ return updateSize(pb, pos);
+ }else{
+ put_be16(pb, strlen(data)); /* string length */
+ put_be16(pb, 0);
+ put_buffer(pb, data, strlen(data));
+ return strlen(data) + 4;
+ }
+}
+
+static int mov_write_string_tag(ByteIOContext *pb, const char *name, const char *value, int long_style){
+ int size = 0;
+ if ( value && value[0] ) {
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, name);
+ mov_write_string_data_tag(pb, value, long_style);
+ size= updateSize(pb, pos);
+ }
+ return size;
+}
+
+/* iTunes year */
+static int mov_write_day_tag(ByteIOContext *pb, int year, int long_style)
+{
+ if(year){
+ char year_str[5];
+ snprintf(year_str, sizeof(year_str), "%04d", year);
+ return mov_write_string_tag(pb, "\251day", year_str, long_style);
+ }else
+ return 0;
+}
+
+/* iTunes track number */
+static int mov_write_trkn_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ int size = 0;
+ if ( s->track ) {
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "trkn");
+ {
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "data");
+ put_be32(pb, 0); // 8 bytes empty
+ put_be32(pb, 0);
+ put_be16(pb, 0); // empty
+ put_be16(pb, s->track); // track number
+ put_be16(pb, 0); // total track number
+ put_be16(pb, 0); // empty
+ updateSize(pb, pos);
+ }
+ size = updateSize(pb, pos);
+ }
+ return size;
+}
+
+/* iTunes meta data list */
+static int mov_write_ilst_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "ilst");
+ mov_write_string_tag(pb, "\251nam", s->title , 1);
+ mov_write_string_tag(pb, "\251ART", s->author , 1);
+ mov_write_string_tag(pb, "\251wrt", s->author , 1);
+ mov_write_string_tag(pb, "\251alb", s->album , 1);
+ mov_write_day_tag(pb, s->year ,1);
+ if(mov->tracks[0].enc && !(mov->tracks[0].enc->flags & CODEC_FLAG_BITEXACT))
+ mov_write_string_tag(pb, "\251too", LIBAVFORMAT_IDENT, 1);
+ mov_write_string_tag(pb, "\251cmt", s->comment , 1);
+ mov_write_string_tag(pb, "\251gen", s->genre , 1);
+ mov_write_trkn_tag(pb, mov, s);
+ return updateSize(pb, pos);
+}
+
+/* iTunes meta data tag */
+static int mov_write_meta_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ int size = 0;
+
+ // only save meta tag if required
+ if ( s->title[0] || s->author[0] || s->album[0] || s->year ||
+ s->comment[0] || s->genre[0] || s->track ) {
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "meta");
+ put_be32(pb, 0);
+ mov_write_itunes_hdlr_tag(pb, mov, s);
+ mov_write_ilst_tag(pb, mov, s);
+ size = updateSize(pb, pos);
+ }
+ return size;
+}
+
+static int mov_write_udta_tag(ByteIOContext *pb, MOVContext* mov,
+ AVFormatContext *s)
+{
+ offset_t pos = url_ftell(pb);
+ int i;
+
+ put_be32(pb, 0); /* size */
+ put_tag(pb, "udta");
+
+ /* iTunes meta data */
+ mov_write_meta_tag(pb, mov, s);
+
+ if(mov->mode == MODE_MOV){ // the title field breaks gtkpod with mp4; the suspicion is that these atoms aren't valid in mp4
+ /* Requirements */
+ for (i=0; i<mov->nb_streams; i++) {
+ if(mov->tracks[i].entry <= 0) continue;
+ if (mov->tracks[i].enc->codec_id == CODEC_ID_AAC ||
+ mov->tracks[i].enc->codec_id == CODEC_ID_MPEG4) {
+ mov_write_string_tag(pb, "\251req", "QuickTime 6.0 or greater", 0);
+ break;
+ }
+ }
+
+ mov_write_string_tag(pb, "\251nam", s->title , 0);
+ mov_write_string_tag(pb, "\251aut", s->author , 0);
+ mov_write_string_tag(pb, "\251alb", s->album , 0);
+ mov_write_day_tag(pb, s->year, 0);
+ if(mov->tracks[0].enc && !(mov->tracks[0].enc->flags & CODEC_FLAG_BITEXACT))
+ mov_write_string_tag(pb, "\251enc", LIBAVFORMAT_IDENT, 0);
+ mov_write_string_tag(pb, "\251des", s->comment , 0);
+ mov_write_string_tag(pb, "\251gen", s->genre , 0);
+ }
+
+ return updateSize(pb, pos);
+}
+
+static int utf8len(uint8_t *b){
+ int len=0;
+ int val;
+ while(*b){
+ GET_UTF8(val, *b++, return -1;)
+ len++;
+ }
+ return len;
+}
+
+static int ascii_to_wc (ByteIOContext *pb, uint8_t *b)
+{
+ int val;
+ while(*b){
+ GET_UTF8(val, *b++, return -1;)
+ put_be16(pb, val);
+ }
+ put_be16(pb, 0x00);
+ return 0;
+}
+
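+/* Pack an ISO 639-2/T three-letter code into the 15-bit QuickTime language field: each letter minus 0x60 is stored in 5 bits */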
+static uint16_t language_code (const char *str)
+{
+ return ((((str[0]-0x60) & 0x1F)<<10) + (((str[1]-0x60) & 0x1F)<<5) + ((str[2]-0x60) & 0x1F));
+}
+
+static int mov_write_uuidusmt_tag (ByteIOContext *pb, AVFormatContext *s)
+{
+ size_t len, size;
+ offset_t pos, curpos;
+
+ size = 0;
+ if (s->title[0]) {
+ pos = url_ftell(pb);
+ put_be32(pb, 0); /* size placeholder*/
+ put_tag(pb, "uuid");
+ put_tag(pb, "USMT");
+ put_be32(pb, 0x21d24fce ); /* 96 bit UUID */
+ put_be32(pb, 0xbb88695c );
+ put_be32(pb, 0xfac9c740 );
+ size += 24;
+
+ put_be32(pb, 0); /* size placeholder*/
+ put_tag(pb, "MTDT");
+ put_be16(pb, 4);
+ size += 10;
+
+ // ?
+ put_be16(pb, 0x0C); /* size */
+ put_be32(pb, 0x0B); /* type */
+ put_be16(pb, language_code("und")); /* language */
+ put_be16(pb, 0x0); /* ? */
+ put_be16(pb, 0x021C); /* data */
+ size += 12;
+
+ // Encoder
+ len = utf8len(LIBAVCODEC_IDENT)+1;
+ if(len<=0)
+ goto not_utf8;
+ put_be16(pb, len*2+10); /* size */
+ put_be32(pb, 0x04); /* type */
+ put_be16(pb, language_code("eng")); /* language */
+ put_be16(pb, 0x01); /* ? */
+ ascii_to_wc(pb, LIBAVCODEC_IDENT);
+ size += len*2+10;
+
+ // Title
+ len = utf8len(s->title)+1;
+ if(len<=0)
+ goto not_utf8;
+ put_be16(pb, len*2+10); /* size */
+ put_be32(pb, 0x01); /* type */
+ put_be16(pb, language_code("eng")); /* language */
+ put_be16(pb, 0x01); /* ? */
+ ascii_to_wc (pb, s->title);
+ size += len*2+10;
+
+ // Date
+// snprintf(dt,32,"%04d/%02d/%02d %02d:%02d:%02d",t_st->tm_year+1900,t_st->tm_mon+1,t_st->tm_mday,t_st->tm_hour,t_st->tm_min,t_st->tm_sec);
+ len = utf8len("2006/04/01 11:11:11")+1;
+ if(len<=0)
+ goto not_utf8;
+ put_be16(pb, len*2+10); /* size */
+ put_be32(pb, 0x03); /* type */
+ put_be16(pb, language_code("und")); /* language */
+ put_be16(pb, 0x01); /* ? */
+ ascii_to_wc (pb, "2006/04/01 11:11:11");
+ size += len*2+10;
+
+ // size
+ curpos = url_ftell(pb);
+ url_fseek(pb, pos, SEEK_SET);
+ put_be32(pb, size);
+ url_fseek(pb, pos+24, SEEK_SET);
+ put_be32(pb, size-24);
+ url_fseek(pb, curpos, SEEK_SET);
+ }
+
+ return size;
+not_utf8:
+ av_log(s, AV_LOG_ERROR, "not utf8\n");
+ return -1;
+}
+
+static int mov_write_moov_tag(ByteIOContext *pb, MOVContext *mov,
+ AVFormatContext *s)
+{
+ int i;
+ offset_t pos = url_ftell(pb);
+ put_be32(pb, 0); /* size placeholder*/
+ put_tag(pb, "moov");
+ mov->timescale = globalTimescale;
+
+ for (i=0; i<mov->nb_streams; i++) {
+ if(mov->tracks[i].entry <= 0) continue;
+
+ mov->tracks[i].time = mov->time;
+ mov->tracks[i].trackID = i+1;
+ }
+
+ mov_write_mvhd_tag(pb, mov);
+ //mov_write_iods_tag(pb, mov);
+ for (i=0; i<mov->nb_streams; i++) {
+ if(mov->tracks[i].entry > 0) {
+ mov_write_trak_tag(pb, &(mov->tracks[i]));
+ }
+ }
+
+ if (mov->mode == MODE_PSP)
+ mov_write_uuidusmt_tag(pb, s);
+ else
+ mov_write_udta_tag(pb, mov, s);
+
+ return updateSize(pb, pos);
+}
+
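+/* Write an 8-byte 'wide'/'free' placeholder followed by the mdat header, so the
+   trailer can rewrite the placeholder as a 64-bit size if mdat exceeds UINT32_MAX */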
+static int mov_write_mdat_tag(ByteIOContext *pb, MOVContext* mov)
+{
+ put_be32(pb, 8); // placeholder for extended size field (64 bit)
+ put_tag(pb, mov->mode == MODE_MOV ? "wide" : "free");
+
+ mov->mdat_pos = url_ftell(pb);
+ put_be32(pb, 0); /* size placeholder*/
+ put_tag(pb, "mdat");
+ return 0;
+}
+
+/* TODO: This needs to be more general */
+static void mov_write_ftyp_tag (ByteIOContext *pb, AVFormatContext *s)
+{
+ MOVContext *mov = s->priv_data;
+
+ put_be32(pb, 0x14 ); /* size */
+ put_tag(pb, "ftyp");
+
+ if ( mov->mode == MODE_3GP )
+ put_tag(pb, "3gp4");
+ else if ( mov->mode == MODE_3G2 )
+ put_tag(pb, "3g2a");
+ else if ( mov->mode == MODE_PSP )
+ put_tag(pb, "MSNV");
+ else if ( mov->mode == MODE_MP4 )
+ put_tag(pb, "isom");
+ else
+ put_tag(pb, "qt ");
+
+ put_be32(pb, 0x200 );
+
+ if ( mov->mode == MODE_3GP )
+ put_tag(pb, "3gp4");
+ else if ( mov->mode == MODE_3G2 )
+ put_tag(pb, "3g2a");
+ else if ( mov->mode == MODE_PSP )
+ put_tag(pb, "MSNV");
+ else if ( mov->mode == MODE_MP4 )
+ put_tag(pb, "mp41");
+ else
+ put_tag(pb, "qt ");
+}
+
+static void mov_write_uuidprof_tag(ByteIOContext *pb, AVFormatContext *s)
+{
+ AVCodecContext *VideoCodec = s->streams[0]->codec;
+ AVCodecContext *AudioCodec = s->streams[1]->codec;
+ int AudioRate = AudioCodec->sample_rate;
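+ /* frame rate as a 16.16 fixed-point value */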
+ int FrameRate = ((VideoCodec->time_base.den) * (0x10000))/ (VideoCodec->time_base.num);
+ int audio_kbitrate= AudioCodec->bit_rate / 1000;
+ int video_kbitrate= FFMIN(VideoCodec->bit_rate / 1000, 800 - audio_kbitrate);
+
+ put_be32(pb, 0x94 ); /* size */
+ put_tag(pb, "uuid");
+ put_tag(pb, "PROF");
+
+ put_be32(pb, 0x21d24fce ); /* 96 bit UUID */
+ put_be32(pb, 0xbb88695c );
+ put_be32(pb, 0xfac9c740 );
+
+ put_be32(pb, 0x0 ); /* ? */
+ put_be32(pb, 0x3 ); /* 3 sections ? */
+
+ put_be32(pb, 0x14 ); /* size */
+ put_tag(pb, "FPRF");
+ put_be32(pb, 0x0 ); /* ? */
+ put_be32(pb, 0x0 ); /* ? */
+ put_be32(pb, 0x0 ); /* ? */
+
+ put_be32(pb, 0x2c ); /* size */
+ put_tag(pb, "APRF"); /* audio */
+ put_be32(pb, 0x0 );
+ put_be32(pb, 0x2 ); /* TrackID */
+ put_tag(pb, "mp4a");
+ put_be32(pb, 0x20f );
+ put_be32(pb, 0x0 );
+ put_be32(pb, audio_kbitrate);
+ put_be32(pb, audio_kbitrate);
+ put_be32(pb, AudioRate );
+ put_be32(pb, AudioCodec->channels );
+
+ put_be32(pb, 0x34 ); /* size */
+ put_tag(pb, "VPRF"); /* video */
+ put_be32(pb, 0x0 );
+ put_be32(pb, 0x1 ); /* TrackID */
+ if (VideoCodec->codec_id == CODEC_ID_H264) {
+ put_tag(pb, "avc1");
+ put_be16(pb, 0x014D );
+ put_be16(pb, 0x0015 );
+ } else {
+ put_tag(pb, "mp4v");
+ put_be16(pb, 0x0000 );
+ put_be16(pb, 0x0103 );
+ }
+ put_be32(pb, 0x0 );
+ put_be32(pb, video_kbitrate);
+ put_be32(pb, video_kbitrate);
+ put_be32(pb, FrameRate);
+ put_be32(pb, FrameRate);
+ put_be16(pb, VideoCodec->width);
+ put_be16(pb, VideoCodec->height);
+ put_be32(pb, 0x010001); /* ? */
+}
+
+static int mov_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ MOVContext *mov = s->priv_data;
+ int i;
+
+ /* Default mode == MP4 */
+ mov->mode = MODE_MP4;
+
+ if (s->oformat != NULL) {
+ if (!strcmp("3gp", s->oformat->name)) mov->mode = MODE_3GP;
+ else if (!strcmp("3g2", s->oformat->name)) mov->mode = MODE_3G2;
+ else if (!strcmp("mov", s->oformat->name)) mov->mode = MODE_MOV;
+ else if (!strcmp("psp", s->oformat->name)) mov->mode = MODE_PSP;
+
+ mov_write_ftyp_tag(pb,s);
+ if ( mov->mode == MODE_PSP ) {
+ if ( s->nb_streams != 2 ) {
+ av_log(s, AV_LOG_ERROR, "PSP mode need one video and one audio stream\n");
+ return -1;
+ }
+ mov_write_uuidprof_tag(pb,s);
+ }
+ }
+
+ for(i=0; i<s->nb_streams; i++){
+ AVStream *st= s->streams[i];
+ MOVTrack *track= &mov->tracks[i];
+
+ track->enc = st->codec;
+ track->language = ff_mov_iso639_to_lang(st->language, mov->mode != MODE_MOV);
+ track->mode = mov->mode;
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO){
+ track->tag = mov_find_video_codec_tag(s, track);
+ track->timescale = st->codec->time_base.den;
+ track->sampleDuration = st->codec->time_base.num;
+ av_set_pts_info(st, 64, 1, st->codec->time_base.den);
+ }else if(st->codec->codec_type == CODEC_TYPE_AUDIO){
+ track->tag = mov_find_audio_codec_tag(s, track);
+ track->timescale = st->codec->sample_rate;
+ track->sampleDuration = st->codec->frame_size;
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+ switch(track->enc->codec_id){
+ case CODEC_ID_MP3:
+ case CODEC_ID_AAC:
+ case CODEC_ID_AMR_NB:
+ case CODEC_ID_AMR_WB:
+ track->audio_vbr = 1;
+ break;
+ default:
+ track->sampleSize = (av_get_bits_per_sample(st->codec->codec_id) >> 3) * st->codec->channels;
+ }
+ }
+ if (!track->sampleDuration) {
+ av_log(s, AV_LOG_ERROR, "track %d: sample duration is not set\n", i);
+ return -1;
+ }
+ }
+
+ mov_write_mdat_tag(pb, mov);
+ mov->time = s->timestamp + 0x7C25B080; //1970 based -> 1904 based
+ mov->nb_streams = s->nb_streams;
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ MOVContext *mov = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ MOVTrack *trk = &mov->tracks[pkt->stream_index];
+ AVCodecContext *enc = trk->enc;
+ unsigned int samplesInChunk = 0;
+ int size= pkt->size;
+
+ if (url_is_streamed(&s->pb)) return 0; /* Can't handle that */
+ if (!size) return 0; /* Discard 0 sized packets */
+
+ if (enc->codec_id == CODEC_ID_AMR_NB) {
+ /* We must find out how many AMR blocks there are in one packet */
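+ /* octet-aligned AMR-NB frame sizes in bytes (including the TOC byte), indexed by the frame type in bits 3..6 of the first byte */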
+ static uint16_t packed_size[16] =
+ {13, 14, 16, 18, 20, 21, 27, 32, 6, 0, 0, 0, 0, 0, 0, 0};
+ int len = 0;
+
+ while (len < size && samplesInChunk < 100) {
+ len += packed_size[(pkt->data[len] >> 3) & 0x0F];
+ samplesInChunk++;
+ }
+ if(samplesInChunk > 1){
+ av_log(s, AV_LOG_ERROR, "fatal error, input is not a single packet, inplement a AVParser for it\n");
+ return -1;
+ }
+ } else if (trk->sampleSize)
+ samplesInChunk = size/trk->sampleSize;
+ else
+ samplesInChunk = 1;
+
+ /* copy extradata if it exists */
+ if (trk->vosLen == 0 && enc->extradata_size > 0) {
+ trk->vosLen = enc->extradata_size;
+ trk->vosData = av_malloc(trk->vosLen);
+ memcpy(trk->vosData, enc->extradata, trk->vosLen);
+ }
+
+ if (enc->codec_id == CODEC_ID_H264 && trk->vosLen > 0 && *(uint8_t *)trk->vosData != 1) {
+ /* from x264 or from an Annex B H.264 bytestream */
+ /* NAL unit reformatting to length-prefixed units is needed */
+ avc_parse_nal_units(&pkt->data, &pkt->size);
+ assert(pkt->size);
+ size = pkt->size;
+ }
+
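+ /* grow the per-track sample index in chunks of MOV_INDEX_CLUSTER_SIZE entries */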
+ if (!(trk->entry % MOV_INDEX_CLUSTER_SIZE)) {
+ trk->cluster = av_realloc(trk->cluster, (trk->entry + MOV_INDEX_CLUSTER_SIZE) * sizeof(*trk->cluster));
+ if (!trk->cluster)
+ return -1;
+ }
+
+ trk->cluster[trk->entry].pos = url_ftell(pb);
+ trk->cluster[trk->entry].samplesInChunk = samplesInChunk;
+ trk->cluster[trk->entry].size = size;
+ trk->cluster[trk->entry].entries = samplesInChunk;
+ trk->cluster[trk->entry].dts = pkt->dts;
+ trk->trackDuration = pkt->dts - trk->cluster[0].dts + pkt->duration;
+
+ if(enc->codec_type == CODEC_TYPE_VIDEO) {
+ if (pkt->dts != pkt->pts)
+ trk->hasBframes = 1;
+ trk->cluster[trk->entry].cts = pkt->pts - pkt->dts;
+ trk->cluster[trk->entry].key_frame = !!(pkt->flags & PKT_FLAG_KEY);
+ if(trk->cluster[trk->entry].key_frame)
+ trk->hasKeyframes++;
+ }
+ trk->entry++;
+ trk->sampleCount += samplesInChunk;
+ mov->mdat_size += size;
+
+ put_buffer(pb, pkt->data, size);
+
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int mov_write_trailer(AVFormatContext *s)
+{
+ MOVContext *mov = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int res = 0;
+ int i;
+
+ offset_t moov_pos = url_ftell(pb);
+
+ /* Write size of mdat tag */
+ if (mov->mdat_size+8 <= UINT32_MAX) {
+ url_fseek(pb, mov->mdat_pos, SEEK_SET);
+ put_be32(pb, mov->mdat_size+8);
+ } else {
+ /* overwrite 'wide' placeholder atom */
+ url_fseek(pb, mov->mdat_pos - 8, SEEK_SET);
+ put_be32(pb, 1); /* special value: real atom size will be 64 bit value after tag field */
+ put_tag(pb, "mdat");
+ put_be64(pb, mov->mdat_size+16);
+ }
+ url_fseek(pb, moov_pos, SEEK_SET);
+
+ mov_write_moov_tag(pb, mov, s);
+
+ for (i=0; i<mov->nb_streams; i++) {
+ av_freep(&mov->tracks[i].cluster);
+
+ if( mov->tracks[i].vosLen ) av_free( mov->tracks[i].vosData );
+
+ }
+
+ put_flush_packet(pb);
+
+ return res;
+}
+
+#ifdef CONFIG_MOV_MUXER
+AVOutputFormat mov_muxer = {
+ "mov",
+ "mov format",
+ NULL,
+ "mov",
+ sizeof(MOVContext),
+ CODEC_ID_AAC,
+ CODEC_ID_MPEG4,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+#ifdef CONFIG_TGP_MUXER
+AVOutputFormat tgp_muxer = {
+ "3gp",
+ "3gp format",
+ NULL,
+ "3gp",
+ sizeof(MOVContext),
+ CODEC_ID_AMR_NB,
+ CODEC_ID_H263,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+#ifdef CONFIG_MP4_MUXER
+AVOutputFormat mp4_muxer = {
+ "mp4",
+ "mp4 format",
+ "application/mp4",
+ "mp4,m4a",
+ sizeof(MOVContext),
+ CODEC_ID_AAC,
+ CODEC_ID_MPEG4,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+#ifdef CONFIG_PSP_MUXER
+AVOutputFormat psp_muxer = {
+ "psp",
+ "psp mp4 format",
+ NULL,
+ "mp4,psp",
+ sizeof(MOVContext),
+ CODEC_ID_AAC,
+ CODEC_ID_MPEG4,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
+#ifdef CONFIG_TG2_MUXER
+AVOutputFormat tg2_muxer = {
+ "3g2",
+ "3gp2 format",
+ NULL,
+ "3g2",
+ sizeof(MOVContext),
+ CODEC_ID_AMR_NB,
+ CODEC_ID_H263,
+ mov_write_header,
+ mov_write_packet,
+ mov_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/mp3.c b/contrib/ffmpeg/libavformat/mp3.c
new file mode 100644
index 000000000..723980c83
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mp3.c
@@ -0,0 +1,430 @@
+/*
+ * MP3 muxer and demuxer
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "mpegaudio.h"
+
+#define ID3_HEADER_SIZE 10
+#define ID3_TAG_SIZE 128
+
+#define ID3_GENRE_MAX 125
+
+static const char *id3_genre_str[ID3_GENRE_MAX + 1] = {
+ [0] = "Blues",
+ [1] = "Classic Rock",
+ [2] = "Country",
+ [3] = "Dance",
+ [4] = "Disco",
+ [5] = "Funk",
+ [6] = "Grunge",
+ [7] = "Hip-Hop",
+ [8] = "Jazz",
+ [9] = "Metal",
+ [10] = "New Age",
+ [11] = "Oldies",
+ [12] = "Other",
+ [13] = "Pop",
+ [14] = "R&B",
+ [15] = "Rap",
+ [16] = "Reggae",
+ [17] = "Rock",
+ [18] = "Techno",
+ [19] = "Industrial",
+ [20] = "Alternative",
+ [21] = "Ska",
+ [22] = "Death Metal",
+ [23] = "Pranks",
+ [24] = "Soundtrack",
+ [25] = "Euro-Techno",
+ [26] = "Ambient",
+ [27] = "Trip-Hop",
+ [28] = "Vocal",
+ [29] = "Jazz+Funk",
+ [30] = "Fusion",
+ [31] = "Trance",
+ [32] = "Classical",
+ [33] = "Instrumental",
+ [34] = "Acid",
+ [35] = "House",
+ [36] = "Game",
+ [37] = "Sound Clip",
+ [38] = "Gospel",
+ [39] = "Noise",
+ [40] = "AlternRock",
+ [41] = "Bass",
+ [42] = "Soul",
+ [43] = "Punk",
+ [44] = "Space",
+ [45] = "Meditative",
+ [46] = "Instrumental Pop",
+ [47] = "Instrumental Rock",
+ [48] = "Ethnic",
+ [49] = "Gothic",
+ [50] = "Darkwave",
+ [51] = "Techno-Industrial",
+ [52] = "Electronic",
+ [53] = "Pop-Folk",
+ [54] = "Eurodance",
+ [55] = "Dream",
+ [56] = "Southern Rock",
+ [57] = "Comedy",
+ [58] = "Cult",
+ [59] = "Gangsta",
+ [60] = "Top 40",
+ [61] = "Christian Rap",
+ [62] = "Pop/Funk",
+ [63] = "Jungle",
+ [64] = "Native American",
+ [65] = "Cabaret",
+ [66] = "New Wave",
+ [67] = "Psychadelic",
+ [68] = "Rave",
+ [69] = "Showtunes",
+ [70] = "Trailer",
+ [71] = "Lo-Fi",
+ [72] = "Tribal",
+ [73] = "Acid Punk",
+ [74] = "Acid Jazz",
+ [75] = "Polka",
+ [76] = "Retro",
+ [77] = "Musical",
+ [78] = "Rock & Roll",
+ [79] = "Hard Rock",
+ [80] = "Folk",
+ [81] = "Folk-Rock",
+ [82] = "National Folk",
+ [83] = "Swing",
+ [84] = "Fast Fusion",
+ [85] = "Bebob",
+ [86] = "Latin",
+ [87] = "Revival",
+ [88] = "Celtic",
+ [89] = "Bluegrass",
+ [90] = "Avantgarde",
+ [91] = "Gothic Rock",
+ [92] = "Progressive Rock",
+ [93] = "Psychedelic Rock",
+ [94] = "Symphonic Rock",
+ [95] = "Slow Rock",
+ [96] = "Big Band",
+ [97] = "Chorus",
+ [98] = "Easy Listening",
+ [99] = "Acoustic",
+ [100] = "Humour",
+ [101] = "Speech",
+ [102] = "Chanson",
+ [103] = "Opera",
+ [104] = "Chamber Music",
+ [105] = "Sonata",
+ [106] = "Symphony",
+ [107] = "Booty Bass",
+ [108] = "Primus",
+ [109] = "Porn Groove",
+ [110] = "Satire",
+ [111] = "Slow Jam",
+ [112] = "Club",
+ [113] = "Tango",
+ [114] = "Samba",
+ [115] = "Folklore",
+ [116] = "Ballad",
+ [117] = "Power Ballad",
+ [118] = "Rhythmic Soul",
+ [119] = "Freestyle",
+ [120] = "Duet",
+ [121] = "Punk Rock",
+ [122] = "Drum Solo",
+ [123] = "A capella",
+ [124] = "Euro-House",
+ [125] = "Dance Hall",
+};
+
+/* buf must be ID3_HEADER_SIZE bytes long */
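+/* An ID3v2 header starts with "ID3", version bytes that are not 0xff, and four syncsafe size bytes (high bit clear) */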
+static int id3_match(const uint8_t *buf)
+{
+ return (buf[0] == 'I' &&
+ buf[1] == 'D' &&
+ buf[2] == '3' &&
+ buf[3] != 0xff &&
+ buf[4] != 0xff &&
+ (buf[6] & 0x80) == 0 &&
+ (buf[7] & 0x80) == 0 &&
+ (buf[8] & 0x80) == 0 &&
+ (buf[9] & 0x80) == 0);
+}
+
+static void id3_get_string(char *str, int str_size,
+ const uint8_t *buf, int buf_size)
+{
+ int i, c;
+ char *q;
+
+ q = str;
+ for(i = 0; i < buf_size; i++) {
+ c = buf[i];
+ if (c == '\0')
+ break;
+ if ((q - str) >= str_size - 1)
+ break;
+ *q++ = c;
+ }
+ *q = '\0';
+}
+
+/* 'buf' must be ID3_TAG_SIZE bytes long */
+static int id3_parse_tag(AVFormatContext *s, const uint8_t *buf)
+{
+ char str[5];
+ int genre;
+
+ if (!(buf[0] == 'T' &&
+ buf[1] == 'A' &&
+ buf[2] == 'G'))
+ return -1;
+ id3_get_string(s->title, sizeof(s->title), buf + 3, 30);
+ id3_get_string(s->author, sizeof(s->author), buf + 33, 30);
+ id3_get_string(s->album, sizeof(s->album), buf + 63, 30);
+ id3_get_string(str, sizeof(str), buf + 93, 4);
+ s->year = atoi(str);
+ id3_get_string(s->comment, sizeof(s->comment), buf + 97, 30);
+ if (buf[125] == 0 && buf[126] != 0)
+ s->track = buf[126];
+ genre = buf[127];
+ if (genre <= ID3_GENRE_MAX)
+ pstrcpy(s->genre, sizeof(s->genre), id3_genre_str[genre]);
+ return 0;
+}
+
+static void id3_create_tag(AVFormatContext *s, uint8_t *buf)
+{
+ int v, i;
+
+ memset(buf, 0, ID3_TAG_SIZE); /* fail safe */
+ buf[0] = 'T';
+ buf[1] = 'A';
+ buf[2] = 'G';
+ strncpy(buf + 3, s->title, 30);
+ strncpy(buf + 33, s->author, 30);
+ strncpy(buf + 63, s->album, 30);
+ v = s->year;
+ if (v > 0) {
+ for(i = 0;i < 4; i++) {
+ buf[96 - i] = '0' + (v % 10);
+ v = v / 10;
+ }
+ }
+ strncpy(buf + 97, s->comment, 30);
+ if (s->track != 0) {
+ buf[125] = 0;
+ buf[126] = s->track;
+ }
+ for(i = 0; i <= ID3_GENRE_MAX; i++) {
+ if (!strcasecmp(s->genre, id3_genre_str[i])) {
+ buf[127] = i;
+ break;
+ }
+ }
+}
+
+/* mp3 read */
+
+static int mp3_read_probe(AVProbeData *p)
+{
+ int max_frames, first_frames;
+ int fsize, frames, sample_rate;
+ uint32_t header;
+ uint8_t *buf, *buf2, *end;
+ AVCodecContext avctx;
+
+ if(p->buf_size < ID3_HEADER_SIZE)
+ return 0;
+
+ if(id3_match(p->buf))
+ return AVPROBE_SCORE_MAX/2+1; // this must be less than the MPEG-PS score because some files put ID3 tags in front of MPEG-PS data
+
+ max_frames = 0;
+ buf = p->buf;
+ end = buf + FFMIN(4096, p->buf_size - sizeof(uint32_t));
+
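+ /* try every byte offset as a potential frame start and count how many consecutive valid MPEG audio frame headers follow */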
+ for(; buf < end; buf++) {
+ buf2 = buf;
+
+ for(frames = 0; buf2 < end; frames++) {
+ header = (buf2[0] << 24) | (buf2[1] << 16) | (buf2[2] << 8) | buf2[3];
+ fsize = mpa_decode_header(&avctx, header, &sample_rate);
+ if(fsize < 0)
+ break;
+ buf2 += fsize;
+ }
+ max_frames = FFMAX(max_frames, frames);
+ if(buf == p->buf)
+ first_frames= frames;
+ }
+ if (first_frames>=3) return AVPROBE_SCORE_MAX/2+1;
+ else if(max_frames>=3) return AVPROBE_SCORE_MAX/4;
+ else if(max_frames>=1) return 1;
+ else return 0;
+}
+
+static int mp3_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+ uint8_t buf[ID3_TAG_SIZE];
+ int len, ret, filesize;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_MP3;
+ st->need_parsing = 1;
+
+ /* try to get the TAG */
+ if (!url_is_streamed(&s->pb)) {
+ /* XXX: change that */
+ filesize = url_fsize(&s->pb);
+ if (filesize > 128) {
+ url_fseek(&s->pb, filesize - 128, SEEK_SET);
+ ret = get_buffer(&s->pb, buf, ID3_TAG_SIZE);
+ if (ret == ID3_TAG_SIZE) {
+ id3_parse_tag(s, buf);
+ }
+ url_fseek(&s->pb, 0, SEEK_SET);
+ }
+ }
+
+ /* if ID3 header found, skip it */
+ ret = get_buffer(&s->pb, buf, ID3_HEADER_SIZE);
+ if (ret != ID3_HEADER_SIZE)
+ return -1;
+ if (id3_match(buf)) {
+ /* skip ID3 header */
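+ /* the ID3v2 tag size is stored as four syncsafe bytes (7 bits per byte) */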
+ len = ((buf[6] & 0x7f) << 21) |
+ ((buf[7] & 0x7f) << 14) |
+ ((buf[8] & 0x7f) << 7) |
+ (buf[9] & 0x7f);
+ url_fskip(&s->pb, len);
+ } else {
+ url_fseek(&s->pb, 0, SEEK_SET);
+ }
+
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+#define MP3_PACKET_SIZE 1024
+
+static int mp3_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, size;
+ // AVStream *st = s->streams[0];
+
+ size= MP3_PACKET_SIZE;
+
+ ret= av_get_packet(&s->pb, pkt, size);
+
+ pkt->stream_index = 0;
+ if (ret <= 0) {
+ return AVERROR_IO;
+ }
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return ret;
+}
+
+static int mp3_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+#ifdef CONFIG_MUXERS
+/* simple formats */
+static int mp3_write_header(struct AVFormatContext *s)
+{
+ return 0;
+}
+
+static int mp3_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ put_buffer(&s->pb, pkt->data, pkt->size);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int mp3_write_trailer(struct AVFormatContext *s)
+{
+ uint8_t buf[ID3_TAG_SIZE];
+
+ /* write the id3 header */
+ if (s->title[0] != '\0') {
+ id3_create_tag(s, buf);
+ put_buffer(&s->pb, buf, ID3_TAG_SIZE);
+ put_flush_packet(&s->pb);
+ }
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+#ifdef CONFIG_MP3_DEMUXER
+AVInputFormat mp3_demuxer = {
+ "mp3",
+ "MPEG audio",
+ 0,
+ mp3_read_probe,
+ mp3_read_header,
+ mp3_read_packet,
+ mp3_read_close,
+ .extensions = "mp2,mp3,m2a", /* XXX: use probe */
+};
+#endif
+#ifdef CONFIG_MP2_MUXER
+AVOutputFormat mp2_muxer = {
+ "mp2",
+ "MPEG audio layer 2",
+ "audio/x-mpeg",
+#ifdef CONFIG_MP3LAME
+ "mp2,m2a",
+#else
+ "mp2,mp3,m2a",
+#endif
+ 0,
+ CODEC_ID_MP2,
+ 0,
+ mp3_write_header,
+ mp3_write_packet,
+ mp3_write_trailer,
+};
+#endif
+#ifdef CONFIG_MP3_MUXER
+AVOutputFormat mp3_muxer = {
+ "mp3",
+ "MPEG audio layer 3",
+ "audio/x-mpeg",
+ "mp3",
+ 0,
+ CODEC_ID_MP3,
+ 0,
+ mp3_write_header,
+ mp3_write_packet,
+ mp3_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/mpeg.c b/contrib/ffmpeg/libavformat/mpeg.c
new file mode 100644
index 000000000..709ce16f1
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpeg.c
@@ -0,0 +1,1824 @@
+/*
+ * MPEG1/2 muxer and demuxer
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "bitstream.h"
+#include "fifo.h"
+
+#define MAX_PAYLOAD_SIZE 4096
+//#define DEBUG_SEEK
+
+#undef NDEBUG
+#include <assert.h>
+
+typedef struct PacketDesc {
+ int64_t pts;
+ int64_t dts;
+ int size;
+ int unwritten_size;
+ int flags;
+ struct PacketDesc *next;
+} PacketDesc;
+
+typedef struct {
+ AVFifoBuffer fifo;
+ uint8_t id;
+ int max_buffer_size; /* in bytes */
+ int buffer_index;
+ PacketDesc *predecode_packet;
+ PacketDesc *premux_packet;
+ PacketDesc **next_packet;
+ int packet_number;
+ uint8_t lpcm_header[3];
+ int lpcm_align;
+ int bytes_to_iframe;
+ int align_iframe;
+ int64_t vobu_start_pts;
+} StreamInfo;
+
+typedef struct {
+ int packet_size; /* required packet size */
+ int packet_number;
+ int pack_header_freq; /* frequency (in packets^-1) at which we send pack headers */
+ int system_header_freq;
+ int system_header_size;
+ int mux_rate; /* bitrate in units of 50 bytes/s */
+ /* stream info */
+ int audio_bound;
+ int video_bound;
+ int is_mpeg2;
+ int is_vcd;
+ int is_svcd;
+ int is_dvd;
+ int64_t last_scr; /* current system clock */
+
+ double vcd_padding_bitrate; //FIXME floats
+ int64_t vcd_padding_bytes_written;
+
+} MpegMuxContext;
+
+#define PACK_START_CODE ((unsigned int)0x000001ba)
+#define SYSTEM_HEADER_START_CODE ((unsigned int)0x000001bb)
+#define SEQUENCE_END_CODE ((unsigned int)0x000001b7)
+#define PACKET_START_CODE_MASK ((unsigned int)0xffffff00)
+#define PACKET_START_CODE_PREFIX ((unsigned int)0x00000100)
+#define ISO_11172_END_CODE ((unsigned int)0x000001b9)
+
+/* mpeg2 */
+#define PROGRAM_STREAM_MAP 0x1bc
+#define PRIVATE_STREAM_1 0x1bd
+#define PADDING_STREAM 0x1be
+#define PRIVATE_STREAM_2 0x1bf
+
+
+#define AUDIO_ID 0xc0
+#define VIDEO_ID 0xe0
+#define AC3_ID 0x80
+#define DTS_ID 0x8a
+#define LPCM_ID 0xa0
+#define SUB_ID 0x20
+
+#define STREAM_TYPE_VIDEO_MPEG1 0x01
+#define STREAM_TYPE_VIDEO_MPEG2 0x02
+#define STREAM_TYPE_AUDIO_MPEG1 0x03
+#define STREAM_TYPE_AUDIO_MPEG2 0x04
+#define STREAM_TYPE_PRIVATE_SECTION 0x05
+#define STREAM_TYPE_PRIVATE_DATA 0x06
+#define STREAM_TYPE_AUDIO_AAC 0x0f
+#define STREAM_TYPE_VIDEO_MPEG4 0x10
+#define STREAM_TYPE_VIDEO_H264 0x1b
+
+#define STREAM_TYPE_AUDIO_AC3 0x81
+#define STREAM_TYPE_AUDIO_DTS 0x8a
+
+static const int lpcm_freq_tab[4] = { 48000, 96000, 44100, 32000 };
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat mpeg1system_muxer;
+AVOutputFormat mpeg1vcd_muxer;
+AVOutputFormat mpeg2vob_muxer;
+AVOutputFormat mpeg2svcd_muxer;
+AVOutputFormat mpeg2dvd_muxer;
+
+static int put_pack_header(AVFormatContext *ctx,
+ uint8_t *buf, int64_t timestamp)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ PutBitContext pb;
+
+ init_put_bits(&pb, buf, 128);
+
+ put_bits(&pb, 32, PACK_START_CODE);
+ if (s->is_mpeg2) {
+ put_bits(&pb, 2, 0x1);
+ } else {
+ put_bits(&pb, 4, 0x2);
+ }
+ put_bits(&pb, 3, (uint32_t)((timestamp >> 30) & 0x07));
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 15, (uint32_t)((timestamp >> 15) & 0x7fff));
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 15, (uint32_t)((timestamp) & 0x7fff));
+ put_bits(&pb, 1, 1);
+ if (s->is_mpeg2) {
+ /* clock extension */
+ put_bits(&pb, 9, 0);
+ }
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 22, s->mux_rate);
+ put_bits(&pb, 1, 1);
+ if (s->is_mpeg2) {
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 5, 0x1f); /* reserved */
+ put_bits(&pb, 3, 0); /* stuffing length */
+ }
+ flush_put_bits(&pb);
+ return pbBufPtr(&pb) - pb.buf;
+}
+
+static int put_system_header(AVFormatContext *ctx, uint8_t *buf,int only_for_stream_id)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int size, i, private_stream_coded, id;
+ PutBitContext pb;
+
+ init_put_bits(&pb, buf, 128);
+
+ put_bits(&pb, 32, SYSTEM_HEADER_START_CODE);
+ put_bits(&pb, 16, 0);
+ put_bits(&pb, 1, 1);
+
+ put_bits(&pb, 22, s->mux_rate); /* maximum bit rate of the multiplexed stream */
+ put_bits(&pb, 1, 1); /* marker */
+ if (s->is_vcd && only_for_stream_id==VIDEO_ID) {
+ /* This header applies only to the video stream (see VCD standard p. IV-7)*/
+ put_bits(&pb, 6, 0);
+ } else
+ put_bits(&pb, 6, s->audio_bound);
+
+ if (s->is_vcd) {
+ /* see VCD standard, p. IV-7*/
+ put_bits(&pb, 1, 0);
+ put_bits(&pb, 1, 1);
+ } else {
+ put_bits(&pb, 1, 0); /* variable bitrate*/
+ put_bits(&pb, 1, 0); /* non constrainted bit stream */
+ }
+
+ if (s->is_vcd || s->is_dvd) {
+ /* see VCD standard p IV-7 */
+ put_bits(&pb, 1, 1); /* audio locked */
+ put_bits(&pb, 1, 1); /* video locked */
+ } else {
+ put_bits(&pb, 1, 0); /* audio locked */
+ put_bits(&pb, 1, 0); /* video locked */
+ }
+
+ put_bits(&pb, 1, 1); /* marker */
+
+ if (s->is_vcd && only_for_stream_id==AUDIO_ID) {
+ /* This header applies only to the audio stream (see VCD standard p. IV-7)*/
+ put_bits(&pb, 5, 0);
+ } else
+ put_bits(&pb, 5, s->video_bound);
+
+ if (s->is_dvd) {
+ put_bits(&pb, 1, 0); /* packet_rate_restriction_flag */
+ put_bits(&pb, 7, 0x7f); /* reserved byte */
+ } else
+ put_bits(&pb, 8, 0xff); /* reserved byte */
+
+ /* DVD-Video Stream_bound entries
+ id (0xB9) video, maximum P-STD for stream 0xE0. (P-STD_buffer_bound_scale = 1)
+ id (0xB8) audio, maximum P-STD for any MPEG audio (0xC0 to 0xC7) streams. If there are none set to 4096 (32x128). (P-STD_buffer_bound_scale = 0)
+ id (0xBD) private stream 1 (audio other than MPEG and subpictures). (P-STD_buffer_bound_scale = 1)
+ id (0xBF) private stream 2, NAV packs, set to 2x1024. */
+ if (s->is_dvd) {
+
+ int P_STD_max_video = 0;
+ int P_STD_max_mpeg_audio = 0;
+ int P_STD_max_mpeg_PS1 = 0;
+
+ for(i=0;i<ctx->nb_streams;i++) {
+ StreamInfo *stream = ctx->streams[i]->priv_data;
+
+ id = stream->id;
+ if (id == 0xbd && stream->max_buffer_size > P_STD_max_mpeg_PS1) {
+ P_STD_max_mpeg_PS1 = stream->max_buffer_size;
+ } else if (id >= 0xc0 && id <= 0xc7 && stream->max_buffer_size > P_STD_max_mpeg_audio) {
+ P_STD_max_mpeg_audio = stream->max_buffer_size;
+ } else if (id == 0xe0 && stream->max_buffer_size > P_STD_max_video) {
+ P_STD_max_video = stream->max_buffer_size;
+ }
+ }
+
+ /* video */
+ put_bits(&pb, 8, 0xb9); /* stream ID */
+ put_bits(&pb, 2, 3);
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 13, P_STD_max_video / 1024);
+
+ /* audio */
+ if (P_STD_max_mpeg_audio == 0)
+ P_STD_max_mpeg_audio = 4096;
+ put_bits(&pb, 8, 0xb8); /* stream ID */
+ put_bits(&pb, 2, 3);
+ put_bits(&pb, 1, 0);
+ put_bits(&pb, 13, P_STD_max_mpeg_audio / 128);
+
+ /* private stream 1 */
+ put_bits(&pb, 8, 0xbd); /* stream ID */
+ put_bits(&pb, 2, 3);
+ put_bits(&pb, 1, 0);
+ put_bits(&pb, 13, P_STD_max_mpeg_PS1 / 128);
+
+ /* private stream 2 */
+ put_bits(&pb, 8, 0xbf); /* stream ID */
+ put_bits(&pb, 2, 3);
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 13, 2);
+ }
+ else {
+ /* audio stream info */
+ private_stream_coded = 0;
+ for(i=0;i<ctx->nb_streams;i++) {
+ StreamInfo *stream = ctx->streams[i]->priv_data;
+
+
+ /* For VCDs, only include the stream info for the stream
+ that the pack containing this system header belongs to.
+ (see VCD standard p. IV-7) */
+ if ( !s->is_vcd || stream->id==only_for_stream_id
+ || only_for_stream_id==0) {
+
+ id = stream->id;
+ if (id < 0xc0) {
+ /* special case for private streams (AC3 use that) */
+ if (private_stream_coded)
+ continue;
+ private_stream_coded = 1;
+ id = 0xbd;
+ }
+ put_bits(&pb, 8, id); /* stream ID */
+ put_bits(&pb, 2, 3);
+ if (id < 0xe0) {
+ /* audio */
+ put_bits(&pb, 1, 0);
+ put_bits(&pb, 13, stream->max_buffer_size / 128);
+ } else {
+ /* video */
+ put_bits(&pb, 1, 1);
+ put_bits(&pb, 13, stream->max_buffer_size / 1024);
+ }
+ }
+ }
+ }
+
+ flush_put_bits(&pb);
+ size = pbBufPtr(&pb) - pb.buf;
+ /* patch packet size */
+ buf[4] = (size - 6) >> 8;
+ buf[5] = (size - 6) & 0xff;
+
+ return size;
+}
+
+static int get_system_header_size(AVFormatContext *ctx)
+{
+ int buf_index, i, private_stream_coded;
+ StreamInfo *stream;
+ MpegMuxContext *s = ctx->priv_data;
+
+ if (s->is_dvd)
+ return 18; // DVD-Video system headers are 18 bytes fixed length.
+
+ buf_index = 12;
+ private_stream_coded = 0;
+ for(i=0;i<ctx->nb_streams;i++) {
+ stream = ctx->streams[i]->priv_data;
+ if (stream->id < 0xc0) {
+ if (private_stream_coded)
+ continue;
+ private_stream_coded = 1;
+ }
+ buf_index += 3;
+ }
+ return buf_index;
+}
+
+static int mpeg_mux_init(AVFormatContext *ctx)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int bitrate, i, mpa_id, mpv_id, mps_id, ac3_id, dts_id, lpcm_id, j;
+ AVStream *st;
+ StreamInfo *stream;
+ int audio_bitrate;
+ int video_bitrate;
+
+ s->packet_number = 0;
+ s->is_vcd = (ctx->oformat == &mpeg1vcd_muxer);
+ s->is_svcd = (ctx->oformat == &mpeg2svcd_muxer);
+ s->is_mpeg2 = (ctx->oformat == &mpeg2vob_muxer || ctx->oformat == &mpeg2svcd_muxer || ctx->oformat == &mpeg2dvd_muxer);
+ s->is_dvd = (ctx->oformat == &mpeg2dvd_muxer);
+
+ if(ctx->packet_size)
+ s->packet_size = ctx->packet_size;
+ else
+ s->packet_size = 2048;
+
+ s->vcd_padding_bytes_written = 0;
+ s->vcd_padding_bitrate=0;
+
+ s->audio_bound = 0;
+ s->video_bound = 0;
+ mpa_id = AUDIO_ID;
+ ac3_id = AC3_ID;
+ dts_id = DTS_ID;
+ mpv_id = VIDEO_ID;
+ mps_id = SUB_ID;
+ lpcm_id = LPCM_ID;
+ for(i=0;i<ctx->nb_streams;i++) {
+ st = ctx->streams[i];
+ stream = av_mallocz(sizeof(StreamInfo));
+ if (!stream)
+ goto fail;
+ st->priv_data = stream;
+
+ av_set_pts_info(st, 64, 1, 90000);
+
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (st->codec->codec_id == CODEC_ID_AC3) {
+ stream->id = ac3_id++;
+ } else if (st->codec->codec_id == CODEC_ID_DTS) {
+ stream->id = dts_id++;
+ } else if (st->codec->codec_id == CODEC_ID_PCM_S16BE) {
+ stream->id = lpcm_id++;
+ for(j = 0; j < 4; j++) {
+ if (lpcm_freq_tab[j] == st->codec->sample_rate)
+ break;
+ }
+ if (j == 4)
+ goto fail;
+ if (st->codec->channels > 8)
+ return -1;
+ stream->lpcm_header[0] = 0x0c;
+ stream->lpcm_header[1] = (st->codec->channels - 1) | (j << 4);
+ stream->lpcm_header[2] = 0x80;
+ stream->lpcm_align = st->codec->channels * 2;
+ } else {
+ stream->id = mpa_id++;
+ }
+
+ /* This value HAS to be used for VCD (see VCD standard, p. IV-7).
+ Right now it is also used for everything else.*/
+ stream->max_buffer_size = 4 * 1024;
+ s->audio_bound++;
+ break;
+ case CODEC_TYPE_VIDEO:
+ stream->id = mpv_id++;
+ if (st->codec->rc_buffer_size)
+ stream->max_buffer_size = 6*1024 + st->codec->rc_buffer_size/8;
+ else
+ stream->max_buffer_size = 230*1024; //FIXME this is probably too small as default
+#if 0
+ /* see VCD standard, p. IV-7*/
+ stream->max_buffer_size = 46 * 1024;
+ else
+ /* This value HAS to be used for SVCD (see SVCD standard, p. 26 V.2.3.2).
+ Right now it is also used for everything else.*/
+ stream->max_buffer_size = 230 * 1024;
+#endif
+ s->video_bound++;
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ stream->id = mps_id++;
+ stream->max_buffer_size = 16 * 1024;
+ break;
+ default:
+ return -1;
+ }
+ av_fifo_init(&stream->fifo, 16);
+ }
+ bitrate = 0;
+ audio_bitrate = 0;
+ video_bitrate = 0;
+ for(i=0;i<ctx->nb_streams;i++) {
+ int codec_rate;
+ st = ctx->streams[i];
+ stream = (StreamInfo*) st->priv_data;
+
+ if(st->codec->rc_max_rate || stream->id==VIDEO_ID)
+ codec_rate= st->codec->rc_max_rate;
+ else
+ codec_rate= st->codec->bit_rate;
+
+ if(!codec_rate)
+ codec_rate= (1<<21)*8*50/ctx->nb_streams;
+
+ bitrate += codec_rate;
+
+ if (stream->id==AUDIO_ID)
+ audio_bitrate += codec_rate;
+ else if (stream->id==VIDEO_ID)
+ video_bitrate += codec_rate;
+ }
+
+ if(ctx->mux_rate){
+ s->mux_rate= (ctx->mux_rate + (8 * 50) - 1) / (8 * 50);
+ } else {
+ /* slightly increase the bitrate to account for the
+ headers. XXX: compute it exactly */
+ bitrate += bitrate*5/100;
+ bitrate += 10000;
+ s->mux_rate = (bitrate + (8 * 50) - 1) / (8 * 50);
+ }
+
+ if (s->is_vcd) {
+ double overhead_rate;
+
+ /* The VCD standard mandates that the mux_rate field is 3528
+ (see standard p. IV-6).
+ The value is actually "wrong", i.e. if you calculate
+ it using the normal formula and the 75 sectors per second transfer
+ rate you get a different value because the real pack size is 2324,
+ not 2352. But the standard explicitly specifies that the mux_rate
+ field in the header must have this value.*/
+// s->mux_rate=2352 * 75 / 50; /* = 3528*/
+
+ /* The VCD standard states that the muxed stream must be
+ exactly 75 packs / second (the data rate of a single speed cdrom).
+ Since the video bitrate (probably 1150000 bits/sec) will be below
+ the theoretical maximum we have to add some padding packets
+ to make up for the lower data rate.
+ (cf. VCD standard p. IV-6 )*/
+
+ /* Add the header overhead to the data rate.
+ 2279 data bytes per audio pack, 2294 data bytes per video pack*/
+ overhead_rate = ((audio_bitrate / 8.0) / 2279) * (2324 - 2279);
+ overhead_rate += ((video_bitrate / 8.0) / 2294) * (2324 - 2294);
+ overhead_rate *= 8;
+
+ /* Add padding so that the full bitrate is 2324*75 bytes/sec */
+ s->vcd_padding_bitrate = 2324 * 75 * 8 - (bitrate + overhead_rate);
+ }
+
+ if (s->is_vcd || s->is_mpeg2)
+ /* every packet */
+ s->pack_header_freq = 1;
+ else
+ /* every 2 seconds */
+ s->pack_header_freq = 2 * bitrate / s->packet_size / 8;
+
+ /* the above seems to make pack_header_freq zero sometimes */
+ if (s->pack_header_freq == 0)
+ s->pack_header_freq = 1;
+
+ if (s->is_mpeg2)
+ /* every 200 packets. Need to look at the spec. */
+ s->system_header_freq = s->pack_header_freq * 40;
+ else if (s->is_vcd)
+ /* the standard mandates that there are only two system headers
+ in the whole file: one in the first packet of each stream.
+ (see standard p. IV-7 and IV-8) */
+ s->system_header_freq = 0x7fffffff;
+ else
+ s->system_header_freq = s->pack_header_freq * 5;
+
+ for(i=0;i<ctx->nb_streams;i++) {
+ stream = ctx->streams[i]->priv_data;
+ stream->packet_number = 0;
+ }
+ s->system_header_size = get_system_header_size(ctx);
+ s->last_scr = 0;
+ return 0;
+ fail:
+ for(i=0;i<ctx->nb_streams;i++) {
+ av_free(ctx->streams[i]->priv_data);
+ }
+ return -ENOMEM;
+}
+
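+/* Write a 33-bit MPEG-PS timestamp: a 4-bit id, then the timestamp split into
+   3+15+15 bit fields, each field followed by a marker bit */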
+static inline void put_timestamp(ByteIOContext *pb, int id, int64_t timestamp)
+{
+ put_byte(pb,
+ (id << 4) |
+ (((timestamp >> 30) & 0x07) << 1) |
+ 1);
+ put_be16(pb, (uint16_t)((((timestamp >> 15) & 0x7fff) << 1) | 1));
+ put_be16(pb, (uint16_t)((((timestamp) & 0x7fff) << 1) | 1));
+}
+
+
+/* return the number of padding bytes that should be inserted into
+ the multiplexed stream.*/
+static int get_vcd_padding_size(AVFormatContext *ctx, int64_t pts)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int pad_bytes = 0;
+
+ if (s->vcd_padding_bitrate > 0 && pts!=AV_NOPTS_VALUE)
+ {
+ int64_t full_pad_bytes;
+
+ full_pad_bytes = (int64_t)((s->vcd_padding_bitrate * (pts / 90000.0)) / 8.0); //FIXME this is wrong
+ pad_bytes = (int) (full_pad_bytes - s->vcd_padding_bytes_written);
+
+ if (pad_bytes<0)
+ /* might happen if we have already padded to a later timestamp. This
+ can occur if another stream has already advanced further.*/
+ pad_bytes=0;
+ }
+
+ return pad_bytes;
+}
+
+
+#if 0 /* unused, remove? */
+/* return the exact available payload size for the next packet for
+ stream 'stream_index'. 'pts' and 'dts' are only used to know if
+ timestamps are needed in the packet header. */
+static int get_packet_payload_size(AVFormatContext *ctx, int stream_index,
+ int64_t pts, int64_t dts)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int buf_index;
+ StreamInfo *stream;
+
+ stream = ctx->streams[stream_index]->priv_data;
+
+ buf_index = 0;
+ if (((s->packet_number % s->pack_header_freq) == 0)) {
+ /* pack header size */
+ if (s->is_mpeg2)
+ buf_index += 14;
+ else
+ buf_index += 12;
+
+ if (s->is_vcd) {
+ /* there is exactly one system header for each stream in a VCD MPEG:
+ one in the very first video packet and one in the very first
+ audio packet (see VCD standard p. IV-7 and IV-8). */
+
+ if (stream->packet_number==0)
+ /* The system headers refer only to the stream they occur in,
+ so they have a constant size.*/
+ buf_index += 15;
+
+ } else {
+ if ((s->packet_number % s->system_header_freq) == 0)
+ buf_index += s->system_header_size;
+ }
+ }
+
+ if ((s->is_vcd && stream->packet_number==0)
+ || (s->is_svcd && s->packet_number==0))
+ /* the first pack of each stream contains only the pack header,
+ the system header and some padding (see VCD standard p. IV-6)
+ Add the padding size, so that the actual payload becomes 0.*/
+ buf_index += s->packet_size - buf_index;
+ else {
+ /* packet header size */
+ buf_index += 6;
+ if (s->is_mpeg2) {
+ buf_index += 3;
+ if (stream->packet_number==0)
+ buf_index += 3; /* PES extension */
+ buf_index += 1; /* obligatory stuffing byte */
+ }
+ if (pts != AV_NOPTS_VALUE) {
+ if (dts != pts)
+ buf_index += 5 + 5;
+ else
+ buf_index += 5;
+
+ } else {
+ if (!s->is_mpeg2)
+ buf_index++;
+ }
+
+ if (stream->id < 0xc0) {
+ /* AC3/LPCM private data header */
+ buf_index += 4;
+ if (stream->id >= 0xa0) {
+ int n;
+ buf_index += 3;
+ /* NOTE: we round the payload size to an integer number of
+ LPCM samples */
+ n = (s->packet_size - buf_index) % stream->lpcm_align;
+ if (n)
+ buf_index += (stream->lpcm_align - n);
+ }
+ }
+
+ if (s->is_vcd && stream->id == AUDIO_ID)
+ /* The VCD standard demands that 20 zero bytes follow
+ each audio packet (see standard p. IV-8).*/
+ buf_index+=20;
+ }
+ return s->packet_size - buf_index;
+}
+#endif
+
+/* Write an MPEG padding packet header. */
+static void put_padding_packet(AVFormatContext *ctx, ByteIOContext *pb,int packet_bytes)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int i;
+
+ put_be32(pb, PADDING_STREAM);
+ put_be16(pb, packet_bytes - 6);
+ if (!s->is_mpeg2) {
+ put_byte(pb, 0x0f);
+ packet_bytes -= 7;
+ } else
+ packet_bytes -= 6;
+
+ for(i=0;i<packet_bytes;i++)
+ put_byte(pb, 0xff);
+}
+
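+/* Count how many frames begin within the next 'len' bytes queued for this stream
+   (a frame is counted when none of it has been written out yet) */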
+static int get_nb_frames(AVFormatContext *ctx, StreamInfo *stream, int len){
+ int nb_frames=0;
+ PacketDesc *pkt_desc= stream->premux_packet;
+
+ while(len>0){
+ if(pkt_desc->size == pkt_desc->unwritten_size)
+ nb_frames++;
+ len -= pkt_desc->unwritten_size;
+ pkt_desc= pkt_desc->next;
+ }
+
+ return nb_frames;
+}
+
+/* flush the packet on stream stream_index */
+static int flush_packet(AVFormatContext *ctx, int stream_index,
+ int64_t pts, int64_t dts, int64_t scr, int trailer_size)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ StreamInfo *stream = ctx->streams[stream_index]->priv_data;
+ uint8_t *buf_ptr;
+ int size, payload_size, startcode, id, stuffing_size, i, header_len;
+ int packet_size;
+ uint8_t buffer[128];
+ int zero_trail_bytes = 0;
+ int pad_packet_bytes = 0;
+ int pes_flags;
+ int general_pack = 0; /*"general" pack without data specific to one stream?*/
+ int nb_frames;
+
+ id = stream->id;
+
+#if 0
+ printf("packet ID=%2x PTS=%0.3f\n",
+ id, pts / 90000.0);
+#endif
+
+ buf_ptr = buffer;
+
+ if ((s->packet_number % s->pack_header_freq) == 0 || s->last_scr != scr) {
+ /* output pack and systems header if needed */
+ size = put_pack_header(ctx, buf_ptr, scr);
+ buf_ptr += size;
+ s->last_scr= scr;
+
+ if (s->is_vcd) {
+ /* There is exactly one system header for each stream in a VCD MPEG:
+ one in the very first video packet and one in the very first
+ audio packet (see VCD standard p. IV-7 and IV-8). */
+
+ if (stream->packet_number==0) {
+ size = put_system_header(ctx, buf_ptr, id);
+ buf_ptr += size;
+ }
+ } else if (s->is_dvd) {
+ if (stream->align_iframe || s->packet_number == 0){
+ int PES_bytes_to_fill = s->packet_size - size - 10;
+
+ if (pts != AV_NOPTS_VALUE) {
+ if (dts != pts)
+ PES_bytes_to_fill -= 5 + 5;
+ else
+ PES_bytes_to_fill -= 5;
+ }
+
+ if (stream->bytes_to_iframe == 0 || s->packet_number == 0) {
+ size = put_system_header(ctx, buf_ptr, 0);
+ buf_ptr += size;
+ size = buf_ptr - buffer;
+ put_buffer(&ctx->pb, buffer, size);
+
+ put_be32(&ctx->pb, PRIVATE_STREAM_2);
+ put_be16(&ctx->pb, 0x03d4); // length
+ put_byte(&ctx->pb, 0x00); // substream ID, 00=PCI
+ for (i = 0; i < 979; i++)
+ put_byte(&ctx->pb, 0x00);
+
+ put_be32(&ctx->pb, PRIVATE_STREAM_2);
+ put_be16(&ctx->pb, 0x03fa); // length
+ put_byte(&ctx->pb, 0x01); // substream ID, 01=DSI
+ for (i = 0; i < 1017; i++)
+ put_byte(&ctx->pb, 0x00);
+
+ memset(buffer, 0, 128);
+ buf_ptr = buffer;
+ s->packet_number++;
+ stream->align_iframe = 0;
+ scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
+ size = put_pack_header(ctx, buf_ptr, scr);
+ s->last_scr= scr;
+ buf_ptr += size;
+ /* GOP Start */
+ } else if (stream->bytes_to_iframe < PES_bytes_to_fill) {
+ pad_packet_bytes = PES_bytes_to_fill - stream->bytes_to_iframe;
+ }
+ }
+ } else {
+ if ((s->packet_number % s->system_header_freq) == 0) {
+ size = put_system_header(ctx, buf_ptr, 0);
+ buf_ptr += size;
+ }
+ }
+ }
+ size = buf_ptr - buffer;
+ put_buffer(&ctx->pb, buffer, size);
+
+ packet_size = s->packet_size - size;
+
+ if (s->is_vcd && id == AUDIO_ID)
+ /* The VCD standard demands that 20 zero bytes follow
+ each audio pack (see standard p. IV-8).*/
+ zero_trail_bytes += 20;
+
+ if ((s->is_vcd && stream->packet_number==0)
+ || (s->is_svcd && s->packet_number==0)) {
+ /* for VCD the first pack of each stream contains only the pack header,
+ the system header and lots of padding (see VCD standard p. IV-6).
+ In the case of an audio pack, 20 zero bytes are also added at
+ the end.*/
+ /* For SVCD we fill the very first pack to increase compatibility with
+ some DVD players. Not mandated by the standard.*/
+ if (s->is_svcd)
+ general_pack = 1; /* the system header refers to both streams and no stream data*/
+ pad_packet_bytes = packet_size - zero_trail_bytes;
+ }
+
+ packet_size -= pad_packet_bytes + zero_trail_bytes;
+
+ if (packet_size > 0) {
+
+ /* packet header size */
+ packet_size -= 6;
+
+ /* packet header */
+ if (s->is_mpeg2) {
+ header_len = 3;
+ if (stream->packet_number==0)
+ header_len += 3; /* PES extension */
+ header_len += 1; /* obligatory stuffing byte */
+ } else {
+ header_len = 0;
+ }
+ if (pts != AV_NOPTS_VALUE) {
+ if (dts != pts)
+ header_len += 5 + 5;
+ else
+ header_len += 5;
+ } else {
+ if (!s->is_mpeg2)
+ header_len++;
+ }
+
+ payload_size = packet_size - header_len;
+ if (id < 0xc0) {
+ startcode = PRIVATE_STREAM_1;
+ payload_size -= 1;
+ if (id >= 0x40) {
+ payload_size -= 3;
+ if (id >= 0xa0)
+ payload_size -= 3;
+ }
+ } else {
+ startcode = 0x100 + id;
+ }
+
+ stuffing_size = payload_size - av_fifo_size(&stream->fifo);
+
+ // first byte doesn't fit -> reset pts/dts + stuffing
+ if(payload_size <= trailer_size && pts != AV_NOPTS_VALUE){
+ int timestamp_len=0;
+ if(dts != pts)
+ timestamp_len += 5;
+ if(pts != AV_NOPTS_VALUE)
+ timestamp_len += s->is_mpeg2 ? 5 : 4;
+ pts=dts= AV_NOPTS_VALUE;
+ header_len -= timestamp_len;
+ if (s->is_dvd && stream->align_iframe) {
+ pad_packet_bytes += timestamp_len;
+ packet_size -= timestamp_len;
+ } else {
+ payload_size += timestamp_len;
+ }
+ stuffing_size += timestamp_len;
+ if(payload_size > trailer_size)
+ stuffing_size += payload_size - trailer_size;
+ }
+
+ if (pad_packet_bytes > 0 && pad_packet_bytes <= 7) { // can't use padding, so use stuffing
+ packet_size += pad_packet_bytes;
+ payload_size += pad_packet_bytes; // undo the previous adjustment
+ if (stuffing_size < 0) {
+ stuffing_size = pad_packet_bytes;
+ } else {
+ stuffing_size += pad_packet_bytes;
+ }
+ pad_packet_bytes = 0;
+ }
+
+ if (stuffing_size < 0)
+ stuffing_size = 0;
+ if (stuffing_size > 16) { /*<=16 for MPEG-1, <=32 for MPEG-2*/
+ pad_packet_bytes += stuffing_size;
+ packet_size -= stuffing_size;
+ payload_size -= stuffing_size;
+ stuffing_size = 0;
+ }
+
+ nb_frames= get_nb_frames(ctx, stream, payload_size - stuffing_size);
+
+ put_be32(&ctx->pb, startcode);
+
+ put_be16(&ctx->pb, packet_size);
+
+ if (!s->is_mpeg2)
+ for(i=0;i<stuffing_size;i++)
+ put_byte(&ctx->pb, 0xff);
+
+ if (s->is_mpeg2) {
+ put_byte(&ctx->pb, 0x80); /* mpeg2 id */
+
+ pes_flags=0;
+
+ if (pts != AV_NOPTS_VALUE) {
+ pes_flags |= 0x80;
+ if (dts != pts)
+ pes_flags |= 0x40;
+ }
+
+ /* Both the MPEG-2 and the SVCD standards demand that the
+ P-STD_buffer_size field be included in the first packet of
+ every stream. (see SVCD standard p. 26 V.2.3.1 and V.2.3.2
+ and MPEG-2 standard 2.7.7) */
+ if (stream->packet_number == 0)
+ pes_flags |= 0x01;
+
+ put_byte(&ctx->pb, pes_flags); /* flags */
+ put_byte(&ctx->pb, header_len - 3 + stuffing_size);
+
+ if (pes_flags & 0x80) /*write pts*/
+ put_timestamp(&ctx->pb, (pes_flags & 0x40) ? 0x03 : 0x02, pts);
+ if (pes_flags & 0x40) /*write dts*/
+ put_timestamp(&ctx->pb, 0x01, dts);
+
+ if (pes_flags & 0x01) { /*write pes extension*/
+ put_byte(&ctx->pb, 0x10); /* flags */
+
+ /* P-STD buffer info */
+ if (id == AUDIO_ID)
+ put_be16(&ctx->pb, 0x4000 | stream->max_buffer_size/128);
+ else
+ put_be16(&ctx->pb, 0x6000 | stream->max_buffer_size/1024);
+ }
+
+ } else {
+ if (pts != AV_NOPTS_VALUE) {
+ if (dts != pts) {
+ put_timestamp(&ctx->pb, 0x03, pts);
+ put_timestamp(&ctx->pb, 0x01, dts);
+ } else {
+ put_timestamp(&ctx->pb, 0x02, pts);
+ }
+ } else {
+ put_byte(&ctx->pb, 0x0f);
+ }
+ }
+
+ if (s->is_mpeg2) {
+ /* special stuffing byte that is always written
+ to prevent accidental generation of start codes. */
+ put_byte(&ctx->pb, 0xff);
+
+ for(i=0;i<stuffing_size;i++)
+ put_byte(&ctx->pb, 0xff);
+ }
+
+ if (startcode == PRIVATE_STREAM_1) {
+ put_byte(&ctx->pb, id);
+ if (id >= 0xa0) {
+ /* LPCM (XXX: check nb_frames) */
+ put_byte(&ctx->pb, 7);
+ put_be16(&ctx->pb, 4); /* skip 3 header bytes */
+ put_byte(&ctx->pb, stream->lpcm_header[0]);
+ put_byte(&ctx->pb, stream->lpcm_header[1]);
+ put_byte(&ctx->pb, stream->lpcm_header[2]);
+ } else if (id >= 0x40) {
+ /* AC3 */
+ put_byte(&ctx->pb, nb_frames);
+ put_be16(&ctx->pb, trailer_size+1);
+ }
+ }
+
+ /* output data */
+ if(av_fifo_generic_read(&stream->fifo, payload_size - stuffing_size, &put_buffer, &ctx->pb) < 0)
+ return -1;
+ stream->bytes_to_iframe -= payload_size - stuffing_size;
+ }else{
+ payload_size=
+ stuffing_size= 0;
+ }
+
+ if (pad_packet_bytes > 0)
+ put_padding_packet(ctx,&ctx->pb, pad_packet_bytes);
+
+ for(i=0;i<zero_trail_bytes;i++)
+ put_byte(&ctx->pb, 0x00);
+
+ put_flush_packet(&ctx->pb);
+
+ s->packet_number++;
+
+ /* only increase the stream packet number if this pack actually contains
+ something that is specific to this stream! I.e. a dedicated header
+ or some data.*/
+ if (!general_pack)
+ stream->packet_number++;
+
+ return payload_size - stuffing_size;
+}
+
+static void put_vcd_padding_sector(AVFormatContext *ctx)
+{
+ /* There are two ways to do this padding: writing a sector/pack
+ of 0 values, or writing an MPEG padding pack. Both seem to
+ work with most decoders, BUT the VCD standard only allows a 0-sector
+ (see standard p. IV-4, IV-5).
+ So a 0-sector it is...*/
+
+ MpegMuxContext *s = ctx->priv_data;
+ int i;
+
+ for(i=0;i<s->packet_size;i++)
+ put_byte(&ctx->pb, 0);
+
+ s->vcd_padding_bytes_written += s->packet_size;
+
+ put_flush_packet(&ctx->pb);
+
+ /* increasing the packet number is correct. The SCR of the following packs
+ is calculated from the packet_number and it has to include the padding
+ sector (it represents the sector index, not the MPEG pack index)
+ (see VCD standard p. IV-6)*/
+ s->packet_number++;
+}
+
+#if 0 /* unused, remove? */
+static int64_t get_vcd_scr(AVFormatContext *ctx,int stream_index,int64_t pts)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int64_t scr;
+
+ /* Since the data delivery rate is constant, SCR is computed
+ using the formula C + i * 1200 where C is the start constant
+ and i is the pack index.
+ It is recommended that SCR 0 is at the beginning of the VCD front
+ margin (a sequence of empty Form 2 sectors on the CD).
+ It is recommended that the front margin is 30 sectors long, so
+ we use C = 30*1200 = 36000
+ (Note that even if the front margin is not 30 sectors the file
+ will still be correct according to the standard. It just won't have
+ the "recommended" value).*/
+ scr = 36000 + s->packet_number * 1200;
+
+ return scr;
+}
+#endif
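+
+/* Editorial note, not part of the original commit: the constant 1200 above is
+ the per-sector SCR increment for VCD. A VCD is read at 75 sectors per second
+ (the 1x CD rate), and 90000 / 75 = 1200 ticks of the 90 kHz clock per
+ sector. The same value falls out of the generic increment used in
+ output_packet(): with packet_size = 2324 and mux_rate * 50 = 174300 bytes/s,
+ 2324 * 90000 / 174300 = 1200. For example, packet_number 75 gives
+ scr = 36000 + 75 * 1200 = 126000, i.e. 1.4 seconds. */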
+
+static int remove_decoded_packets(AVFormatContext *ctx, int64_t scr){
+// MpegMuxContext *s = ctx->priv_data;
+ int i;
+
+ for(i=0; i<ctx->nb_streams; i++){
+ AVStream *st = ctx->streams[i];
+ StreamInfo *stream = st->priv_data;
+ PacketDesc *pkt_desc= stream->predecode_packet;
+
+ while(pkt_desc && scr > pkt_desc->dts){ //FIXME > vs >=
+ if(stream->buffer_index < pkt_desc->size ||
+ stream->predecode_packet == stream->premux_packet){
+ av_log(ctx, AV_LOG_ERROR, "buffer underflow\n");
+ break;
+ }
+ stream->buffer_index -= pkt_desc->size;
+
+ stream->predecode_packet= pkt_desc->next;
+ av_freep(&pkt_desc);
+ }
+ }
+
+ return 0;
+}
+
+static int output_packet(AVFormatContext *ctx, int flush){
+ MpegMuxContext *s = ctx->priv_data;
+ AVStream *st;
+ StreamInfo *stream;
+ int i, avail_space, es_size, trailer_size;
+ int best_i= -1;
+ int best_score= INT_MIN;
+ int ignore_constraints=0;
+ int64_t scr= s->last_scr;
+ PacketDesc *timestamp_packet;
+ const int64_t max_delay= av_rescale(ctx->max_delay, 90000, AV_TIME_BASE);
+
+retry:
+ for(i=0; i<ctx->nb_streams; i++){
+ AVStream *st = ctx->streams[i];
+ StreamInfo *stream = st->priv_data;
+ const int avail_data= av_fifo_size(&stream->fifo);
+ const int space= stream->max_buffer_size - stream->buffer_index;
+ int rel_space= 1024*space / stream->max_buffer_size;
+ PacketDesc *next_pkt= stream->premux_packet;
+
+ /* for subtitle, a single PES packet must be generated,
+ so we flush after every single subtitle packet */
+ if(s->packet_size > avail_data && !flush
+ && st->codec->codec_type != CODEC_TYPE_SUBTITLE)
+ return 0;
+ if(avail_data==0)
+ continue;
+ assert(avail_data>0);
+
+ if(space < s->packet_size && !ignore_constraints)
+ continue;
+
+ if(next_pkt && next_pkt->dts - scr > max_delay)
+ continue;
+
+ if(rel_space > best_score){
+ best_score= rel_space;
+ best_i = i;
+ avail_space= space;
+ }
+ }
+
+ if(best_i < 0){
+ int64_t best_dts= INT64_MAX;
+
+ for(i=0; i<ctx->nb_streams; i++){
+ AVStream *st = ctx->streams[i];
+ StreamInfo *stream = st->priv_data;
+ PacketDesc *pkt_desc= stream->predecode_packet;
+ if(pkt_desc && pkt_desc->dts < best_dts)
+ best_dts= pkt_desc->dts;
+ }
+
+#if 0
+ av_log(ctx, AV_LOG_DEBUG, "bumping scr, scr:%f, dts:%f\n",
+ scr/90000.0, best_dts/90000.0);
+#endif
+ if(best_dts == INT64_MAX)
+ return 0;
+
+ if(scr >= best_dts+1 && !ignore_constraints){
+ av_log(ctx, AV_LOG_ERROR, "packet too large, ignoring buffer limits to mux it\n");
+ ignore_constraints= 1;
+ }
+ scr= FFMAX(best_dts+1, scr);
+ if(remove_decoded_packets(ctx, scr) < 0)
+ return -1;
+ goto retry;
+ }
+
+ assert(best_i >= 0);
+
+ st = ctx->streams[best_i];
+ stream = st->priv_data;
+
+ assert(av_fifo_size(&stream->fifo) > 0);
+
+ assert(avail_space >= s->packet_size || ignore_constraints);
+
+ timestamp_packet= stream->premux_packet;
+ if(timestamp_packet->unwritten_size == timestamp_packet->size){
+ trailer_size= 0;
+ }else{
+ trailer_size= timestamp_packet->unwritten_size;
+ timestamp_packet= timestamp_packet->next;
+ }
+
+ if(timestamp_packet){
+//av_log(ctx, AV_LOG_DEBUG, "dts:%f pts:%f scr:%f stream:%d\n", timestamp_packet->dts/90000.0, timestamp_packet->pts/90000.0, scr/90000.0, best_i);
+ es_size= flush_packet(ctx, best_i, timestamp_packet->pts, timestamp_packet->dts, scr, trailer_size);
+ }else{
+ assert(av_fifo_size(&stream->fifo) == trailer_size);
+ es_size= flush_packet(ctx, best_i, AV_NOPTS_VALUE, AV_NOPTS_VALUE, scr, trailer_size);
+ }
+
+ if (s->is_vcd) {
+ /* Write one or more padding sectors, if necessary, to reach
+ the constant overall bitrate.*/
+ int vcd_pad_bytes;
+
+ while((vcd_pad_bytes = get_vcd_padding_size(ctx,stream->premux_packet->pts) ) >= s->packet_size){ //FIXME pts cannot be correct here
+ put_vcd_padding_sector(ctx);
+ s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
+ }
+ }
+
+ stream->buffer_index += es_size;
+ s->last_scr += s->packet_size*90000LL / (s->mux_rate*50LL); //FIXME rounding and first few bytes of each packet
+
+ while(stream->premux_packet && stream->premux_packet->unwritten_size <= es_size){
+ es_size -= stream->premux_packet->unwritten_size;
+ stream->premux_packet= stream->premux_packet->next;
+ }
+ if(es_size)
+ stream->premux_packet->unwritten_size -= es_size;
+
+ if(remove_decoded_packets(ctx, s->last_scr) < 0)
+ return -1;
+
+ return 1;
+}
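+
+/* Editorial note, not part of the original commit: the stream selection above
+ favours the stream whose decoder buffer has the most relative free space.
+ With hypothetical values max_buffer_size = 200000 and buffer_index = 50000,
+ space = 150000 and rel_space = 1024 * 150000 / 200000 = 768; a stream with a
+ fuller buffer scores lower and is muxed later. When no stream qualifies, the
+ SCR is bumped just past the earliest pending DTS so that already-delivered
+ packets can be released from the buffer model and muxing can continue. */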
+
+static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
+{
+ MpegMuxContext *s = ctx->priv_data;
+ int stream_index= pkt->stream_index;
+ int size= pkt->size;
+ uint8_t *buf= pkt->data;
+ AVStream *st = ctx->streams[stream_index];
+ StreamInfo *stream = st->priv_data;
+ int64_t pts, dts;
+ PacketDesc *pkt_desc;
+ const int preload= av_rescale(ctx->preload, 90000, AV_TIME_BASE);
+ const int is_iframe = st->codec->codec_type == CODEC_TYPE_VIDEO && (pkt->flags & PKT_FLAG_KEY);
+
+ pts= pkt->pts;
+ dts= pkt->dts;
+
+ if(pts != AV_NOPTS_VALUE) pts += preload;
+ if(dts != AV_NOPTS_VALUE) dts += preload;
+
+//av_log(ctx, AV_LOG_DEBUG, "dts:%f pts:%f flags:%d stream:%d nopts:%d\n", dts/90000.0, pts/90000.0, pkt->flags, pkt->stream_index, pts != AV_NOPTS_VALUE);
+ if (!stream->premux_packet)
+ stream->next_packet = &stream->premux_packet;
+ *stream->next_packet=
+ pkt_desc= av_mallocz(sizeof(PacketDesc));
+ pkt_desc->pts= pts;
+ pkt_desc->dts= dts;
+ pkt_desc->unwritten_size=
+ pkt_desc->size= size;
+ if(!stream->predecode_packet)
+ stream->predecode_packet= pkt_desc;
+ stream->next_packet= &pkt_desc->next;
+
+ av_fifo_realloc(&stream->fifo, av_fifo_size(&stream->fifo) + size + 1);
+
+ if (s->is_dvd){
+ if (is_iframe && (s->packet_number == 0 || (pts - stream->vobu_start_pts >= 36000))) { // min VOBU length 0.4 seconds (mpucoder)
+ stream->bytes_to_iframe = av_fifo_size(&stream->fifo);
+ stream->align_iframe = 1;
+ stream->vobu_start_pts = pts;
+ } else {
+ stream->align_iframe = 0;
+ }
+ }
+
+ av_fifo_write(&stream->fifo, buf, size);
+
+ for(;;){
+ int ret= output_packet(ctx, 0);
+ if(ret<=0)
+ return ret;
+ }
+}
+
+static int mpeg_mux_end(AVFormatContext *ctx)
+{
+// MpegMuxContext *s = ctx->priv_data;
+ StreamInfo *stream;
+ int i;
+
+ for(;;){
+ int ret= output_packet(ctx, 1);
+ if(ret<0)
+ return ret;
+ else if(ret==0)
+ break;
+ }
+
+ /* End header according to MPEG1 systems standard. We do not write
+ it as it is usually not needed by decoders and because it
+ complicates MPEG stream concatenation. */
+ //put_be32(&ctx->pb, ISO_11172_END_CODE);
+ //put_flush_packet(&ctx->pb);
+
+ for(i=0;i<ctx->nb_streams;i++) {
+ stream = ctx->streams[i]->priv_data;
+
+ assert(av_fifo_size(&stream->fifo) == 0);
+ av_fifo_free(&stream->fifo);
+ }
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/*********************************************/
+/* demux code */
+
+#define MAX_SYNC_SIZE 100000
+
+static int cdxa_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'R' && p->buf[1] == 'I' &&
+ p->buf[2] == 'F' && p->buf[3] == 'F' &&
+ p->buf[8] == 'C' && p->buf[9] == 'D' &&
+ p->buf[10] == 'X' && p->buf[11] == 'A')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int mpegps_probe(AVProbeData *p)
+{
+ uint32_t code= -1;
+ int sys=0, pspack=0, priv1=0, vid=0, audio=0;
+ int i;
+ int score=0;
+
+ score = cdxa_probe(p);
+ if (score > 0) return score;
+
+ /* Search for MPEG stream */
+ for(i=0; i<p->buf_size; i++){
+ code = (code<<8) + p->buf[i];
+ if ((code & 0xffffff00) == 0x100) {
+ if(code == SYSTEM_HEADER_START_CODE) sys++;
+ else if(code == PRIVATE_STREAM_1) priv1++;
+ else if(code == PACK_START_CODE) pspack++;
+ else if((code & 0xf0) == VIDEO_ID) vid++;
+ else if((code & 0xe0) == AUDIO_ID) audio++;
+ }
+ }
+
+ if(vid || audio) /* invalid VDR files and short PES streams */
+ score= AVPROBE_SCORE_MAX/4;
+
+//av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d\n", sys, priv1, pspack,vid, audio);
+ if(sys && sys*9 <= pspack*10)
+ return AVPROBE_SCORE_MAX/2+2; // +1 for .mpg
+ if((priv1 || vid || audio) && (priv1+vid+audio)*9 <= pspack*10)
+ return AVPROBE_SCORE_MAX/2+2; // +1 for .mpg
+ if((!!vid ^ !!audio) && (audio+vid > 1) && !sys && !pspack) /* PES stream */
+ return AVPROBE_SCORE_MAX/2+2;
+
+ //02-Penguin.flac has sys:0 priv1:0 pspack:0 vid:0 audio:1
+ return score;
+}
+
+
+typedef struct MpegDemuxContext {
+ int32_t header_state;
+ unsigned char psm_es_type[256];
+} MpegDemuxContext;
+
+static int mpegps_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MpegDemuxContext *m = s->priv_data;
+ m->header_state = 0xff;
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+
+ /* no need to do more */
+ return 0;
+}
+
+static int64_t get_pts(ByteIOContext *pb, int c)
+{
+ int64_t pts;
+ int val;
+
+ if (c < 0)
+ c = get_byte(pb);
+ pts = (int64_t)((c >> 1) & 0x07) << 30;
+ val = get_be16(pb);
+ pts |= (int64_t)(val >> 1) << 15;
+ val = get_be16(pb);
+ pts |= (int64_t)(val >> 1);
+ return pts;
+}
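+
+#if 0 /* Editorial sketch, not part of the original commit: the inverse of
+ get_pts() above, shown only to document the 33-bit timestamp layout.
+ The value is split into 3 + 15 + 15 bits, each group followed by a
+ marker bit, and the 4-bit 'id' prefix (0x2, 0x3 or 0x1, as used with
+ put_timestamp() in flush_packet() above) occupies the top nibble of
+ the first byte. */
+static void put_pts_sketch(ByteIOContext *pb, int id, int64_t pts)
+{
+ put_byte(pb, (id << 4) | (((pts >> 30) & 0x07) << 1) | 1);
+ put_be16(pb, (int)((((pts >> 15) & 0x7fff) << 1) | 1));
+ put_be16(pb, (int)(((pts & 0x7fff) << 1) | 1));
+}
+#endif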
+
+static int find_next_start_code(ByteIOContext *pb, int *size_ptr,
+ int32_t *header_state)
+{
+ unsigned int state, v;
+ int val, n;
+
+ state = *header_state;
+ n = *size_ptr;
+ while (n > 0) {
+ if (url_feof(pb))
+ break;
+ v = get_byte(pb);
+ n--;
+ if (state == 0x000001) {
+ state = ((state << 8) | v) & 0xffffff;
+ val = state;
+ goto found;
+ }
+ state = ((state << 8) | v) & 0xffffff;
+ }
+ val = -1;
+ found:
+ *header_state = state;
+ *size_ptr = n;
+ return val;
+}
+
+#if 0 /* unused, remove? */
+/* XXX: optimize */
+static int find_prev_start_code(ByteIOContext *pb, int *size_ptr)
+{
+ int64_t pos, pos_start;
+ int max_size, start_code;
+
+ max_size = *size_ptr;
+ pos_start = url_ftell(pb);
+
+ /* in order to go faster, we fill the buffer */
+ pos = pos_start - 16386;
+ if (pos < 0)
+ pos = 0;
+ url_fseek(pb, pos, SEEK_SET);
+ get_byte(pb);
+
+ pos = pos_start;
+ for(;;) {
+ pos--;
+ if (pos < 0 || (pos_start - pos) >= max_size) {
+ start_code = -1;
+ goto the_end;
+ }
+ url_fseek(pb, pos, SEEK_SET);
+ start_code = get_be32(pb);
+ if ((start_code & 0xffffff00) == 0x100)
+ break;
+ }
+ the_end:
+ *size_ptr = pos_start - pos;
+ return start_code;
+}
+#endif
+
+/**
+ * Extract stream types from a program stream map
+ * according to ISO/IEC 13818-1 ('MPEG-2 Systems') table 2-35.
+ *
+ * @return number of bytes occupied by PSM in the bitstream
+ */
+static long mpegps_psm_parse(MpegDemuxContext *m, ByteIOContext *pb)
+{
+ int psm_length, ps_info_length, es_map_length;
+
+ psm_length = get_be16(pb);
+ get_byte(pb);
+ get_byte(pb);
+ ps_info_length = get_be16(pb);
+
+ /* skip program_stream_info */
+ url_fskip(pb, ps_info_length);
+ es_map_length = get_be16(pb);
+
+ /* at least one es available? */
+ while (es_map_length >= 4){
+ unsigned char type = get_byte(pb);
+ unsigned char es_id = get_byte(pb);
+ uint16_t es_info_length = get_be16(pb);
+ /* remember mapping from stream id to stream type */
+ m->psm_es_type[es_id] = type;
+ /* skip program_stream_info */
+ url_fskip(pb, es_info_length);
+ es_map_length -= 4 + es_info_length;
+ }
+ get_be32(pb); /* crc32 */
+ return 2 + psm_length;
+}
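+
+/* Editorial note, not part of the original commit: a sizing example for the
+ parser above. A hypothetical PSM that maps a single elementary stream and
+ carries no descriptors has program_stream_info_length = 0,
+ elementary_stream_map_length = 4 (one 4-byte entry) and therefore
+ program_stream_map_length = 2 + 2 + 0 + 2 + 4 + 4 = 14 (flag bytes, info
+ length, map length, one entry, CRC-32). The function then returns
+ 2 + 14 = 16, the number of bytes consumed after the 0x000001BC start code. */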
+
+/* read the next PES header. Return its position in ppos
+ (if not NULL), and its start code, pts and dts.
+ */
+static int mpegps_read_pes_header(AVFormatContext *s,
+ int64_t *ppos, int *pstart_code,
+ int64_t *ppts, int64_t *pdts)
+{
+ MpegDemuxContext *m = s->priv_data;
+ int len, size, startcode, c, flags, header_len;
+ int64_t pts, dts, last_pos;
+
+ last_pos = -1;
+ redo:
+ /* next start code (should be immediately after) */
+ m->header_state = 0xff;
+ size = MAX_SYNC_SIZE;
+ startcode = find_next_start_code(&s->pb, &size, &m->header_state);
+ //printf("startcode=%x pos=0x%"PRIx64"\n", startcode, url_ftell(&s->pb));
+ if (startcode < 0)
+ return AVERROR_IO;
+ if (startcode == PACK_START_CODE)
+ goto redo;
+ if (startcode == SYSTEM_HEADER_START_CODE)
+ goto redo;
+ if (startcode == PADDING_STREAM ||
+ startcode == PRIVATE_STREAM_2) {
+ /* skip them */
+ len = get_be16(&s->pb);
+ url_fskip(&s->pb, len);
+ goto redo;
+ }
+ if (startcode == PROGRAM_STREAM_MAP) {
+ mpegps_psm_parse(m, &s->pb);
+ goto redo;
+ }
+
+ /* find matching stream */
+ if (!((startcode >= 0x1c0 && startcode <= 0x1df) ||
+ (startcode >= 0x1e0 && startcode <= 0x1ef) ||
+ (startcode == 0x1bd)))
+ goto redo;
+ if (ppos) {
+ *ppos = url_ftell(&s->pb) - 4;
+ }
+ len = get_be16(&s->pb);
+ pts = AV_NOPTS_VALUE;
+ dts = AV_NOPTS_VALUE;
+ /* stuffing */
+ for(;;) {
+ if (len < 1)
+ goto redo;
+ c = get_byte(&s->pb);
+ len--;
+ /* XXX: for mpeg1, should test only bit 7 */
+ if (c != 0xff)
+ break;
+ }
+ if ((c & 0xc0) == 0x40) {
+ /* buffer scale & size */
+ if (len < 2)
+ goto redo;
+ get_byte(&s->pb);
+ c = get_byte(&s->pb);
+ len -= 2;
+ }
+ if ((c & 0xf0) == 0x20) {
+ if (len < 4)
+ goto redo;
+ dts = pts = get_pts(&s->pb, c);
+ len -= 4;
+ } else if ((c & 0xf0) == 0x30) {
+ if (len < 9)
+ goto redo;
+ pts = get_pts(&s->pb, c);
+ dts = get_pts(&s->pb, -1);
+ len -= 9;
+ } else if ((c & 0xc0) == 0x80) {
+ /* mpeg 2 PES */
+#if 0 /* some streams have this field set for no apparent reason */
+ if ((c & 0x30) != 0) {
+ /* Encrypted multiplex not handled */
+ goto redo;
+ }
+#endif
+ flags = get_byte(&s->pb);
+ header_len = get_byte(&s->pb);
+ len -= 2;
+ if (header_len > len)
+ goto redo;
+ if ((flags & 0xc0) == 0x80) {
+ dts = pts = get_pts(&s->pb, -1);
+ if (header_len < 5)
+ goto redo;
+ header_len -= 5;
+ len -= 5;
+ } else if ((flags & 0xc0) == 0xc0) {
+ pts = get_pts(&s->pb, -1);
+ dts = get_pts(&s->pb, -1);
+ if (header_len < 10)
+ goto redo;
+ header_len -= 10;
+ len -= 10;
+ }
+ len -= header_len;
+ while (header_len > 0) {
+ get_byte(&s->pb);
+ header_len--;
+ }
+ }
+ else if( c!= 0xf )
+ goto redo;
+
+ if (startcode == PRIVATE_STREAM_1 && !m->psm_es_type[startcode & 0xff]) {
+ if (len < 1)
+ goto redo;
+ startcode = get_byte(&s->pb);
+ len--;
+ if (startcode >= 0x80 && startcode <= 0xbf) {
+ /* audio: skip header */
+ if (len < 3)
+ goto redo;
+ get_byte(&s->pb);
+ get_byte(&s->pb);
+ get_byte(&s->pb);
+ len -= 3;
+ }
+ }
+ if(dts != AV_NOPTS_VALUE && ppos){
+ int i;
+ for(i=0; i<s->nb_streams; i++){
+ if(startcode == s->streams[i]->id) {
+ av_add_index_entry(s->streams[i], *ppos, dts, 0, 0, AVINDEX_KEYFRAME /* FIXME keyframe? */);
+ }
+ }
+ }
+
+ *pstart_code = startcode;
+ *ppts = pts;
+ *pdts = dts;
+ return len;
+}
+
+static int mpegps_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MpegDemuxContext *m = s->priv_data;
+ AVStream *st;
+ int len, startcode, i, type, codec_id = 0, es_type;
+ int64_t pts, dts, dummy_pos; //dummy_pos is needed for the index building to work
+
+ redo:
+ len = mpegps_read_pes_header(s, &dummy_pos, &startcode, &pts, &dts);
+ if (len < 0)
+ return len;
+
+ /* now find stream */
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ if (st->id == startcode)
+ goto found;
+ }
+
+ es_type = m->psm_es_type[startcode & 0xff];
+ if(es_type > 0){
+ if(es_type == STREAM_TYPE_VIDEO_MPEG1){
+ codec_id = CODEC_ID_MPEG2VIDEO;
+ type = CODEC_TYPE_VIDEO;
+ } else if(es_type == STREAM_TYPE_VIDEO_MPEG2){
+ codec_id = CODEC_ID_MPEG2VIDEO;
+ type = CODEC_TYPE_VIDEO;
+ } else if(es_type == STREAM_TYPE_AUDIO_MPEG1 ||
+ es_type == STREAM_TYPE_AUDIO_MPEG2){
+ codec_id = CODEC_ID_MP3;
+ type = CODEC_TYPE_AUDIO;
+ } else if(es_type == STREAM_TYPE_AUDIO_AAC){
+ codec_id = CODEC_ID_AAC;
+ type = CODEC_TYPE_AUDIO;
+ } else if(es_type == STREAM_TYPE_VIDEO_MPEG4){
+ codec_id = CODEC_ID_MPEG4;
+ type = CODEC_TYPE_VIDEO;
+ } else if(es_type == STREAM_TYPE_VIDEO_H264){
+ codec_id = CODEC_ID_H264;
+ type = CODEC_TYPE_VIDEO;
+ } else if(es_type == STREAM_TYPE_AUDIO_AC3){
+ codec_id = CODEC_ID_AC3;
+ type = CODEC_TYPE_AUDIO;
+ } else {
+ goto skip;
+ }
+ } else if (startcode >= 0x1e0 && startcode <= 0x1ef) {
+ static const unsigned char avs_seqh[4] = { 0, 0, 1, 0xb0 };
+ unsigned char buf[8];
+ get_buffer(&s->pb, buf, 8);
+ url_fseek(&s->pb, -8, SEEK_CUR);
+ if(!memcmp(buf, avs_seqh, 4) && (buf[6] != 0 || buf[7] != 1))
+ codec_id = CODEC_ID_CAVS;
+ else
+ codec_id = CODEC_ID_MPEG2VIDEO;
+ type = CODEC_TYPE_VIDEO;
+ } else if (startcode >= 0x1c0 && startcode <= 0x1df) {
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_MP2;
+ } else if (startcode >= 0x80 && startcode <= 0x87) {
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_AC3;
+ } else if (startcode >= 0x88 && startcode <= 0x9f) {
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_DTS;
+ } else if (startcode >= 0xa0 && startcode <= 0xbf) {
+ type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_PCM_S16BE;
+ } else if (startcode >= 0x20 && startcode <= 0x3f) {
+ type = CODEC_TYPE_SUBTITLE;
+ codec_id = CODEC_ID_DVD_SUBTITLE;
+ } else {
+ skip:
+ /* skip packet */
+ url_fskip(&s->pb, len);
+ goto redo;
+ }
+ /* no stream found: add a new stream */
+ st = av_new_stream(s, startcode);
+ if (!st)
+ goto skip;
+ st->codec->codec_type = type;
+ st->codec->codec_id = codec_id;
+ if (codec_id != CODEC_ID_PCM_S16BE)
+ st->need_parsing = 1;
+ found:
+ if(st->discard >= AVDISCARD_ALL)
+ goto skip;
+ if (startcode >= 0xa0 && startcode <= 0xbf) {
+ int b1, freq;
+
+ /* for LPCM, we just skip the header and consider it is raw
+ audio data */
+ if (len <= 3)
+ goto skip;
+ get_byte(&s->pb); /* emphasis (1), muse(1), reserved(1), frame number(5) */
+ b1 = get_byte(&s->pb); /* quant (2), freq(2), reserved(1), channels(3) */
+ get_byte(&s->pb); /* dynamic range control (0x80 = off) */
+ len -= 3;
+ freq = (b1 >> 4) & 3;
+ st->codec->sample_rate = lpcm_freq_tab[freq];
+ st->codec->channels = 1 + (b1 & 7);
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate * 2;
+ }
+ av_new_packet(pkt, len);
+ get_buffer(&s->pb, pkt->data, pkt->size);
+ pkt->pts = pts;
+ pkt->dts = dts;
+ pkt->stream_index = st->index;
+#if 0
+ av_log(s, AV_LOG_DEBUG, "%d: pts=%0.3f dts=%0.3f size=%d\n",
+ pkt->stream_index, pkt->pts / 90000.0, pkt->dts / 90000.0, pkt->size);
+#endif
+
+ return 0;
+}
+
+static int mpegps_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int64_t mpegps_read_dts(AVFormatContext *s, int stream_index,
+ int64_t *ppos, int64_t pos_limit)
+{
+ int len, startcode;
+ int64_t pos, pts, dts;
+
+ pos = *ppos;
+#ifdef DEBUG_SEEK
+ printf("read_dts: pos=0x%"PRIx64" next=%d -> ", pos, find_next);
+#endif
+ url_fseek(&s->pb, pos, SEEK_SET);
+ for(;;) {
+ len = mpegps_read_pes_header(s, &pos, &startcode, &pts, &dts);
+ if (len < 0) {
+#ifdef DEBUG_SEEK
+ printf("none (ret=%d)\n", len);
+#endif
+ return AV_NOPTS_VALUE;
+ }
+ if (startcode == s->streams[stream_index]->id &&
+ dts != AV_NOPTS_VALUE) {
+ break;
+ }
+ url_fskip(&s->pb, len);
+ }
+#ifdef DEBUG_SEEK
+ printf("pos=0x%"PRIx64" dts=0x%"PRIx64" %0.3f\n", pos, dts, dts / 90000.0);
+#endif
+ *ppos = pos;
+ return dts;
+}
+
+#ifdef CONFIG_MPEG1SYSTEM_MUXER
+AVOutputFormat mpeg1system_muxer = {
+ "mpeg",
+ "MPEG1 System format",
+ "video/mpeg",
+ "mpg,mpeg",
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG1VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+#ifdef CONFIG_MPEG1VCD_MUXER
+AVOutputFormat mpeg1vcd_muxer = {
+ "vcd",
+ "MPEG1 System format (VCD)",
+ "video/mpeg",
+ NULL,
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG1VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+#ifdef CONFIG_MPEG2VOB_MUXER
+AVOutputFormat mpeg2vob_muxer = {
+ "vob",
+ "MPEG2 PS format (VOB)",
+ "video/mpeg",
+ "vob",
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG2VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+
+/* Same as mpeg2vob_muxer except that the pack size is 2324 */
+#ifdef CONFIG_MPEG2SVCD_MUXER
+AVOutputFormat mpeg2svcd_muxer = {
+ "svcd",
+ "MPEG2 PS format (VOB)",
+ "video/mpeg",
+ "vob",
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG2VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+
+/* Same as mpeg2vob_muxer except that the 'is_dvd' flag is set to produce NAV packets */
+#ifdef CONFIG_MPEG2DVD_MUXER
+AVOutputFormat mpeg2dvd_muxer = {
+ "dvd",
+ "MPEG2 PS format (DVD VOB)",
+ "video/mpeg",
+ "dvd",
+ sizeof(MpegMuxContext),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG2VIDEO,
+ mpeg_mux_init,
+ mpeg_mux_write_packet,
+ mpeg_mux_end,
+};
+#endif
+
+#ifdef CONFIG_MPEGPS_DEMUXER
+AVInputFormat mpegps_demuxer = {
+ "mpeg",
+ "MPEG PS format",
+ sizeof(MpegDemuxContext),
+ mpegps_probe,
+ mpegps_read_header,
+ mpegps_read_packet,
+ mpegps_read_close,
+ NULL, //mpegps_read_seek,
+ mpegps_read_dts,
+ .flags = AVFMT_SHOW_IDS,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/mpegts.c b/contrib/ffmpeg/libavformat/mpegts.c
new file mode 100644
index 000000000..dd5f0adca
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpegts.c
@@ -0,0 +1,1527 @@
+/*
+ * MPEG2 transport stream (aka DVB) demuxer
+ * Copyright (c) 2002-2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "crc.h"
+#include "mpegts.h"
+
+//#define DEBUG_SI
+//#define DEBUG_SEEK
+
+/* about 2 seconds at 24 Mbit/s (32000 packets * 188 bytes * 8 bits) */
+#define MAX_SCAN_PACKETS 32000
+
+/* maximum size in which we look for synchronisation if
+ synchronisation is lost */
+#define MAX_RESYNC_SIZE 4096
+
+typedef struct PESContext PESContext;
+
+static PESContext* add_pes_stream(MpegTSContext *ts, int pid, int stream_type);
+static AVStream* new_pes_av_stream(PESContext *pes, uint32_t code);
+
+enum MpegTSFilterType {
+ MPEGTS_PES,
+ MPEGTS_SECTION,
+};
+
+typedef void PESCallback(void *opaque, const uint8_t *buf, int len, int is_start);
+
+typedef struct MpegTSPESFilter {
+ PESCallback *pes_cb;
+ void *opaque;
+} MpegTSPESFilter;
+
+typedef void SectionCallback(void *opaque, const uint8_t *buf, int len);
+
+typedef void SetServiceCallback(void *opaque, int ret);
+
+typedef struct MpegTSSectionFilter {
+ int section_index;
+ int section_h_size;
+ uint8_t *section_buf;
+ int check_crc:1;
+ int end_of_section_reached:1;
+ SectionCallback *section_cb;
+ void *opaque;
+} MpegTSSectionFilter;
+
+typedef struct MpegTSFilter {
+ int pid;
+ int last_cc; /* last cc code (-1 if first packet) */
+ enum MpegTSFilterType type;
+ union {
+ MpegTSPESFilter pes_filter;
+ MpegTSSectionFilter section_filter;
+ } u;
+} MpegTSFilter;
+
+typedef struct MpegTSService {
+ int running:1;
+ int sid;
+ char *provider_name;
+ char *name;
+} MpegTSService;
+
+struct MpegTSContext {
+ /* user data */
+ AVFormatContext *stream;
+ int raw_packet_size; /* raw packet size, including FEC if present */
+ int auto_guess; /* if true, all pids are analyzed to find streams */
+ int set_service_ret;
+
+ int mpeg2ts_raw; /* force raw MPEG2 transport stream output, if possible */
+ int mpeg2ts_compute_pcr; /* compute exact PCR for each transport stream packet */
+
+ /* used to estimate the exact PCR */
+ int64_t cur_pcr;
+ int pcr_incr;
+ int pcr_pid;
+
+ /* data needed to handle file based ts */
+ int stop_parse; /* stop parsing loop */
+ AVPacket *pkt; /* packet containing av data */
+
+ /******************************************/
+ /* private mpegts data */
+ /* scan context */
+ MpegTSFilter *sdt_filter;
+ int nb_services;
+ MpegTSService **services;
+
+ /* set service context (XXX: allocated it ?) */
+ SetServiceCallback *set_service_cb;
+ void *set_service_opaque;
+ MpegTSFilter *pat_filter;
+ MpegTSFilter *pmt_filter;
+ int req_sid;
+
+ MpegTSFilter *pids[NB_PID_MAX];
+};
+
+static void write_section_data(AVFormatContext *s, MpegTSFilter *tss1,
+ const uint8_t *buf, int buf_size, int is_start)
+{
+ MpegTSSectionFilter *tss = &tss1->u.section_filter;
+ int len;
+
+ if (is_start) {
+ memcpy(tss->section_buf, buf, buf_size);
+ tss->section_index = buf_size;
+ tss->section_h_size = -1;
+ tss->end_of_section_reached = 0;
+ } else {
+ if (tss->end_of_section_reached)
+ return;
+ len = 4096 - tss->section_index;
+ if (buf_size < len)
+ len = buf_size;
+ memcpy(tss->section_buf + tss->section_index, buf, len);
+ tss->section_index += len;
+ }
+
+ /* compute section length if possible */
+ if (tss->section_h_size == -1 && tss->section_index >= 3) {
+ len = (((tss->section_buf[1] & 0xf) << 8) | tss->section_buf[2]) + 3;
+ if (len > 4096)
+ return;
+ tss->section_h_size = len;
+ }
+
+ if (tss->section_h_size != -1 && tss->section_index >= tss->section_h_size) {
+ tss->end_of_section_reached = 1;
+ if (!tss->check_crc ||
+ av_crc(av_crc04C11DB7, -1, tss->section_buf, tss->section_h_size) == 0)
+ tss->section_cb(tss->opaque, tss->section_buf, tss->section_h_size);
+ }
+}
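+
+/* Editorial note, not part of the original commit: the section size test above
+ decodes the standard PSI section_length field. With hypothetical header
+ bytes section_buf[1] = 0xb0 and section_buf[2] = 0x11, section_length is
+ ((0xb0 & 0x0f) << 8) | 0x11 = 17, and adding the 3 header bytes gives
+ section_h_size = 20; the callback fires once 20 bytes have accumulated and
+ the CRC (when checked) comes out to 0. */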
+
+static MpegTSFilter *mpegts_open_section_filter(MpegTSContext *ts, unsigned int pid,
+ SectionCallback *section_cb, void *opaque,
+ int check_crc)
+
+{
+ MpegTSFilter *filter;
+ MpegTSSectionFilter *sec;
+
+#ifdef DEBUG_SI
+ printf("Filter: pid=0x%x\n", pid);
+#endif
+ if (pid >= NB_PID_MAX || ts->pids[pid])
+ return NULL;
+ filter = av_mallocz(sizeof(MpegTSFilter));
+ if (!filter)
+ return NULL;
+ ts->pids[pid] = filter;
+ filter->type = MPEGTS_SECTION;
+ filter->pid = pid;
+ filter->last_cc = -1;
+ sec = &filter->u.section_filter;
+ sec->section_cb = section_cb;
+ sec->opaque = opaque;
+ sec->section_buf = av_malloc(MAX_SECTION_SIZE);
+ sec->check_crc = check_crc;
+ if (!sec->section_buf) {
+ av_free(filter);
+ return NULL;
+ }
+ return filter;
+}
+
+static MpegTSFilter *mpegts_open_pes_filter(MpegTSContext *ts, unsigned int pid,
+ PESCallback *pes_cb,
+ void *opaque)
+{
+ MpegTSFilter *filter;
+ MpegTSPESFilter *pes;
+
+ if (pid >= NB_PID_MAX || ts->pids[pid])
+ return NULL;
+ filter = av_mallocz(sizeof(MpegTSFilter));
+ if (!filter)
+ return NULL;
+ ts->pids[pid] = filter;
+ filter->type = MPEGTS_PES;
+ filter->pid = pid;
+ filter->last_cc = -1;
+ pes = &filter->u.pes_filter;
+ pes->pes_cb = pes_cb;
+ pes->opaque = opaque;
+ return filter;
+}
+
+static void mpegts_close_filter(MpegTSContext *ts, MpegTSFilter *filter)
+{
+ int pid;
+
+ pid = filter->pid;
+ if (filter->type == MPEGTS_SECTION)
+ av_freep(&filter->u.section_filter.section_buf);
+ else if (filter->type == MPEGTS_PES)
+ av_freep(&filter->u.pes_filter.opaque);
+
+ av_free(filter);
+ ts->pids[pid] = NULL;
+}
+
+static int analyze(const uint8_t *buf, int size, int packet_size, int *index){
+ int stat[packet_size];
+ int i;
+ int x=0;
+ int best_score=0;
+
+ memset(stat, 0, packet_size*sizeof(int));
+
+ for(x=i=0; i<size; i++){
+ if(buf[i] == 0x47){
+ stat[x]++;
+ if(stat[x] > best_score){
+ best_score= stat[x];
+ if(index) *index= x;
+ }
+ }
+
+ x++;
+ if(x == packet_size) x= 0;
+ }
+
+ return best_score;
+}
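+
+/* Editorial note, not part of the original commit: analyze() counts how often
+ the TS sync byte 0x47 shows up at each offset modulo packet_size and reports
+ the best phase. For a clean 188-byte capture whose first sync byte sits at
+ offset 5, stat[5] gains one hit per packet, so a call such as
+ analyze(buf, size, TS_PACKET_SIZE, &idx) returns about size / 188 and sets
+ idx to 5; random data rarely scores more than a handful of hits. */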
+
+/* autodetect FEC presence. Needs at least TS_FEC_PACKET_SIZE * 5 + 1 bytes */
+static int get_packet_size(const uint8_t *buf, int size)
+{
+ int score, fec_score, dvhs_score;
+
+ if (size < (TS_FEC_PACKET_SIZE * 5 + 1))
+ return -1;
+
+ score = analyze(buf, size, TS_PACKET_SIZE, NULL);
+ dvhs_score = analyze(buf, size, TS_DVHS_PACKET_SIZE, NULL);
+ fec_score= analyze(buf, size, TS_FEC_PACKET_SIZE, NULL);
+// av_log(NULL, AV_LOG_DEBUG, "score: %d, dvhs_score: %d, fec_score: %d \n", score, dvhs_score, fec_score);
+
+ if (score > fec_score && score > dvhs_score) return TS_PACKET_SIZE;
+ else if(dvhs_score > score && dvhs_score > fec_score) return TS_DVHS_PACKET_SIZE;
+ else if(score < fec_score && dvhs_score < fec_score) return TS_FEC_PACKET_SIZE;
+ else return -1;
+}
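+
+/* Editorial note, not part of the original commit: the three candidate sizes
+ are TS_PACKET_SIZE = 188 (plain transport stream), TS_DVHS_PACKET_SIZE = 192
+ (188-byte packets plus a 4-byte timestamp, as produced by D-VHS style
+ captures) and TS_FEC_PACKET_SIZE = 204 (188 bytes plus 16 Reed-Solomon
+ parity bytes). Whichever phase count wins determines how many bytes
+ read_packet() later skips after each 188-byte payload. */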
+
+typedef struct SectionHeader {
+ uint8_t tid;
+ uint16_t id;
+ uint8_t version;
+ uint8_t sec_num;
+ uint8_t last_sec_num;
+} SectionHeader;
+
+static inline int get8(const uint8_t **pp, const uint8_t *p_end)
+{
+ const uint8_t *p;
+ int c;
+
+ p = *pp;
+ if (p >= p_end)
+ return -1;
+ c = *p++;
+ *pp = p;
+ return c;
+}
+
+static inline int get16(const uint8_t **pp, const uint8_t *p_end)
+{
+ const uint8_t *p;
+ int c;
+
+ p = *pp;
+ if ((p + 1) >= p_end)
+ return -1;
+ c = (p[0] << 8) | p[1];
+ p += 2;
+ *pp = p;
+ return c;
+}
+
+/* read and allocate a DVB string preceded by its length */
+static char *getstr8(const uint8_t **pp, const uint8_t *p_end)
+{
+ int len;
+ const uint8_t *p;
+ char *str;
+
+ p = *pp;
+ len = get8(&p, p_end);
+ if (len < 0)
+ return NULL;
+ if ((p + len) > p_end)
+ return NULL;
+ str = av_malloc(len + 1);
+ if (!str)
+ return NULL;
+ memcpy(str, p, len);
+ str[len] = '\0';
+ p += len;
+ *pp = p;
+ return str;
+}
+
+static int parse_section_header(SectionHeader *h,
+ const uint8_t **pp, const uint8_t *p_end)
+{
+ int val;
+
+ val = get8(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->tid = val;
+ *pp += 2;
+ val = get16(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->id = val;
+ val = get8(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->version = (val >> 1) & 0x1f;
+ val = get8(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->sec_num = val;
+ val = get8(pp, p_end);
+ if (val < 0)
+ return -1;
+ h->last_sec_num = val;
+ return 0;
+}
+
+static MpegTSService *new_service(MpegTSContext *ts, int sid,
+ char *provider_name, char *name)
+{
+ MpegTSService *service;
+
+#ifdef DEBUG_SI
+ printf("new_service: sid=0x%04x provider='%s' name='%s'\n",
+ sid, provider_name, name);
+#endif
+
+ service = av_mallocz(sizeof(MpegTSService));
+ if (!service)
+ return NULL;
+ service->sid = sid;
+ service->provider_name = provider_name;
+ service->name = name;
+ dynarray_add(&ts->services, &ts->nb_services, service);
+ return service;
+}
+
+static void pmt_cb(void *opaque, const uint8_t *section, int section_len)
+{
+ MpegTSContext *ts = opaque;
+ SectionHeader h1, *h = &h1;
+ PESContext *pes;
+ AVStream *st;
+ const uint8_t *p, *p_end, *desc_list_end, *desc_end;
+ int program_info_length, pcr_pid, pid, stream_type;
+ int desc_list_len, desc_len, desc_tag;
+ int comp_page = 0, anc_page = 0; /* initialize to kill warnings */
+ char language[4];
+
+#ifdef DEBUG_SI
+ printf("PMT:\n");
+ av_hex_dump(stdout, (uint8_t *)section, section_len);
+#endif
+ p_end = section + section_len - 4;
+ p = section;
+ if (parse_section_header(h, &p, p_end) < 0)
+ return;
+#ifdef DEBUG_SI
+ printf("sid=0x%x sec_num=%d/%d\n", h->id, h->sec_num, h->last_sec_num);
+#endif
+ if (h->tid != PMT_TID || (ts->req_sid >= 0 && h->id != ts->req_sid) )
+ return;
+
+ pcr_pid = get16(&p, p_end) & 0x1fff;
+ if (pcr_pid < 0)
+ return;
+ ts->pcr_pid = pcr_pid;
+#ifdef DEBUG_SI
+ printf("pcr_pid=0x%x\n", pcr_pid);
+#endif
+ program_info_length = get16(&p, p_end) & 0xfff;
+ if (program_info_length < 0)
+ return;
+ p += program_info_length;
+ if (p >= p_end)
+ return;
+ for(;;) {
+ language[0] = 0;
+ st = 0;
+ stream_type = get8(&p, p_end);
+ if (stream_type < 0)
+ break;
+ pid = get16(&p, p_end) & 0x1fff;
+ if (pid < 0)
+ break;
+ desc_list_len = get16(&p, p_end) & 0xfff;
+ if (desc_list_len < 0)
+ break;
+ desc_list_end = p + desc_list_len;
+ if (desc_list_end > p_end)
+ break;
+ for(;;) {
+ desc_tag = get8(&p, desc_list_end);
+ if (desc_tag < 0)
+ break;
+ if (stream_type == STREAM_TYPE_PRIVATE_DATA) {
+ if((desc_tag == 0x6A) || (desc_tag == 0x7A)) {
+ /*assume DVB AC-3 Audio*/
+ stream_type = STREAM_TYPE_AUDIO_AC3;
+ } else if(desc_tag == 0x7B) {
+ /* DVB DTS audio */
+ stream_type = STREAM_TYPE_AUDIO_DTS;
+ }
+ }
+ desc_len = get8(&p, desc_list_end);
+ desc_end = p + desc_len;
+ if (desc_end > desc_list_end)
+ break;
+#ifdef DEBUG_SI
+ printf("tag: 0x%02x len=%d\n", desc_tag, desc_len);
+#endif
+ switch(desc_tag) {
+ case DVB_SUBT_DESCID:
+ if (stream_type == STREAM_TYPE_PRIVATE_DATA)
+ stream_type = STREAM_TYPE_SUBTITLE_DVB;
+
+ language[0] = get8(&p, desc_end);
+ language[1] = get8(&p, desc_end);
+ language[2] = get8(&p, desc_end);
+ language[3] = 0;
+ get8(&p, desc_end);
+ comp_page = get16(&p, desc_end);
+ anc_page = get16(&p, desc_end);
+
+ break;
+ case 0x0a: /* ISO 639 language descriptor */
+ language[0] = get8(&p, desc_end);
+ language[1] = get8(&p, desc_end);
+ language[2] = get8(&p, desc_end);
+ language[3] = 0;
+ break;
+ default:
+ break;
+ }
+ p = desc_end;
+ }
+ p = desc_list_end;
+
+#ifdef DEBUG_SI
+ printf("stream_type=%d pid=0x%x\n", stream_type, pid);
+#endif
+
+ /* now create ffmpeg stream */
+ switch(stream_type) {
+ case STREAM_TYPE_AUDIO_MPEG1:
+ case STREAM_TYPE_AUDIO_MPEG2:
+ case STREAM_TYPE_VIDEO_MPEG1:
+ case STREAM_TYPE_VIDEO_MPEG2:
+ case STREAM_TYPE_VIDEO_MPEG4:
+ case STREAM_TYPE_VIDEO_H264:
+ case STREAM_TYPE_AUDIO_AAC:
+ case STREAM_TYPE_AUDIO_AC3:
+ case STREAM_TYPE_AUDIO_DTS:
+ case STREAM_TYPE_SUBTITLE_DVB:
+ pes = add_pes_stream(ts, pid, stream_type);
+ if (pes)
+ st = new_pes_av_stream(pes, 0);
+ break;
+ default:
+ /* we ignore the other streams */
+ break;
+ }
+
+ if (st) {
+ if (language[0] != 0) {
+ st->language[0] = language[0];
+ st->language[1] = language[1];
+ st->language[2] = language[2];
+ st->language[3] = language[3];
+ }
+
+ if (stream_type == STREAM_TYPE_SUBTITLE_DVB) {
+ st->codec->sub_id = (anc_page << 16) | comp_page;
+ }
+ }
+ }
+ /* all parameters are there */
+ ts->set_service_cb(ts->set_service_opaque, 0);
+ mpegts_close_filter(ts, ts->pmt_filter);
+ ts->pmt_filter = NULL;
+}
+
+static void pat_cb(void *opaque, const uint8_t *section, int section_len)
+{
+ MpegTSContext *ts = opaque;
+ SectionHeader h1, *h = &h1;
+ const uint8_t *p, *p_end;
+ int sid, pmt_pid;
+
+#ifdef DEBUG_SI
+ printf("PAT:\n");
+ av_hex_dump(stdout, (uint8_t *)section, section_len);
+#endif
+ p_end = section + section_len - 4;
+ p = section;
+ if (parse_section_header(h, &p, p_end) < 0)
+ return;
+ if (h->tid != PAT_TID)
+ return;
+
+ for(;;) {
+ sid = get16(&p, p_end);
+ if (sid < 0)
+ break;
+ pmt_pid = get16(&p, p_end) & 0x1fff;
+ if (pmt_pid < 0)
+ break;
+#ifdef DEBUG_SI
+ printf("sid=0x%x pid=0x%x\n", sid, pmt_pid);
+#endif
+ if (sid == 0x0000) {
+ /* NIT info */
+ } else {
+ if (ts->req_sid == sid) {
+ ts->pmt_filter = mpegts_open_section_filter(ts, pmt_pid,
+ pmt_cb, ts, 1);
+ goto found;
+ }
+ }
+ }
+ /* not found */
+ ts->set_service_cb(ts->set_service_opaque, -1);
+
+ found:
+ mpegts_close_filter(ts, ts->pat_filter);
+ ts->pat_filter = NULL;
+}
+
+/* add all services found in the PAT */
+static void pat_scan_cb(void *opaque, const uint8_t *section, int section_len)
+{
+ MpegTSContext *ts = opaque;
+ SectionHeader h1, *h = &h1;
+ const uint8_t *p, *p_end;
+ int sid, pmt_pid;
+ char *provider_name, *name;
+ char buf[256];
+
+#ifdef DEBUG_SI
+ printf("PAT:\n");
+ av_hex_dump(stdout, (uint8_t *)section, section_len);
+#endif
+ p_end = section + section_len - 4;
+ p = section;
+ if (parse_section_header(h, &p, p_end) < 0)
+ return;
+ if (h->tid != PAT_TID)
+ return;
+
+ for(;;) {
+ sid = get16(&p, p_end);
+ if (sid < 0)
+ break;
+ pmt_pid = get16(&p, p_end) & 0x1fff;
+ if (pmt_pid < 0)
+ break;
+#ifdef DEBUG_SI
+ printf("sid=0x%x pid=0x%x\n", sid, pmt_pid);
+#endif
+ if (sid == 0x0000) {
+ /* NIT info */
+ } else {
+ /* add the service with a dummy name */
+ snprintf(buf, sizeof(buf), "Service %x\n", sid);
+ name = av_strdup(buf);
+ provider_name = av_strdup("");
+ if (name && provider_name) {
+ new_service(ts, sid, provider_name, name);
+ } else {
+ av_freep(&name);
+ av_freep(&provider_name);
+ }
+ }
+ }
+ ts->stop_parse = 1;
+
+ /* remove filter */
+ mpegts_close_filter(ts, ts->pat_filter);
+ ts->pat_filter = NULL;
+}
+
+static void mpegts_set_service(MpegTSContext *ts, int sid,
+ SetServiceCallback *set_service_cb, void *opaque)
+{
+ ts->set_service_cb = set_service_cb;
+ ts->set_service_opaque = opaque;
+ ts->req_sid = sid;
+ ts->pat_filter = mpegts_open_section_filter(ts, PAT_PID,
+ pat_cb, ts, 1);
+}
+
+static void sdt_cb(void *opaque, const uint8_t *section, int section_len)
+{
+ MpegTSContext *ts = opaque;
+ SectionHeader h1, *h = &h1;
+ const uint8_t *p, *p_end, *desc_list_end, *desc_end;
+ int onid, val, sid, desc_list_len, desc_tag, desc_len, service_type;
+ char *name, *provider_name;
+
+#ifdef DEBUG_SI
+ printf("SDT:\n");
+ av_hex_dump(stdout, (uint8_t *)section, section_len);
+#endif
+
+ p_end = section + section_len - 4;
+ p = section;
+ if (parse_section_header(h, &p, p_end) < 0)
+ return;
+ if (h->tid != SDT_TID)
+ return;
+ onid = get16(&p, p_end);
+ if (onid < 0)
+ return;
+ val = get8(&p, p_end);
+ if (val < 0)
+ return;
+ for(;;) {
+ sid = get16(&p, p_end);
+ if (sid < 0)
+ break;
+ val = get8(&p, p_end);
+ if (val < 0)
+ break;
+ desc_list_len = get16(&p, p_end) & 0xfff;
+ if (desc_list_len < 0)
+ break;
+ desc_list_end = p + desc_list_len;
+ if (desc_list_end > p_end)
+ break;
+ for(;;) {
+ desc_tag = get8(&p, desc_list_end);
+ if (desc_tag < 0)
+ break;
+ desc_len = get8(&p, desc_list_end);
+ desc_end = p + desc_len;
+ if (desc_end > desc_list_end)
+ break;
+#ifdef DEBUG_SI
+ printf("tag: 0x%02x len=%d\n", desc_tag, desc_len);
+#endif
+ switch(desc_tag) {
+ case 0x48:
+ service_type = get8(&p, p_end);
+ if (service_type < 0)
+ break;
+ provider_name = getstr8(&p, p_end);
+ if (!provider_name)
+ break;
+ name = getstr8(&p, p_end);
+ if (!name)
+ break;
+ new_service(ts, sid, provider_name, name);
+ break;
+ default:
+ break;
+ }
+ p = desc_end;
+ }
+ p = desc_list_end;
+ }
+ ts->stop_parse = 1;
+
+ /* remove filter */
+ mpegts_close_filter(ts, ts->sdt_filter);
+ ts->sdt_filter = NULL;
+}
+
+/* scan services in a transport stream by looking at the SDT */
+static void mpegts_scan_sdt(MpegTSContext *ts)
+{
+ ts->sdt_filter = mpegts_open_section_filter(ts, SDT_PID,
+ sdt_cb, ts, 1);
+}
+
+/* scan services in a transport stream by looking at the PAT (better
+ than nothing!) */
+static void mpegts_scan_pat(MpegTSContext *ts)
+{
+ ts->pat_filter = mpegts_open_section_filter(ts, PAT_PID,
+ pat_scan_cb, ts, 1);
+}
+
+/* TS stream handling */
+
+enum MpegTSState {
+ MPEGTS_HEADER = 0,
+ MPEGTS_PESHEADER_FILL,
+ MPEGTS_PAYLOAD,
+ MPEGTS_SKIP,
+};
+
+/* enough for PES header + length */
+#define PES_START_SIZE 9
+#define MAX_PES_HEADER_SIZE (9 + 255)
+
+struct PESContext {
+ int pid;
+ int stream_type;
+ MpegTSContext *ts;
+ AVFormatContext *stream;
+ AVStream *st;
+ enum MpegTSState state;
+ /* used to get the format */
+ int data_index;
+ int total_size;
+ int pes_header_size;
+ int64_t pts, dts;
+ uint8_t header[MAX_PES_HEADER_SIZE];
+};
+
+static int64_t get_pts(const uint8_t *p)
+{
+ int64_t pts;
+ int val;
+
+ pts = (int64_t)((p[0] >> 1) & 0x07) << 30;
+ val = (p[1] << 8) | p[2];
+ pts |= (int64_t)(val >> 1) << 15;
+ val = (p[3] << 8) | p[4];
+ pts |= (int64_t)(val >> 1);
+ return pts;
+}
+
+/* PES data callback: builds an AVPacket in ts->pkt and stops the parse loop
+ once a complete payload is available */
+static void mpegts_push_data(void *opaque,
+ const uint8_t *buf, int buf_size, int is_start)
+{
+ PESContext *pes = opaque;
+ MpegTSContext *ts = pes->ts;
+ const uint8_t *p;
+ int len, code;
+
+ if (is_start) {
+ pes->state = MPEGTS_HEADER;
+ pes->data_index = 0;
+ }
+ p = buf;
+ while (buf_size > 0) {
+ switch(pes->state) {
+ case MPEGTS_HEADER:
+ len = PES_START_SIZE - pes->data_index;
+ if (len > buf_size)
+ len = buf_size;
+ memcpy(pes->header + pes->data_index, p, len);
+ pes->data_index += len;
+ p += len;
+ buf_size -= len;
+ if (pes->data_index == PES_START_SIZE) {
+ /* we got all the PES or section header. We can now
+ decide */
+#if 0
+ av_hex_dump(pes->header, pes->data_index);
+#endif
+ if (pes->header[0] == 0x00 && pes->header[1] == 0x00 &&
+ pes->header[2] == 0x01) {
+ /* it must be an mpeg2 PES stream */
+ code = pes->header[3] | 0x100;
+ if (!((code >= 0x1c0 && code <= 0x1df) ||
+ (code >= 0x1e0 && code <= 0x1ef) ||
+ (code == 0x1bd)))
+ goto skip;
+ if (!pes->st) {
+ /* allocate stream */
+ new_pes_av_stream(pes, code);
+ }
+ pes->state = MPEGTS_PESHEADER_FILL;
+ pes->total_size = (pes->header[4] << 8) | pes->header[5];
+ /* NOTE: a zero total size means the PES size is
+ unbounded */
+ if (pes->total_size)
+ pes->total_size += 6;
+ pes->pes_header_size = pes->header[8] + 9;
+ } else {
+ /* otherwise, it should be a table */
+ /* skip packet */
+ skip:
+ pes->state = MPEGTS_SKIP;
+ continue;
+ }
+ }
+ break;
+ /**********************************************/
+ /* PES packet parsing */
+ case MPEGTS_PESHEADER_FILL:
+ len = pes->pes_header_size - pes->data_index;
+ if (len > buf_size)
+ len = buf_size;
+ memcpy(pes->header + pes->data_index, p, len);
+ pes->data_index += len;
+ p += len;
+ buf_size -= len;
+ if (pes->data_index == pes->pes_header_size) {
+ const uint8_t *r;
+ unsigned int flags;
+
+ flags = pes->header[7];
+ r = pes->header + 9;
+ pes->pts = AV_NOPTS_VALUE;
+ pes->dts = AV_NOPTS_VALUE;
+ if ((flags & 0xc0) == 0x80) {
+ pes->pts = get_pts(r);
+ r += 5;
+ } else if ((flags & 0xc0) == 0xc0) {
+ pes->pts = get_pts(r);
+ r += 5;
+ pes->dts = get_pts(r);
+ r += 5;
+ }
+ /* we got the full header. We parse it and get the payload */
+ pes->state = MPEGTS_PAYLOAD;
+ }
+ break;
+ case MPEGTS_PAYLOAD:
+ if (pes->total_size) {
+ len = pes->total_size - pes->data_index;
+ if (len > buf_size)
+ len = buf_size;
+ } else {
+ len = buf_size;
+ }
+ if (len > 0) {
+ AVPacket *pkt = ts->pkt;
+ if (pes->st && av_new_packet(pkt, len) == 0) {
+ memcpy(pkt->data, p, len);
+ pkt->stream_index = pes->st->index;
+ pkt->pts = pes->pts;
+ pkt->dts = pes->dts;
+ /* reset pts values */
+ pes->pts = AV_NOPTS_VALUE;
+ pes->dts = AV_NOPTS_VALUE;
+ ts->stop_parse = 1;
+ return;
+ }
+ }
+ buf_size = 0;
+ break;
+ case MPEGTS_SKIP:
+ buf_size = 0;
+ break;
+ }
+ }
+}
+
+static AVStream* new_pes_av_stream(PESContext *pes, uint32_t code)
+{
+ AVStream *st;
+ int codec_type, codec_id;
+
+ switch(pes->stream_type){
+ case STREAM_TYPE_AUDIO_MPEG1:
+ case STREAM_TYPE_AUDIO_MPEG2:
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_MP3;
+ break;
+ case STREAM_TYPE_VIDEO_MPEG1:
+ case STREAM_TYPE_VIDEO_MPEG2:
+ codec_type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_MPEG2VIDEO;
+ break;
+ case STREAM_TYPE_VIDEO_MPEG4:
+ codec_type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_MPEG4;
+ break;
+ case STREAM_TYPE_VIDEO_H264:
+ codec_type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_H264;
+ break;
+ case STREAM_TYPE_AUDIO_AAC:
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_AAC;
+ break;
+ case STREAM_TYPE_AUDIO_AC3:
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_AC3;
+ break;
+ case STREAM_TYPE_AUDIO_DTS:
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_DTS;
+ break;
+ case STREAM_TYPE_SUBTITLE_DVB:
+ codec_type = CODEC_TYPE_SUBTITLE;
+ codec_id = CODEC_ID_DVB_SUBTITLE;
+ break;
+ default:
+ if (code >= 0x1c0 && code <= 0x1df) {
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_MP2;
+ } else if (code == 0x1bd) {
+ codec_type = CODEC_TYPE_AUDIO;
+ codec_id = CODEC_ID_AC3;
+ } else {
+ codec_type = CODEC_TYPE_VIDEO;
+ codec_id = CODEC_ID_MPEG1VIDEO;
+ }
+ break;
+ }
+ st = av_new_stream(pes->stream, pes->pid);
+ if (st) {
+ av_set_pts_info(st, 33, 1, 90000);
+ st->priv_data = pes;
+ st->codec->codec_type = codec_type;
+ st->codec->codec_id = codec_id;
+ st->need_parsing = 1;
+ pes->st = st;
+ }
+ return st;
+}
+
+
+static PESContext *add_pes_stream(MpegTSContext *ts, int pid, int stream_type)
+{
+ MpegTSFilter *tss;
+ PESContext *pes;
+
+ /* if no pid found, then add a pid context */
+ pes = av_mallocz(sizeof(PESContext));
+ if (!pes)
+ return 0;
+ pes->ts = ts;
+ pes->stream = ts->stream;
+ pes->pid = pid;
+ pes->stream_type = stream_type;
+ tss = mpegts_open_pes_filter(ts, pid, mpegts_push_data, pes);
+ if (!tss) {
+ av_free(pes);
+ return 0;
+ }
+ return pes;
+}
+
+/* handle one TS packet */
+static void handle_packet(MpegTSContext *ts, const uint8_t *packet)
+{
+ AVFormatContext *s = ts->stream;
+ MpegTSFilter *tss;
+ int len, pid, cc, cc_ok, afc, is_start;
+ const uint8_t *p, *p_end;
+
+ pid = ((packet[1] & 0x1f) << 8) | packet[2];
+ is_start = packet[1] & 0x40;
+ tss = ts->pids[pid];
+ if (ts->auto_guess && tss == NULL && is_start) {
+ add_pes_stream(ts, pid, 0);
+ tss = ts->pids[pid];
+ }
+ if (!tss)
+ return;
+
+ /* continuity check (currently not used) */
+ cc = (packet[3] & 0xf);
+ cc_ok = (tss->last_cc < 0) || ((((tss->last_cc + 1) & 0x0f) == cc));
+ tss->last_cc = cc;
+
+ /* skip adaptation field */
+ afc = (packet[3] >> 4) & 3;
+ p = packet + 4;
+ if (afc == 0) /* reserved value */
+ return;
+ if (afc == 2) /* adaptation field only */
+ return;
+ if (afc == 3) {
+ /* skip adaptation field */
+ p += p[0] + 1;
+ }
+ /* if past the end of packet, ignore */
+ p_end = packet + TS_PACKET_SIZE;
+ if (p >= p_end)
+ return;
+
+ if (tss->type == MPEGTS_SECTION) {
+ if (is_start) {
+ /* pointer field present */
+ len = *p++;
+ if (p + len > p_end)
+ return;
+ if (len && cc_ok) {
+ /* write remaining section bytes */
+ write_section_data(s, tss,
+ p, len, 0);
+ /* check whether filter has been closed */
+ if (!ts->pids[pid])
+ return;
+ }
+ p += len;
+ if (p < p_end) {
+ write_section_data(s, tss,
+ p, p_end - p, 1);
+ }
+ } else {
+ if (cc_ok) {
+ write_section_data(s, tss,
+ p, p_end - p, 0);
+ }
+ }
+ } else {
+ tss->u.pes_filter.pes_cb(tss->u.pes_filter.opaque,
+ p, p_end - p, is_start);
+ }
+}
+
+/* XXX: try to find a better synchro over several packets (use
+ get_packet_size() ?) */
+static int mpegts_resync(ByteIOContext *pb)
+{
+ int c, i;
+
+ for(i = 0;i < MAX_RESYNC_SIZE; i++) {
+ c = url_fgetc(pb);
+ if (c < 0)
+ return -1;
+ if (c == 0x47) {
+ url_fseek(pb, -1, SEEK_CUR);
+ return 0;
+ }
+ }
+ /* no sync found */
+ return -1;
+}
+
+/* return -1 if error or EOF. Return 0 if OK. */
+static int read_packet(ByteIOContext *pb, uint8_t *buf, int raw_packet_size)
+{
+ int skip, len;
+
+ for(;;) {
+ len = get_buffer(pb, buf, TS_PACKET_SIZE);
+ if (len != TS_PACKET_SIZE)
+ return AVERROR_IO;
+ /* check packet sync byte */
+ if (buf[0] != 0x47) {
+ /* find a new packet start */
+ url_fseek(pb, -TS_PACKET_SIZE, SEEK_CUR);
+ if (mpegts_resync(pb) < 0)
+ return AVERROR_INVALIDDATA;
+ else
+ continue;
+ } else {
+ skip = raw_packet_size - TS_PACKET_SIZE;
+ if (skip > 0)
+ url_fskip(pb, skip);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int handle_packets(MpegTSContext *ts, int nb_packets)
+{
+ AVFormatContext *s = ts->stream;
+ ByteIOContext *pb = &s->pb;
+ uint8_t packet[TS_PACKET_SIZE];
+ int packet_num, ret;
+
+ ts->stop_parse = 0;
+ packet_num = 0;
+ for(;;) {
+ if (ts->stop_parse)
+ break;
+ packet_num++;
+ if (nb_packets != 0 && packet_num >= nb_packets)
+ break;
+ ret = read_packet(pb, packet, ts->raw_packet_size);
+ if (ret != 0)
+ return ret;
+ handle_packet(ts, packet);
+ }
+ return 0;
+}
+
+static int mpegts_probe(AVProbeData *p)
+{
+#if 1
+ const int size= p->buf_size;
+ int score, fec_score, dvhs_score;
+#define CHECK_COUNT 10
+
+ if (size < (TS_FEC_PACKET_SIZE * CHECK_COUNT))
+ return -1;
+
+ score = analyze(p->buf, TS_PACKET_SIZE *CHECK_COUNT, TS_PACKET_SIZE, NULL);
+ dvhs_score = analyze(p->buf, TS_DVHS_PACKET_SIZE *CHECK_COUNT, TS_DVHS_PACKET_SIZE, NULL);
+ fec_score= analyze(p->buf, TS_FEC_PACKET_SIZE*CHECK_COUNT, TS_FEC_PACKET_SIZE, NULL);
+// av_log(NULL, AV_LOG_DEBUG, "score: %d, dvhs_score: %d, fec_score: %d \n", score, dvhs_score, fec_score);
+
+// we need a clear definition for the returned score otherwise things will become messy sooner or later
+ if (score > fec_score && score > dvhs_score && score > 6) return AVPROBE_SCORE_MAX + score - CHECK_COUNT;
+ else if(dvhs_score > score && dvhs_score > fec_score && dvhs_score > 6) return AVPROBE_SCORE_MAX + dvhs_score - CHECK_COUNT;
+ else if( fec_score > 6) return AVPROBE_SCORE_MAX + fec_score - CHECK_COUNT;
+ else return -1;
+#else
+ /* only use the extension for a safer guess */
+ if (match_ext(p->filename, "ts"))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+#endif
+}
+
+static void set_service_cb(void *opaque, int ret)
+{
+ MpegTSContext *ts = opaque;
+ ts->set_service_ret = ret;
+ ts->stop_parse = 1;
+}
+
+/* return the 90 kHz PCR and the extension for the 27 MHz PCR; return
+ -1 if not available */
+static int parse_pcr(int64_t *ppcr_high, int *ppcr_low,
+ const uint8_t *packet)
+{
+ int afc, len, flags;
+ const uint8_t *p;
+ unsigned int v;
+
+ afc = (packet[3] >> 4) & 3;
+ if (afc <= 1)
+ return -1;
+ p = packet + 4;
+ len = p[0];
+ p++;
+ if (len == 0)
+ return -1;
+ flags = *p++;
+ len--;
+ if (!(flags & 0x10))
+ return -1;
+ if (len < 6)
+ return -1;
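+ /* 48-bit PCR field: 33-bit base (90 kHz units), 6 reserved bits, 9-bit extension (27 MHz remainder) */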
+ v = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+ *ppcr_high = ((int64_t)v << 1) | (p[4] >> 7);
+ *ppcr_low = ((p[4] & 1) << 8) | p[5];
+ return 0;
+}
+
+static int mpegts_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ MpegTSContext *ts = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint8_t buf[1024];
+ int len, sid, i;
+ int64_t pos;
+ MpegTSService *service;
+
+ if (ap) {
+ ts->mpeg2ts_raw = ap->mpeg2ts_raw;
+ ts->mpeg2ts_compute_pcr = ap->mpeg2ts_compute_pcr;
+ }
+
+ /* read the first 1024 bytes to get packet size */
+ pos = url_ftell(pb);
+ len = get_buffer(pb, buf, sizeof(buf));
+ if (len != sizeof(buf))
+ goto fail;
+ ts->raw_packet_size = get_packet_size(buf, sizeof(buf));
+ if (ts->raw_packet_size <= 0)
+ goto fail;
+ ts->stream = s;
+ ts->auto_guess = 0;
+
+goto_auto_guess:
+ if (!ts->mpeg2ts_raw) {
+ /* normal demux */
+
+ if (!ts->auto_guess) {
+ ts->set_service_ret = -1;
+
+ /* first do a scan to get all the services */
+ url_fseek(pb, pos, SEEK_SET);
+ mpegts_scan_sdt(ts);
+
+ handle_packets(ts, s->probesize);
+
+ if (ts->nb_services <= 0) {
+ /* no SDT found, we try to look at the PAT */
+
+ /* First remove the SDT filters from each PID */
+ int i;
+ for (i=0; i < NB_PID_MAX; i++) {
+ if (ts->pids[i])
+ mpegts_close_filter(ts, ts->pids[i]);
+ }
+ url_fseek(pb, pos, SEEK_SET);
+ mpegts_scan_pat(ts);
+
+ handle_packets(ts, s->probesize);
+ }
+
+ if (ts->nb_services <= 0) {
+ /* raw transport stream */
+ ts->auto_guess = 1;
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+ goto do_pcr;
+ }
+
+ /* tune to first service found */
+ for(i=0; i<ts->nb_services && ts->set_service_ret; i++){
+ service = ts->services[i];
+ sid = service->sid;
+#ifdef DEBUG_SI
+ printf("tuning to '%s'\n", service->name);
+#endif
+
+ /* now find the info for the first service if we found any,
+ otherwise try to filter all PATs */
+
+ url_fseek(pb, pos, SEEK_SET);
+ mpegts_set_service(ts, sid, set_service_cb, ts);
+
+ handle_packets(ts, s->probesize);
+ }
+ /* if no service could be found, exit */
+
+ if (ts->set_service_ret != 0) {
+ if(ts->auto_guess)
+ return -1;
+ else {
+ //let's retry with auto_guess set
+ ts->auto_guess = 1;
+ goto goto_auto_guess;
+ }
+ }
+
+#ifdef DEBUG_SI
+ printf("tuning done\n");
+#endif
+ }
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+ } else {
+ AVStream *st;
+ int pcr_pid, pid, nb_packets, nb_pcrs, ret, pcr_l;
+ int64_t pcrs[2], pcr_h;
+ int packet_count[2];
+ uint8_t packet[TS_PACKET_SIZE];
+
+ /* only read packets */
+
+ do_pcr:
+ st = av_new_stream(s, 0);
+ if (!st)
+ goto fail;
+ av_set_pts_info(st, 60, 1, 27000000);
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ st->codec->codec_id = CODEC_ID_MPEG2TS;
+
+ /* we iterate until we find two PCRs to estimate the bitrate */
+ pcr_pid = -1;
+ nb_pcrs = 0;
+ nb_packets = 0;
+ for(;;) {
+ ret = read_packet(&s->pb, packet, ts->raw_packet_size);
+ if (ret < 0)
+ return -1;
+ pid = ((packet[1] & 0x1f) << 8) | packet[2];
+ if ((pcr_pid == -1 || pcr_pid == pid) &&
+ parse_pcr(&pcr_h, &pcr_l, packet) == 0) {
+ pcr_pid = pid;
+ packet_count[nb_pcrs] = nb_packets;
+ pcrs[nb_pcrs] = pcr_h * 300 + pcr_l;
+ nb_pcrs++;
+ if (nb_pcrs >= 2)
+ break;
+ }
+ nb_packets++;
+ }
+ ts->pcr_pid = pcr_pid;
+
+ /* NOTE1: the bitrate is computed without the FEC */
+ /* NOTE2: it is only the bitrate of the start of the stream */
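+ /* pcr_incr is in 27 MHz ticks per packet, so bits/s = packet bits * 27e6 / pcr_incr */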
+ ts->pcr_incr = (pcrs[1] - pcrs[0]) / (packet_count[1] - packet_count[0]);
+ ts->cur_pcr = pcrs[0] - ts->pcr_incr * packet_count[0];
+ s->bit_rate = (TS_PACKET_SIZE * 8) * 27e6 / ts->pcr_incr;
+ st->codec->bit_rate = s->bit_rate;
+ st->start_time = ts->cur_pcr;
+#if 0
+ printf("start=%0.3f pcr=%0.3f incr=%d\n",
+ st->start_time / 1000000.0, pcrs[0] / 27e6, ts->pcr_incr);
+#endif
+ }
+
+ url_fseek(pb, pos, SEEK_SET);
+ return 0;
+ fail:
+ return -1;
+}
+
+#define MAX_PACKET_READAHEAD ((128 * 1024) / 188)
+
+static int mpegts_raw_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MpegTSContext *ts = s->priv_data;
+ int ret, i;
+ int64_t pcr_h, next_pcr_h, pos;
+ int pcr_l, next_pcr_l;
+ uint8_t pcr_buf[12];
+
+ if (av_new_packet(pkt, TS_PACKET_SIZE) < 0)
+ return -ENOMEM;
+ pkt->pos= url_ftell(&s->pb);
+ ret = read_packet(&s->pb, pkt->data, ts->raw_packet_size);
+ if (ret < 0) {
+ av_free_packet(pkt);
+ return ret;
+ }
+ if (ts->mpeg2ts_compute_pcr) {
+ /* compute exact PCR for each packet */
+ if (parse_pcr(&pcr_h, &pcr_l, pkt->data) == 0) {
+ /* we read the next PCR (XXX: optimize it by using a bigger buffer) */
+ pos = url_ftell(&s->pb);
+ for(i = 0; i < MAX_PACKET_READAHEAD; i++) {
+ url_fseek(&s->pb, pos + i * ts->raw_packet_size, SEEK_SET);
+ get_buffer(&s->pb, pcr_buf, 12);
+ if (parse_pcr(&next_pcr_h, &next_pcr_l, pcr_buf) == 0) {
+ /* XXX: not precise enough */
+ ts->pcr_incr = ((next_pcr_h - pcr_h) * 300 + (next_pcr_l - pcr_l)) /
+ (i + 1);
+ break;
+ }
+ }
+ url_fseek(&s->pb, pos, SEEK_SET);
+ /* no next PCR found: we use previous increment */
+ ts->cur_pcr = pcr_h * 300 + pcr_l;
+ }
+ pkt->pts = ts->cur_pcr;
+ pkt->duration = ts->pcr_incr;
+ ts->cur_pcr += ts->pcr_incr;
+ }
+ pkt->stream_index = 0;
+ return 0;
+}
+
+static int mpegts_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ MpegTSContext *ts = s->priv_data;
+
+ if (!ts->mpeg2ts_raw) {
+ ts->pkt = pkt;
+ return handle_packets(ts, 0);
+ } else {
+ return mpegts_raw_read_packet(s, pkt);
+ }
+}
+
+static int mpegts_read_close(AVFormatContext *s)
+{
+ MpegTSContext *ts = s->priv_data;
+ int i;
+ for(i=0;i<NB_PID_MAX;i++)
+ if (ts->pids[i]) mpegts_close_filter(ts, ts->pids[i]);
+
+ for(i = 0; i < ts->nb_services; i++){
+ av_free(ts->services[i]->provider_name);
+ av_free(ts->services[i]->name);
+ av_free(ts->services[i]);
+ }
+ av_freep(&ts->services);
+
+ return 0;
+}
+
+static int64_t mpegts_get_pcr(AVFormatContext *s, int stream_index,
+ int64_t *ppos, int64_t pos_limit)
+{
+ MpegTSContext *ts = s->priv_data;
+ int64_t pos, timestamp;
+ uint8_t buf[TS_PACKET_SIZE];
+ int pcr_l, pid;
+ const int find_next= 1;
+ pos = ((*ppos + ts->raw_packet_size - 1) / ts->raw_packet_size) * ts->raw_packet_size;
+ if (find_next) {
+ for(;;) {
+ url_fseek(&s->pb, pos, SEEK_SET);
+ if (get_buffer(&s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
+ return AV_NOPTS_VALUE;
+ pid = ((buf[1] & 0x1f) << 8) | buf[2];
+ if (pid == ts->pcr_pid &&
+ parse_pcr(&timestamp, &pcr_l, buf) == 0) {
+ break;
+ }
+ pos += ts->raw_packet_size;
+ }
+ } else {
+ for(;;) {
+ pos -= ts->raw_packet_size;
+ if (pos < 0)
+ return AV_NOPTS_VALUE;
+ url_fseek(&s->pb, pos, SEEK_SET);
+ if (get_buffer(&s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
+ return AV_NOPTS_VALUE;
+ pid = ((buf[1] & 0x1f) << 8) | buf[2];
+ if (pid == ts->pcr_pid &&
+ parse_pcr(&timestamp, &pcr_l, buf) == 0) {
+ break;
+ }
+ }
+ }
+ *ppos = pos;
+
+ return timestamp;
+}
+
+static int read_seek(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
+ MpegTSContext *ts = s->priv_data;
+ uint8_t buf[TS_PACKET_SIZE];
+ int64_t pos;
+
+ if(av_seek_frame_binary(s, stream_index, target_ts, flags) < 0)
+ return -1;
+
+ pos= url_ftell(&s->pb);
+
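+ /* advance to the next packet whose payload_unit_start_indicator is set, so that demuxing resumes on a clean PES/section boundary */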
+ for(;;) {
+ url_fseek(&s->pb, pos, SEEK_SET);
+ if (get_buffer(&s->pb, buf, TS_PACKET_SIZE) != TS_PACKET_SIZE)
+ return -1;
+// pid = ((buf[1] & 0x1f) << 8) | buf[2];
+ if(buf[1] & 0x40) break;
+ pos += ts->raw_packet_size;
+ }
+ url_fseek(&s->pb, pos, SEEK_SET);
+
+ return 0;
+}
+
+/**************************************************************/
+/* parsing functions - called from other demuxers such as RTP */
+
+MpegTSContext *mpegts_parse_open(AVFormatContext *s)
+{
+ MpegTSContext *ts;
+
+ ts = av_mallocz(sizeof(MpegTSContext));
+ if (!ts)
+ return NULL;
+ /* no stream case, currently used by RTP */
+ ts->raw_packet_size = TS_PACKET_SIZE;
+ ts->stream = s;
+ ts->auto_guess = 1;
+ return ts;
+}
+
+/* return the consumed length if a packet was output, or -1 if no
+ packet is output */
+int mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
+ const uint8_t *buf, int len)
+{
+ int len1;
+
+ len1 = len;
+ ts->pkt = pkt;
+ ts->stop_parse = 0;
+ for(;;) {
+ if (ts->stop_parse)
+ break;
+ if (len < TS_PACKET_SIZE)
+ return -1;
+ if (buf[0] != 0x47) {
+ buf++;
+ len--;
+ } else {
+ handle_packet(ts, buf);
+ buf += TS_PACKET_SIZE;
+ len -= TS_PACKET_SIZE;
+ }
+ }
+ return len1 - len;
+}
+
+void mpegts_parse_close(MpegTSContext *ts)
+{
+ int i;
+
+ for(i=0;i<NB_PID_MAX;i++)
+ av_free(ts->pids[i]);
+ av_free(ts);
+}
+
+AVInputFormat mpegts_demuxer = {
+ "mpegts",
+ "MPEG2 transport stream format",
+ sizeof(MpegTSContext),
+ mpegts_probe,
+ mpegts_read_header,
+ mpegts_read_packet,
+ mpegts_read_close,
+ read_seek,
+ mpegts_get_pcr,
+ .flags = AVFMT_SHOW_IDS,
+};
diff --git a/contrib/ffmpeg/libavformat/mpegts.h b/contrib/ffmpeg/libavformat/mpegts.h
new file mode 100644
index 000000000..b3eb3cda7
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpegts.h
@@ -0,0 +1,63 @@
+/*
+ * MPEG2 transport stream defines
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define TS_FEC_PACKET_SIZE 204
+#define TS_DVHS_PACKET_SIZE 192
+#define TS_PACKET_SIZE 188
+#define NB_PID_MAX 8192
+#define MAX_SECTION_SIZE 4096
+
+/* pids */
+#define PAT_PID 0x0000
+#define SDT_PID 0x0011
+
+/* table ids */
+#define PAT_TID 0x00
+#define PMT_TID 0x02
+#define SDT_TID 0x42
+
+/* descriptor ids */
+#define DVB_SUBT_DESCID 0x59
+
+#define STREAM_TYPE_VIDEO_MPEG1 0x01
+#define STREAM_TYPE_VIDEO_MPEG2 0x02
+#define STREAM_TYPE_AUDIO_MPEG1 0x03
+#define STREAM_TYPE_AUDIO_MPEG2 0x04
+#define STREAM_TYPE_PRIVATE_SECTION 0x05
+#define STREAM_TYPE_PRIVATE_DATA 0x06
+#define STREAM_TYPE_AUDIO_AAC 0x0f
+#define STREAM_TYPE_VIDEO_MPEG4 0x10
+#define STREAM_TYPE_VIDEO_H264 0x1b
+
+#define STREAM_TYPE_AUDIO_AC3 0x81
+#define STREAM_TYPE_AUDIO_DTS 0x8a
+
+#define STREAM_TYPE_SUBTITLE_DVB 0x100
+
+unsigned int mpegts_crc32(const uint8_t *data, int len);
+extern AVOutputFormat mpegts_muxer;
+
+typedef struct MpegTSContext MpegTSContext;
+
+MpegTSContext *mpegts_parse_open(AVFormatContext *s);
+int mpegts_parse_packet(MpegTSContext *ts, AVPacket *pkt,
+ const uint8_t *buf, int len);
+void mpegts_parse_close(MpegTSContext *ts);
diff --git a/contrib/ffmpeg/libavformat/mpegtsenc.c b/contrib/ffmpeg/libavformat/mpegtsenc.c
new file mode 100644
index 000000000..39868bea4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpegtsenc.c
@@ -0,0 +1,676 @@
+/*
+ * MPEG2 transport stream (aka DVB) muxer
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "crc.h"
+#include "mpegts.h"
+
+/* write DVB SI sections */
+
+/*********************************************/
+/* mpegts section writer */
+
+typedef struct MpegTSSection {
+ int pid;
+ int cc;
+ void (*write_packet)(struct MpegTSSection *s, const uint8_t *packet);
+ void *opaque;
+} MpegTSSection;
+
+/* NOTE: 4 bytes must be left at the end for the crc32 */
+static void mpegts_write_section(MpegTSSection *s, uint8_t *buf, int len)
+{
+ unsigned int crc;
+ unsigned char packet[TS_PACKET_SIZE];
+ const unsigned char *buf_ptr;
+ unsigned char *q;
+ int first, b, len1, left;
+
+ crc = bswap_32(av_crc(av_crc04C11DB7, -1, buf, len - 4));
+ buf[len - 4] = (crc >> 24) & 0xff;
+ buf[len - 3] = (crc >> 16) & 0xff;
+ buf[len - 2] = (crc >> 8) & 0xff;
+ buf[len - 1] = (crc) & 0xff;
+
+ /* send each packet */
+ buf_ptr = buf;
+ while (len > 0) {
+ first = (buf == buf_ptr);
+ q = packet;
+ *q++ = 0x47;
+ b = (s->pid >> 8);
+ if (first)
+ b |= 0x40;
+ *q++ = b;
+ *q++ = s->pid;
+ s->cc = (s->cc + 1) & 0xf;
+ *q++ = 0x10 | s->cc;
+ if (first)
+ *q++ = 0; /* 0 offset */
+ len1 = TS_PACKET_SIZE - (q - packet);
+ if (len1 > len)
+ len1 = len;
+ memcpy(q, buf_ptr, len1);
+ q += len1;
+ /* add known padding data */
+ left = TS_PACKET_SIZE - (q - packet);
+ if (left > 0)
+ memset(q, 0xff, left);
+
+ s->write_packet(s, packet);
+
+ buf_ptr += len1;
+ len -= len1;
+ }
+}
+
+static inline void put16(uint8_t **q_ptr, int val)
+{
+ uint8_t *q;
+ q = *q_ptr;
+ *q++ = val >> 8;
+ *q++ = val;
+ *q_ptr = q;
+}
+
+static int mpegts_write_section1(MpegTSSection *s, int tid, int id,
+ int version, int sec_num, int last_sec_num,
+ uint8_t *buf, int len)
+{
+ uint8_t section[1024], *q;
+ unsigned int tot_len;
+
+ tot_len = 3 + 5 + len + 4;
+ /* check if not too big */
+ if (tot_len > 1024)
+ return -1;
+
+ q = section;
+ *q++ = tid;
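+ /* 0xb000 sets section_syntax_indicator and the two reserved bits */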
+ put16(&q, 0xb000 | (len + 5 + 4)); /* 5 byte header + 4 byte CRC */
+ put16(&q, id);
+ *q++ = 0xc1 | (version << 1); /* current_next_indicator = 1 */
+ *q++ = sec_num;
+ *q++ = last_sec_num;
+ memcpy(q, buf, len);
+
+ mpegts_write_section(s, section, tot_len);
+ return 0;
+}
+
+/*********************************************/
+/* mpegts writer */
+
+#define DEFAULT_PMT_START_PID 0x1000
+#define DEFAULT_START_PID 0x0100
+#define DEFAULT_PROVIDER_NAME "FFmpeg"
+#define DEFAULT_SERVICE_NAME "Service01"
+
+/* default network id, transport stream and service identifiers */
+#define DEFAULT_ONID 0x0001
+#define DEFAULT_TSID 0x0001
+#define DEFAULT_SID 0x0001
+
+/* a PES packet header is generated every DEFAULT_PES_HEADER_FREQ packets */
+#define DEFAULT_PES_HEADER_FREQ 16
+#define DEFAULT_PES_PAYLOAD_SIZE ((DEFAULT_PES_HEADER_FREQ - 1) * 184 + 170)
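+/* i.e. 2930 bytes: 15 full 184-byte TS payloads plus the 170 bytes that fit beside the 14-byte PES header (9 fixed bytes + 5 PTS bytes) in the first packet */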
+
+/* we retransmit the SI info at this rate */
+#define SDT_RETRANS_TIME 500
+#define PAT_RETRANS_TIME 100
+#define PCR_RETRANS_TIME 20
+
+typedef struct MpegTSWriteStream {
+ struct MpegTSService *service;
+ int pid; /* stream associated pid */
+ int cc;
+ int payload_index;
+ int64_t payload_pts;
+ uint8_t payload[DEFAULT_PES_PAYLOAD_SIZE];
+} MpegTSWriteStream;
+
+typedef struct MpegTSService {
+ MpegTSSection pmt; /* MPEG2 pmt table context */
+ int sid; /* service ID */
+ char *name;
+ char *provider_name;
+ int pcr_pid;
+ int pcr_packet_count;
+ int pcr_packet_freq;
+} MpegTSService;
+
+typedef struct MpegTSWrite {
+ MpegTSSection pat; /* MPEG2 pat table */
+ MpegTSSection sdt; /* MPEG2 sdt table context */
+ MpegTSService **services;
+ int sdt_packet_count;
+ int sdt_packet_freq;
+ int pat_packet_count;
+ int pat_packet_freq;
+ int nb_services;
+ int onid;
+ int tsid;
+} MpegTSWrite;
+
+static void mpegts_write_pat(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ MpegTSService *service;
+ uint8_t data[1012], *q;
+ int i;
+
+ q = data;
+ for(i = 0; i < ts->nb_services; i++) {
+ service = ts->services[i];
+ put16(&q, service->sid);
+ put16(&q, 0xe000 | service->pmt.pid);
+ }
+ mpegts_write_section1(&ts->pat, PAT_TID, ts->tsid, 0, 0, 0,
+ data, q - data);
+}
+
+static void mpegts_write_pmt(AVFormatContext *s, MpegTSService *service)
+{
+ // MpegTSWrite *ts = s->priv_data;
+ uint8_t data[1012], *q, *desc_length_ptr, *program_info_length_ptr;
+ int val, stream_type, i;
+
+ q = data;
+ put16(&q, 0xe000 | service->pcr_pid);
+
+ program_info_length_ptr = q;
+ q += 2; /* patched after */
+
+ /* put program info here */
+
+ val = 0xf000 | (q - program_info_length_ptr - 2);
+ program_info_length_ptr[0] = val >> 8;
+ program_info_length_ptr[1] = val;
+
+ for(i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+ MpegTSWriteStream *ts_st = st->priv_data;
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MPEG1VIDEO:
+ case CODEC_ID_MPEG2VIDEO:
+ stream_type = STREAM_TYPE_VIDEO_MPEG2;
+ break;
+ case CODEC_ID_MPEG4:
+ stream_type = STREAM_TYPE_VIDEO_MPEG4;
+ break;
+ case CODEC_ID_H264:
+ stream_type = STREAM_TYPE_VIDEO_H264;
+ break;
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ stream_type = STREAM_TYPE_AUDIO_MPEG1;
+ break;
+ case CODEC_ID_AAC:
+ stream_type = STREAM_TYPE_AUDIO_AAC;
+ break;
+ case CODEC_ID_AC3:
+ stream_type = STREAM_TYPE_AUDIO_AC3;
+ break;
+ default:
+ stream_type = STREAM_TYPE_PRIVATE_DATA;
+ break;
+ }
+ *q++ = stream_type;
+ put16(&q, 0xe000 | ts_st->pid);
+ desc_length_ptr = q;
+ q += 2; /* patched after */
+
+ /* write optional descriptors here */
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if (strlen(st->language) == 3) {
+ *q++ = 0x0a; /* ISO 639 language descriptor */
+ *q++ = 4;
+ *q++ = st->language[0];
+ *q++ = st->language[1];
+ *q++ = st->language[2];
+ *q++ = 0; /* undefined type */
+ }
+ break;
+ case CODEC_TYPE_SUBTITLE:
+ {
+ const char *language;
+ language = st->language;
+ if (strlen(language) != 3)
+ language = "eng";
+ *q++ = 0x59;
+ *q++ = 8;
+ *q++ = language[0];
+ *q++ = language[1];
+ *q++ = language[2];
+ *q++ = 0x10; /* normal subtitles (0x20 = for the hard of hearing) */
+ put16(&q, 1); /* page id */
+ put16(&q, 1); /* ancillary page id */
+ }
+ break;
+ }
+
+ val = 0xf000 | (q - desc_length_ptr - 2);
+ desc_length_ptr[0] = val >> 8;
+ desc_length_ptr[1] = val;
+ }
+ mpegts_write_section1(&service->pmt, PMT_TID, service->sid, 0, 0, 0,
+ data, q - data);
+}
+
+/* NOTE: str == NULL is accepted for an empty string */
+static void putstr8(uint8_t **q_ptr, const char *str)
+{
+ uint8_t *q;
+ int len;
+
+ q = *q_ptr;
+ if (!str)
+ len = 0;
+ else
+ len = strlen(str);
+ *q++ = len;
+ memcpy(q, str, len);
+ q += len;
+ *q_ptr = q;
+}
+
+static void mpegts_write_sdt(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ MpegTSService *service;
+ uint8_t data[1012], *q, *desc_list_len_ptr, *desc_len_ptr;
+ int i, running_status, free_ca_mode, val;
+
+ q = data;
+ put16(&q, ts->onid);
+ *q++ = 0xff;
+ for(i = 0; i < ts->nb_services; i++) {
+ service = ts->services[i];
+ put16(&q, service->sid);
+ *q++ = 0xfc | 0x00; /* currently no EIT info */
+ desc_list_len_ptr = q;
+ q += 2;
+ running_status = 4; /* running */
+ free_ca_mode = 0;
+
+ /* write only one descriptor for the service name and provider */
+ *q++ = 0x48;
+ desc_len_ptr = q;
+ q++;
+ *q++ = 0x01; /* digital television service */
+ putstr8(&q, service->provider_name);
+ putstr8(&q, service->name);
+ desc_len_ptr[0] = q - desc_len_ptr - 1;
+
+ /* fill descriptor length */
+ val = (running_status << 13) | (free_ca_mode << 12) |
+ (q - desc_list_len_ptr - 2);
+ desc_list_len_ptr[0] = val >> 8;
+ desc_list_len_ptr[1] = val;
+ }
+ mpegts_write_section1(&ts->sdt, SDT_TID, ts->tsid, 0, 0, 0,
+ data, q - data);
+}
+
+static MpegTSService *mpegts_add_service(MpegTSWrite *ts,
+ int sid,
+ const char *provider_name,
+ const char *name)
+{
+ MpegTSService *service;
+
+ service = av_mallocz(sizeof(MpegTSService));
+ if (!service)
+ return NULL;
+ service->pmt.pid = DEFAULT_PMT_START_PID + ts->nb_services - 1;
+ service->sid = sid;
+ service->provider_name = av_strdup(provider_name);
+ service->name = av_strdup(name);
+ service->pcr_pid = 0x1fff;
+ dynarray_add(&ts->services, &ts->nb_services, service);
+ return service;
+}
+
+static void section_write_packet(MpegTSSection *s, const uint8_t *packet)
+{
+ AVFormatContext *ctx = s->opaque;
+ put_buffer(&ctx->pb, packet, TS_PACKET_SIZE);
+}
+
+static int mpegts_write_header(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ MpegTSWriteStream *ts_st;
+ MpegTSService *service;
+ AVStream *st;
+ int i, total_bit_rate;
+ const char *service_name;
+
+ ts->tsid = DEFAULT_TSID;
+ ts->onid = DEFAULT_ONID;
+ /* allocate a single DVB service */
+ service_name = s->title;
+ if (service_name[0] == '\0')
+ service_name = DEFAULT_SERVICE_NAME;
+ service = mpegts_add_service(ts, DEFAULT_SID,
+ DEFAULT_PROVIDER_NAME, service_name);
+ service->pmt.write_packet = section_write_packet;
+ service->pmt.opaque = s;
+
+ ts->pat.pid = PAT_PID;
+ ts->pat.cc = 0;
+ ts->pat.write_packet = section_write_packet;
+ ts->pat.opaque = s;
+
+ ts->sdt.pid = SDT_PID;
+ ts->sdt.cc = 0;
+ ts->sdt.write_packet = section_write_packet;
+ ts->sdt.opaque = s;
+
+ /* assign pids to each stream */
+ total_bit_rate = 0;
+ for(i = 0;i < s->nb_streams; i++) {
+ st = s->streams[i];
+ ts_st = av_mallocz(sizeof(MpegTSWriteStream));
+ if (!ts_st)
+ goto fail;
+ st->priv_data = ts_st;
+ ts_st->service = service;
+ ts_st->pid = DEFAULT_START_PID + i;
+ ts_st->payload_pts = AV_NOPTS_VALUE;
+ /* update PCR pid by using the first video stream */
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO &&
+ service->pcr_pid == 0x1fff)
+ service->pcr_pid = ts_st->pid;
+ total_bit_rate += st->codec->bit_rate;
+ }
+
+ /* if no video stream, use the first stream as PCR */
+ if (service->pcr_pid == 0x1fff && s->nb_streams > 0) {
+ ts_st = s->streams[0]->priv_data;
+ service->pcr_pid = ts_st->pid;
+ }
+
+ if (total_bit_rate <= 8 * 1024)
+ total_bit_rate = 8 * 1024;
+ service->pcr_packet_freq = (total_bit_rate * PCR_RETRANS_TIME) /
+ (TS_PACKET_SIZE * 8 * 1000);
+ ts->sdt_packet_freq = (total_bit_rate * SDT_RETRANS_TIME) /
+ (TS_PACKET_SIZE * 8 * 1000);
+ ts->pat_packet_freq = (total_bit_rate * PAT_RETRANS_TIME) /
+ (TS_PACKET_SIZE * 8 * 1000);
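+ /* the *_packet_freq values are the number of TS packets between two transmissions of the corresponding table */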
+#if 0
+ printf("%d %d %d\n",
+ total_bit_rate, ts->sdt_packet_freq, ts->pat_packet_freq);
+#endif
+
+ /* write the SI tables at the start of the file so that they can be
+ found quickly */
+ mpegts_write_sdt(s);
+ mpegts_write_pat(s);
+ for(i = 0; i < ts->nb_services; i++) {
+ mpegts_write_pmt(s, ts->services[i]);
+ }
+ put_flush_packet(&s->pb);
+
+ return 0;
+
+ fail:
+ for(i = 0;i < s->nb_streams; i++) {
+ st = s->streams[i];
+ av_free(st->priv_data);
+ }
+ return -1;
+}
+
+/* send the SDT, PAT and PMT tables regularly */
+static void retransmit_si_info(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ int i;
+
+ if (++ts->sdt_packet_count == ts->sdt_packet_freq) {
+ ts->sdt_packet_count = 0;
+ mpegts_write_sdt(s);
+ }
+ if (++ts->pat_packet_count == ts->pat_packet_freq) {
+ ts->pat_packet_count = 0;
+ mpegts_write_pat(s);
+ for(i = 0; i < ts->nb_services; i++) {
+ mpegts_write_pmt(s, ts->services[i]);
+ }
+ }
+}
+
+/* NOTE: the payload forms one complete PES packet */
+static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
+ const uint8_t *payload, int payload_size,
+ int64_t pts)
+{
+ MpegTSWriteStream *ts_st = st->priv_data;
+ uint8_t buf[TS_PACKET_SIZE];
+ uint8_t *q;
+ int val, is_start, len, header_len, write_pcr, private_code;
+ int afc_len, stuffing_len;
+ int64_t pcr = -1; /* avoid warning */
+
+ is_start = 1;
+ while (payload_size > 0) {
+ retransmit_si_info(s);
+
+ write_pcr = 0;
+ if (ts_st->pid == ts_st->service->pcr_pid) {
+ ts_st->service->pcr_packet_count++;
+ if (ts_st->service->pcr_packet_count >=
+ ts_st->service->pcr_packet_freq) {
+ ts_st->service->pcr_packet_count = 0;
+ write_pcr = 1;
+ /* XXX: this is incorrect, but at least we have a PCR
+ value */
+ pcr = pts;
+ }
+ }
+
+ /* prepare packet header */
+ q = buf;
+ *q++ = 0x47;
+ val = (ts_st->pid >> 8);
+ if (is_start)
+ val |= 0x40;
+ *q++ = val;
+ *q++ = ts_st->pid;
+ *q++ = 0x10 | ts_st->cc | (write_pcr ? 0x20 : 0);
+ ts_st->cc = (ts_st->cc + 1) & 0xf;
+ if (write_pcr) {
+ *q++ = 7; /* AFC length */
+ *q++ = 0x10; /* flags: PCR present */
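+ /* 33-bit PCR base (here simply the PTS), then reserved bits and a zero 9-bit extension */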
+ *q++ = pcr >> 25;
+ *q++ = pcr >> 17;
+ *q++ = pcr >> 9;
+ *q++ = pcr >> 1;
+ *q++ = (pcr & 1) << 7;
+ *q++ = 0;
+ }
+ if (is_start) {
+ /* write PES header */
+ *q++ = 0x00;
+ *q++ = 0x00;
+ *q++ = 0x01;
+ private_code = 0;
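+ /* PES stream_id: 0xe0 for video, 0xc0 for MPEG audio, otherwise 0xbd (private_stream_1) */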
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ *q++ = 0xe0;
+ } else if (st->codec->codec_type == CODEC_TYPE_AUDIO &&
+ (st->codec->codec_id == CODEC_ID_MP2 ||
+ st->codec->codec_id == CODEC_ID_MP3)) {
+ *q++ = 0xc0;
+ } else {
+ *q++ = 0xbd;
+ if (st->codec->codec_type == CODEC_TYPE_SUBTITLE) {
+ private_code = 0x20;
+ }
+ }
+ if (pts != AV_NOPTS_VALUE)
+ header_len = 8;
+ else
+ header_len = 3;
+ if (private_code != 0)
+ header_len++;
+ len = payload_size + header_len;
+ *q++ = len >> 8;
+ *q++ = len;
+ val = 0x80;
+ /* data alignment indicator is required for subtitle data */
+ if (st->codec->codec_type == CODEC_TYPE_SUBTITLE)
+ val |= 0x04;
+ *q++ = val;
+ if (pts != AV_NOPTS_VALUE) {
+ *q++ = 0x80; /* PTS only */
+ *q++ = 0x05; /* header len */
+ val = (0x02 << 4) |
+ (((pts >> 30) & 0x07) << 1) | 1;
+ *q++ = val;
+ val = (((pts >> 15) & 0x7fff) << 1) | 1;
+ *q++ = val >> 8;
+ *q++ = val;
+ val = (((pts) & 0x7fff) << 1) | 1;
+ *q++ = val >> 8;
+ *q++ = val;
+ } else {
+ *q++ = 0x00;
+ *q++ = 0x00;
+ }
+ if (private_code != 0)
+ *q++ = private_code;
+ is_start = 0;
+ }
+ /* header size */
+ header_len = q - buf;
+ /* data len */
+ len = TS_PACKET_SIZE - header_len;
+ if (len > payload_size)
+ len = payload_size;
+ stuffing_len = TS_PACKET_SIZE - header_len - len;
+ if (stuffing_len > 0) {
+ /* add stuffing with AFC */
+ if (buf[3] & 0x20) {
+ /* stuffing already present: increase its size */
+ afc_len = buf[4] + 1;
+ memmove(buf + 4 + afc_len + stuffing_len,
+ buf + 4 + afc_len,
+ header_len - (4 + afc_len));
+ buf[4] += stuffing_len;
+ memset(buf + 4 + afc_len, 0xff, stuffing_len);
+ } else {
+ /* add stuffing */
+ memmove(buf + 4 + stuffing_len, buf + 4, header_len - 4);
+ buf[3] |= 0x20;
+ buf[4] = stuffing_len - 1;
+ if (stuffing_len >= 2) {
+ buf[5] = 0x00;
+ memset(buf + 6, 0xff, stuffing_len - 2);
+ }
+ }
+ }
+ memcpy(buf + TS_PACKET_SIZE - len, payload, len);
+ payload += len;
+ payload_size -= len;
+ put_buffer(&s->pb, buf, TS_PACKET_SIZE);
+ }
+ put_flush_packet(&s->pb);
+}
+
+static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVStream *st = s->streams[pkt->stream_index];
+ int size= pkt->size;
+ uint8_t *buf= pkt->data;
+ MpegTSWriteStream *ts_st = st->priv_data;
+ int len, max_payload_size;
+
+ if (st->codec->codec_type == CODEC_TYPE_SUBTITLE) {
+ /* for subtitle, a single PES packet must be generated */
+ mpegts_write_pes(s, st, buf, size, pkt->pts);
+ return 0;
+ }
+
+ max_payload_size = DEFAULT_PES_PAYLOAD_SIZE;
+ while (size > 0) {
+ len = max_payload_size - ts_st->payload_index;
+ if (len > size)
+ len = size;
+ memcpy(ts_st->payload + ts_st->payload_index, buf, len);
+ buf += len;
+ size -= len;
+ ts_st->payload_index += len;
+ if (ts_st->payload_pts == AV_NOPTS_VALUE)
+ ts_st->payload_pts = pkt->pts;
+ if (ts_st->payload_index >= max_payload_size) {
+ mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index,
+ ts_st->payload_pts);
+ ts_st->payload_pts = AV_NOPTS_VALUE;
+ ts_st->payload_index = 0;
+ }
+ }
+ return 0;
+}
+
+static int mpegts_write_end(AVFormatContext *s)
+{
+ MpegTSWrite *ts = s->priv_data;
+ MpegTSWriteStream *ts_st;
+ MpegTSService *service;
+ AVStream *st;
+ int i;
+
+ /* flush current packets */
+ for(i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ ts_st = st->priv_data;
+ if (ts_st->payload_index > 0) {
+ mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index,
+ ts_st->payload_pts);
+ }
+ }
+ put_flush_packet(&s->pb);
+
+ for(i = 0; i < ts->nb_services; i++) {
+ service = ts->services[i];
+ av_freep(&service->provider_name);
+ av_freep(&service->name);
+ av_free(service);
+ }
+ av_free(ts->services);
+
+ return 0;
+}
+
+AVOutputFormat mpegts_muxer = {
+ "mpegts",
+ "MPEG2 transport stream format",
+ "video/x-mpegts",
+ "ts",
+ sizeof(MpegTSWrite),
+ CODEC_ID_MP2,
+ CODEC_ID_MPEG2VIDEO,
+ mpegts_write_header,
+ mpegts_write_packet,
+ mpegts_write_end,
+};
diff --git a/contrib/ffmpeg/libavformat/mpjpeg.c b/contrib/ffmpeg/libavformat/mpjpeg.c
new file mode 100644
index 000000000..937917313
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mpjpeg.c
@@ -0,0 +1,67 @@
+/*
+ * Multipart JPEG format
+ * Copyright (c) 2000, 2001, 2002, 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+/* Multipart JPEG */
+
+#define BOUNDARY_TAG "ffserver"
+
+static int mpjpeg_write_header(AVFormatContext *s)
+{
+ uint8_t buf1[256];
+
+ snprintf(buf1, sizeof(buf1), "--%s\n", BOUNDARY_TAG);
+ put_buffer(&s->pb, buf1, strlen(buf1));
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int mpjpeg_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ uint8_t buf1[256];
+
+ snprintf(buf1, sizeof(buf1), "Content-type: image/jpeg\n\n");
+ put_buffer(&s->pb, buf1, strlen(buf1));
+ put_buffer(&s->pb, pkt->data, pkt->size);
+
+ snprintf(buf1, sizeof(buf1), "\n--%s\n", BOUNDARY_TAG);
+ put_buffer(&s->pb, buf1, strlen(buf1));
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int mpjpeg_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVOutputFormat mpjpeg_muxer = {
+ "mpjpeg",
+ "Mime multipart JPEG format",
+ "multipart/x-mixed-replace;boundary=" BOUNDARY_TAG,
+ "mjpg",
+ 0,
+ CODEC_ID_NONE,
+ CODEC_ID_MJPEG,
+ mpjpeg_write_header,
+ mpjpeg_write_packet,
+ mpjpeg_write_trailer,
+};
diff --git a/contrib/ffmpeg/libavformat/mtv.c b/contrib/ffmpeg/libavformat/mtv.c
new file mode 100644
index 000000000..7a68ea97f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mtv.c
@@ -0,0 +1,187 @@
+/*
+ * mtv demuxer
+ * Copyright (c) 2006 Reynaldo H. Verdejo Pinochet
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file mtv.c
+ * MTV demuxer.
+ */
+
+#include "avformat.h"
+#include "bswap.h"
+
+#define MTV_ASUBCHUNK_DATA_SIZE 500
+#define MTV_HEADER_SIZE 512
+#define MTV_AUDIO_PADDING_SIZE 12
+#define AUDIO_SAMPLING_RATE 44100
+#define VIDEO_SID 0
+#define AUDIO_SID 1
+
+typedef struct MTVDemuxContext {
+
+ unsigned int file_size; ///< file size, not always accurate
+ unsigned int segments; ///< number of 512 byte segments
+ unsigned int audio_identifier; ///< 'MP3' on all files I have seen
+ unsigned int audio_br; ///< bitrate of the audio channel (mp3)
+ unsigned int img_colorfmt; ///< frame colorfmt rgb 565/555
+ unsigned int img_bpp; ///< frame bits per pixel
+ unsigned int img_width; //
+ unsigned int img_height; //
+ unsigned int img_segment_size; ///< size of image segment
+ unsigned int video_fps; //
+ unsigned int audio_subsegments; ///< audio subsegments on one segment
+
+ uint8_t audio_packet_count;
+
+} MTVDemuxContext;
+
+static int mtv_probe(AVProbeData *p)
+{
+ if(p->buf_size < 3)
+ return 0;
+
+ /* Magic is 'AMV' */
+
+ if(*(p->buf) != 'A' || *(p->buf+1) != 'M' || *(p->buf+2) != 'V')
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int mtv_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ MTVDemuxContext *mtv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+
+
+ url_fskip(pb, 3);
+ mtv->file_size = get_le32(pb);
+ mtv->segments = get_le32(pb);
+ url_fskip(pb, 32);
+ mtv->audio_identifier = get_le24(pb);
+ mtv->audio_br = get_le16(pb);
+ mtv->img_colorfmt = get_le24(pb);
+ mtv->img_bpp = get_byte(pb);
+ mtv->img_width = get_le16(pb);
+ mtv->img_height = get_le16(pb);
+ mtv->img_segment_size = get_le16(pb);
+ url_fskip(pb, 4);
+ mtv->audio_subsegments = get_le16(pb);
+ mtv->video_fps = (mtv->audio_br / 4) / mtv->audio_subsegments;
+
+ /* FIXME Add sanity check here */
+
+ /* the first packet is always audio */
+
+ mtv->audio_packet_count = 1;
+
+ /* all systems go! init decoders */
+
+ /* video - raw rgb565 */
+
+ st = av_new_stream(s, VIDEO_SID);
+ if(!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 64, 1, mtv->video_fps);
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->codec_tag = MKTAG('R', 'G', 'B', mtv->img_bpp);
+ st->codec->width = mtv->img_width;
+ st->codec->height = mtv->img_height;
+ st->codec->bits_per_sample = mtv->img_bpp;
+ st->codec->sample_rate = mtv->video_fps;
+
+ /* audio - mp3 */
+
+ st = av_new_stream(s, AUDIO_SID);
+ if(!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 64, 1, AUDIO_SAMPLING_RATE);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_MP3;
+ st->codec->bit_rate = mtv->audio_br;
+ st->need_parsing=1;
+
+ /* Jump over header */
+
+ if(url_fseek(pb, MTV_HEADER_SIZE, SEEK_SET) != MTV_HEADER_SIZE)
+ return AVERROR_IO;
+
+ return(0);
+
+}
+
+static int mtv_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ MTVDemuxContext *mtv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret;
+#ifndef WORDS_BIGENDIAN
+ int i;
+#endif
+
+ ret = 0;
+
+ if(mtv->audio_subsegments >= mtv->audio_packet_count)
+ {
+ url_fskip(pb, MTV_AUDIO_PADDING_SIZE);
+
+ ret = av_get_packet(pb, pkt, MTV_ASUBCHUNK_DATA_SIZE);
+ if(ret != MTV_ASUBCHUNK_DATA_SIZE)
+ return AVERROR_IO;
+
+ mtv->audio_packet_count++;
+ pkt->stream_index = AUDIO_SID;
+
+ }else
+ {
+ ret = av_get_packet(pb, pkt, mtv->img_segment_size);
+ if(ret != mtv->img_segment_size)
+ return AVERROR_IO;
+
+#ifndef WORDS_BIGENDIAN
+
+ /* pkt->data is GGGRRRRR BBBBBGGG
+ * and we need RRRRRGGG GGGBBBBB
+ * for PIX_FMT_RGB565 so here we
+ * just swap bytes as they come
+ */
+
+ for(i=0;i<mtv->img_segment_size/2;i++)
+ *((uint16_t *)pkt->data+i) = bswap_16(*((uint16_t *)pkt->data+i));
+#endif
+ mtv->audio_packet_count = 1;
+ pkt->stream_index = VIDEO_SID;
+ }
+
+ return(ret);
+}
+
+AVInputFormat mtv_demuxer = {
+ "MTV",
+ "MTV format",
+ sizeof(MTVDemuxContext),
+ mtv_probe,
+ mtv_read_header,
+ mtv_read_packet,
+};
diff --git a/contrib/ffmpeg/libavformat/mxf.c b/contrib/ffmpeg/libavformat/mxf.c
new file mode 100644
index 000000000..b20679943
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/mxf.c
@@ -0,0 +1,1082 @@
+/*
+ * MXF demuxer.
+ * Copyright (c) 2006 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * References
+ * SMPTE 336M KLV Data Encoding Protocol Using Key-Length-Value
+ * SMPTE 377M MXF File Format Specifications
+ * SMPTE 378M Operational Pattern 1a
+ * SMPTE 379M MXF Generic Container
+ * SMPTE 381M Mapping MPEG Streams into the MXF Generic Container
+ * SMPTE 382M Mapping AES3 and Broadcast Wave Audio into the MXF Generic Container
+ * SMPTE 383M Mapping DV-DIF Data to the MXF Generic Container
+ *
+ * Principle
+ * Search for Track numbers which will identify essence element KLV packets.
+ * Search for the SourcePackage which defines tracks that contain Track numbers.
+ * The Material Package contains tracks with references to SourcePackage tracks.
+ * Search for Descriptors (Picture, Sound) which contain codec info and parameters.
+ * Assign Descriptors to correct Tracks.
+ *
+ * Metadata reading functions read Local Tags, get InstanceUID(0x3C0A) then add MetaDataSet to MXFContext.
+ * Metadata parsing resolves Strong References to objects.
+ *
+ * Simple demuxer: only OP1a is supported, and some files might not work at all.
+ * Only tracks with associated descriptors will be decoded. "Highly Desirable" SMPTE 377M D.1
+ */
+
+//#define DEBUG
+
+#include "avformat.h"
+
+typedef uint8_t UID[16];
+
+enum MXFMetadataSetType {
+ MaterialPackage,
+ SourcePackage,
+ SourceClip,
+ TimecodeComponent,
+ Sequence,
+ MultipleDescriptor,
+ Descriptor,
+ Track,
+ EssenceContainerData,
+};
+
+typedef struct MXFStructuralComponent {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID source_package_uid;
+ UID data_definition_ul;
+ int64_t duration;
+ int64_t start_position;
+ int source_track_id;
+} MXFStructuralComponent;
+
+typedef struct MXFSequence {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID data_definition_ul;
+ UID *structural_components_refs;
+ int structural_components_count;
+ int64_t duration;
+} MXFSequence;
+
+typedef struct MXFTrack {
+ UID uid;
+ enum MXFMetadataSetType type;
+ MXFSequence *sequence; /* mandatory, and only one */
+ UID sequence_ref;
+ int track_id;
+ uint8_t track_number[4];
+ AVRational edit_rate;
+} MXFTrack;
+
+typedef struct MXFDescriptor {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID essence_container_ul;
+ UID essence_codec_ul;
+ AVRational sample_rate;
+ AVRational aspect_ratio;
+ int width;
+ int height;
+ int channels;
+ int bits_per_sample;
+ UID *sub_descriptors_refs;
+ int sub_descriptors_count;
+ int linked_track_id;
+ uint8_t *extradata;
+ int extradata_size;
+} MXFDescriptor;
+
+typedef struct MXFPackage {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID package_uid;
+ UID *tracks_refs;
+ int tracks_count;
+ MXFDescriptor *descriptor; /* only one */
+ UID descriptor_ref;
+} MXFPackage;
+
+typedef struct MXFEssenceContainerData {
+ UID uid;
+ enum MXFMetadataSetType type;
+ UID linked_package_uid;
+} MXFEssenceContainerData;
+
+typedef struct {
+ UID uid;
+ enum MXFMetadataSetType type;
+} MXFMetadataSet;
+
+typedef struct MXFContext {
+ UID *packages_refs;
+ int packages_count;
+ UID *essence_container_data_sets_refs;
+ int essence_container_data_sets_count;
+ UID *essence_containers_uls; /* Universal Labels SMPTE RP224 */
+ int essence_containers_uls_count;
+ UID operational_pattern_ul;
+ UID content_storage_uid;
+ MXFMetadataSet **metadata_sets;
+ int metadata_sets_count;
+ AVFormatContext *fc;
+} MXFContext;
+
+typedef struct KLVPacket {
+ UID key;
+ offset_t offset;
+ uint64_t length;
+} KLVPacket;
+
+enum MXFWrappingScheme {
+ Frame,
+ Clip,
+};
+
+typedef struct MXFCodecUL {
+ UID uid;
+ enum CodecID id;
+ enum MXFWrappingScheme wrapping;
+} MXFCodecUL;
+
+typedef struct MXFDataDefinitionUL {
+ UID uid;
+ enum CodecType type;
+} MXFDataDefinitionUL;
+
+typedef struct MXFMetadataReadTableEntry {
+ const UID key;
+ int (*read)(MXFContext *mxf, KLVPacket *klv);
+} MXFMetadataReadTableEntry;
+
+/* partial keys to match */
+static const uint8_t mxf_header_partition_pack_key[] = { 0x06,0x0e,0x2b,0x34,0x02,0x05,0x01,0x01,0x0d,0x01,0x02,0x01,0x01,0x02 };
+static const uint8_t mxf_essence_element_key[] = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01 };
+
+#define IS_KLV_KEY(x, y) (!memcmp(x, y, sizeof(y)))
+
+#define PRINT_KEY(s, x) dprintf("%s %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", s, \
+ (x)[0], (x)[1], (x)[2], (x)[3], (x)[4], (x)[5], (x)[6], (x)[7], (x)[8], (x)[9], (x)[10], (x)[11], (x)[12], (x)[13], (x)[14], (x)[15])
+
+static int64_t klv_decode_ber_length(ByteIOContext *pb)
+{
+ int64_t size = 0;
+ uint8_t length = get_byte(pb);
+ int type = length >> 7;
+
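+ /* BER length: if the high bit is clear, the byte itself is the length (short form); otherwise the low 7 bits give the number of length bytes that follow (long form) */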
+ if (type) { /* long form */
+ int bytes_num = length & 0x7f;
+ /* SMPTE 379M 5.3.4 guarantees that bytes_num must not exceed 8 bytes */
+ if (bytes_num > 8)
+ return -1;
+ while (bytes_num--)
+ size = size << 8 | get_byte(pb);
+ } else {
+ size = length & 0x7f;
+ }
+ return size;
+}
+
+static int klv_read_packet(KLVPacket *klv, ByteIOContext *pb)
+{
+ klv->offset = url_ftell(pb);
+ get_buffer(pb, klv->key, 16);
+ klv->length = klv_decode_ber_length(pb);
+ return klv->length == -1 ? -1 : 0;
+}
+
+static int mxf_get_stream_index(AVFormatContext *s, KLVPacket *klv)
+{
+ int i;
+
+ for (i = 0; i < s->nb_streams; i++) {
+ MXFTrack *track = s->streams[i]->priv_data;
+ /* SMPTE 379M 7.3 */
+ if (!memcmp(klv->key + sizeof(mxf_essence_element_key), track->track_number, sizeof(track->track_number)))
+ return i;
+ }
+ /* return 0 if only one stream, for OP Atom files with 0 as track number */
+ return s->nb_streams == 1 ? 0 : -1;
+}
+
+/* XXX: use AVBitStreamFilter */
+static int mxf_get_d10_aes3_packet(ByteIOContext *pb, AVStream *st, AVPacket *pkt, int64_t length)
+{
+ uint8_t buffer[61444];
+ uint8_t *buf_ptr, *end_ptr, *data_ptr;
+
+ if (length > 61444) /* worst case PAL 1920 samples 8 channels */
+ return -1;
+ get_buffer(pb, buffer, length);
+ av_new_packet(pkt, length);
+ data_ptr = pkt->data;
+ end_ptr = buffer + length;
+ buf_ptr = buffer + 4; /* skip SMPTE 331M header */
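+ /* each 32-bit AES3 subframe carries the audio sample in bits 4..27; repack it into 16- or 24-bit PCM */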
+ for (; buf_ptr < end_ptr; buf_ptr += 4) {
+ if (st->codec->bits_per_sample == 24) {
+ data_ptr[0] = (buf_ptr[2] >> 4) | ((buf_ptr[3] & 0x0f) << 4);
+ data_ptr[1] = (buf_ptr[1] >> 4) | ((buf_ptr[2] & 0x0f) << 4);
+ data_ptr[2] = (buf_ptr[0] >> 4) | ((buf_ptr[1] & 0x0f) << 4);
+ data_ptr += 3;
+ } else {
+ data_ptr[0] = (buf_ptr[2] >> 4) | ((buf_ptr[3] & 0x0f) << 4);
+ data_ptr[1] = (buf_ptr[1] >> 4) | ((buf_ptr[2] & 0x0f) << 4);
+ data_ptr += 2;
+ }
+ }
+ pkt->size = data_ptr - pkt->data;
+ return 0;
+}
+
+static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ KLVPacket klv;
+
+ while (!url_feof(&s->pb)) {
+ if (klv_read_packet(&klv, &s->pb) < 0) {
+ av_log(s, AV_LOG_ERROR, "error reading KLV packet\n");
+ return -1;
+ }
+#ifdef DEBUG
+ PRINT_KEY("read packet", klv.key);
+#endif
+ if (IS_KLV_KEY(klv.key, mxf_essence_element_key)) {
+ int index = mxf_get_stream_index(s, &klv);
+ if (index < 0) {
+ av_log(s, AV_LOG_ERROR, "error getting stream index\n");
+ url_fskip(&s->pb, klv.length);
+ return -1;
+ }
+ /* check for 8 channels AES3 element */
+ if (klv.key[12] == 0x06 && klv.key[13] == 0x01 && klv.key[14] == 0x10) {
+ if (mxf_get_d10_aes3_packet(&s->pb, s->streams[index], pkt, klv.length) < 0) {
+ av_log(s, AV_LOG_ERROR, "error reading D-10 aes3 frame\n");
+ return -1;
+ }
+ } else
+ av_get_packet(&s->pb, pkt, klv.length);
+ pkt->stream_index = index;
+ return 0;
+ } else
+ url_fskip(&s->pb, klv.length);
+ }
+ return AVERROR_IO;
+}
+
+static int mxf_add_metadata_set(MXFContext *mxf, void *metadata_set)
+{
+ mxf->metadata_sets = av_realloc(mxf->metadata_sets, (mxf->metadata_sets_count + 1) * sizeof(*mxf->metadata_sets));
+ mxf->metadata_sets[mxf->metadata_sets_count] = metadata_set;
+ mxf->metadata_sets_count++;
+ return 0;
+}
+
+static int mxf_read_metadata_preface(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* SMPTE 336M Table 8 KLV specified length, 0x53 */
+
+ switch (tag) {
+ case 0x3B03:
+ get_buffer(pb, mxf->content_storage_uid, 16);
+ break;
+ case 0x3B09:
+ get_buffer(pb, mxf->operational_pattern_ul, 16);
+ break;
+ case 0x3B0A:
+ mxf->essence_containers_uls_count = get_be32(pb);
+ if (mxf->essence_containers_uls_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ mxf->essence_containers_uls = av_malloc(mxf->essence_containers_uls_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)mxf->essence_containers_uls, mxf->essence_containers_uls_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ return 0;
+}
+
+static int mxf_read_metadata_content_storage(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* SMPTE 336M Table 8 KLV specified length, 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x1901:
+ mxf->packages_count = get_be32(pb);
+ if (mxf->packages_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ mxf->packages_refs = av_malloc(mxf->packages_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)mxf->packages_refs, mxf->packages_count * sizeof(UID));
+ break;
+ case 0x1902:
+ mxf->essence_container_data_sets_count = get_be32(pb);
+ if (mxf->essence_container_data_sets_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ mxf->essence_container_data_sets_refs = av_malloc(mxf->essence_container_data_sets_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)mxf->essence_container_data_sets_refs, mxf->essence_container_data_sets_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ return 0;
+}
+
+static int mxf_read_metadata_source_clip(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFStructuralComponent *source_clip = av_mallocz(sizeof(*source_clip));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* SMPTE 336M Table 8 KLV specified length, 0x53 */
+
+ bytes_read += size + 4;
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ if (!size) { /* ignore empty tag, needed for some files with empty UMID tag */
+ av_log(mxf->fc, AV_LOG_ERROR, "local tag 0x%04X with 0 size\n", tag);
+ continue;
+ }
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, source_clip->uid, 16);
+ break;
+ case 0x0202:
+ source_clip->duration = get_be64(pb);
+ break;
+ case 0x1201:
+ source_clip->start_position = get_be64(pb);
+ break;
+ case 0x1101:
+ /* UMID, only get last 16 bytes */
+ url_fskip(pb, 16);
+ get_buffer(pb, source_clip->source_package_uid, 16);
+ break;
+ case 0x1102:
+ source_clip->source_track_id = get_be32(pb);
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ }
+ source_clip->type = SourceClip;
+ return mxf_add_metadata_set(mxf, source_clip);
+}
+
+static int mxf_read_metadata_material_package(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFPackage *package = av_mallocz(sizeof(*package));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, package->uid, 16);
+ break;
+ case 0x4403:
+ package->tracks_count = get_be32(pb);
+ if (package->tracks_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ package->type = MaterialPackage;
+ return mxf_add_metadata_set(mxf, package);
+}
+
+static int mxf_read_metadata_track(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFTrack *track = av_mallocz(sizeof(*track));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, track->uid, 16);
+ break;
+ case 0x4801:
+ track->track_id = get_be32(pb);
+ break;
+ case 0x4804:
+ get_buffer(pb, track->track_number, 4);
+ break;
+ case 0x4B01:
+ track->edit_rate.den = get_be32(pb);
+ track->edit_rate.num = get_be32(pb);
+ break;
+ case 0x4803:
+ get_buffer(pb, track->sequence_ref, 16);
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ track->type = Track;
+ return mxf_add_metadata_set(mxf, track);
+}
+
+static int mxf_read_metadata_sequence(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFSequence *sequence = av_mallocz(sizeof(*sequence));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, sequence->uid, 16);
+ break;
+ case 0x0202:
+ sequence->duration = get_be64(pb);
+ break;
+ case 0x0201:
+ get_buffer(pb, sequence->data_definition_ul, 16);
+ break;
+ case 0x1001:
+ sequence->structural_components_count = get_be32(pb);
+ if (sequence->structural_components_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ sequence->structural_components_refs = av_malloc(sequence->structural_components_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)sequence->structural_components_refs, sequence->structural_components_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ sequence->type = Sequence;
+ return mxf_add_metadata_set(mxf, sequence);
+}
+
+static int mxf_read_metadata_source_package(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFPackage *package = av_mallocz(sizeof(*package));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, package->uid, 16);
+ break;
+ case 0x4403:
+ package->tracks_count = get_be32(pb);
+ if (package->tracks_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ package->tracks_refs = av_malloc(package->tracks_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)package->tracks_refs, package->tracks_count * sizeof(UID));
+ break;
+ case 0x4401:
+ /* UMID, only get last 16 bytes */
+ url_fskip(pb, 16);
+ get_buffer(pb, package->package_uid, 16);
+ break;
+ case 0x4701:
+ get_buffer(pb, package->descriptor_ref, 16);
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ package->type = SourcePackage;
+ return mxf_add_metadata_set(mxf, package);
+}
+
+static int mxf_read_metadata_multiple_descriptor(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFDescriptor *descriptor = av_mallocz(sizeof(*descriptor));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, descriptor->uid, 16);
+ break;
+ case 0x3F01:
+ descriptor->sub_descriptors_count = get_be32(pb);
+ if (descriptor->sub_descriptors_count >= UINT_MAX / sizeof(UID))
+ return -1;
+ descriptor->sub_descriptors_refs = av_malloc(descriptor->sub_descriptors_count * sizeof(UID));
+ url_fskip(pb, 4); /* useless size of objects, always 16 according to specs */
+ get_buffer(pb, (uint8_t *)descriptor->sub_descriptors_refs, descriptor->sub_descriptors_count * sizeof(UID));
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ descriptor->type = MultipleDescriptor;
+ return mxf_add_metadata_set(mxf, descriptor);
+}
+
+static void mxf_read_metadata_pixel_layout(ByteIOContext *pb, MXFDescriptor *descriptor)
+{
+ int code;
+
+ do {
+ code = get_byte(pb);
+ dprintf("pixel layout: code 0x%x\n", code);
+ switch (code) {
+ case 0x52: /* R */
+ descriptor->bits_per_sample += get_byte(pb);
+ break;
+ case 0x47: /* G */
+ descriptor->bits_per_sample += get_byte(pb);
+ break;
+ case 0x42: /* B */
+ descriptor->bits_per_sample += get_byte(pb);
+ break;
+ default:
+ get_byte(pb);
+ }
+ } while (code != 0); /* SMPTE 377M E.2.46 */
+}
+
+static int mxf_read_metadata_generic_descriptor(MXFContext *mxf, KLVPacket *klv)
+{
+ ByteIOContext *pb = &mxf->fc->pb;
+ MXFDescriptor *descriptor = av_mallocz(sizeof(*descriptor));
+ int bytes_read = 0;
+
+ while (bytes_read < klv->length) {
+ int tag = get_be16(pb);
+ int size = get_be16(pb); /* KLV specified by 0x53 */
+
+ dprintf("tag 0x%04X, size %d\n", tag, size);
+ switch (tag) {
+ case 0x3C0A:
+ get_buffer(pb, descriptor->uid, 16);
+ break;
+ case 0x3004:
+ get_buffer(pb, descriptor->essence_container_ul, 16);
+ break;
+ case 0x3006:
+ descriptor->linked_track_id = get_be32(pb);
+ break;
+ case 0x3201: /* PictureEssenceCoding */
+ get_buffer(pb, descriptor->essence_codec_ul, 16);
+ break;
+ case 0x3203:
+ descriptor->width = get_be32(pb);
+ break;
+ case 0x3202:
+ descriptor->height = get_be32(pb);
+ break;
+ case 0x320E:
+ descriptor->aspect_ratio.num = get_be32(pb);
+ descriptor->aspect_ratio.den = get_be32(pb);
+ break;
+ case 0x3D03:
+ descriptor->sample_rate.num = get_be32(pb);
+ descriptor->sample_rate.den = get_be32(pb);
+ break;
+ case 0x3D06: /* SoundEssenceCompression */
+ get_buffer(pb, descriptor->essence_codec_ul, 16);
+ break;
+ case 0x3D07:
+ descriptor->channels = get_be32(pb);
+ break;
+ case 0x3D01:
+ descriptor->bits_per_sample = get_be32(pb);
+ break;
+ case 0x3401:
+ mxf_read_metadata_pixel_layout(pb, descriptor);
+ break;
+ case 0x8201: /* Private tag used by SONY C0023S01.mxf */
+ descriptor->extradata = av_malloc(size);
+ descriptor->extradata_size = size;
+ get_buffer(pb, descriptor->extradata, size);
+ break;
+ default:
+ url_fskip(pb, size);
+ }
+ bytes_read += size + 4;
+ }
+ descriptor->type = Descriptor;
+ return mxf_add_metadata_set(mxf, descriptor);
+}
+
+/* SMPTE RP224 http://www.smpte-ra.org/mdd/index.html */
+static const MXFDataDefinitionUL mxf_data_definition_uls[] = {
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x02,0x01,0x00,0x00,0x00 }, CODEC_TYPE_VIDEO },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x01,0x03,0x02,0x02,0x02,0x00,0x00,0x00 }, CODEC_TYPE_AUDIO },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x05,0x01,0x03,0x02,0x02,0x02,0x02,0x00,0x00 }, CODEC_TYPE_AUDIO },
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, CODEC_TYPE_DATA },
+};
+
+static const MXFCodecUL mxf_codec_uls[] = {
+ /* PictureEssenceCoding */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x02,0x02,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* 422P@ML I-Frame */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x02,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* 422P@HL I-Frame */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x04,0x03,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* 422P@HL Long GoP */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x01,0x11,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* MP@ML Long GoP */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x02,0x03,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* 422P@ML Long GoP */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x03,0x03,0x00 }, CODEC_ID_MPEG2VIDEO, Frame }, /* MP@HL Long GoP */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x20,0x02,0x03 }, CODEC_ID_MPEG4, Frame }, /* XDCAM proxy_pal030926.mxf */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x03,0x04,0x01,0x02,0x02,0x01,0x20,0x02,0x04 }, CODEC_ID_MPEG4, Frame }, /* XDCAM Proxy C0023S01.mxf */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x05 }, CODEC_ID_MPEG2VIDEO, Frame }, /* D-10 30Mbps PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x01,0x02,0x01,0x01 }, CODEC_ID_MPEG2VIDEO, Frame }, /* D-10 50Mbps PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x04,0x00 }, CODEC_ID_DVVIDEO, Frame }, /* DVCPRO50 PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x02,0x00 }, CODEC_ID_DVVIDEO, Frame }, /* DVCPRO25 PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x01,0x02,0x00 }, CODEC_ID_DVVIDEO, Frame }, /* DV25 IEC PAL */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x07,0x04,0x01,0x02,0x02,0x03,0x01,0x01,0x00 }, CODEC_ID_JPEG2000, Frame }, /* JPEG2000 Codestream */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x01,0x7F,0x00,0x00,0x00 }, CODEC_ID_RAWVIDEO, Frame }, /* Uncompressed */
+ /* SoundEssenceCompression */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x00,0x00,0x00,0x00 }, CODEC_ID_PCM_S16LE, Frame }, /* Uncompressed */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x01,0x7F,0x00,0x00,0x00 }, CODEC_ID_PCM_S16LE, Frame },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x07,0x04,0x02,0x02,0x01,0x7E,0x00,0x00,0x00 }, CODEC_ID_PCM_S16BE, Frame }, /* From Omneon MXF file */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x01,0x01,0x00 }, CODEC_ID_PCM_ALAW, Frame },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x04,0x04,0x02,0x02,0x02,0x03,0x01,0x01,0x00 }, CODEC_ID_PCM_ALAW, Frame }, /* XDCAM Proxy C0023S01.mxf */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x01,0x00 }, CODEC_ID_AC3, Frame },
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x05,0x00 }, CODEC_ID_MP2, Frame }, /* MP2 or MP3 */
+ //{ { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x02,0x02,0x02,0x03,0x02,0x1C,0x00 }, CODEC_ID_DOLBY_E, Frame }, /* Dolby-E */
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, CODEC_ID_NONE, Frame },
+};
+
+static const MXFCodecUL mxf_picture_essence_container_uls[] = {
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0x60,0x01 }, CODEC_ID_MPEG2VIDEO, Frame }, /* MPEG-ES Frame wrapped */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0xe0,0x02 }, CODEC_ID_MPEG2VIDEO, Clip }, /* MPEG-ES Clip wrapped, 0xe0 MPV stream id */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x04,0x61,0x07 }, CODEC_ID_MPEG2VIDEO, Clip }, /* MPEG-ES Custom wrapped, 0x61 ??? stream id */
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, CODEC_ID_NONE, Frame },
+};
+
+static const MXFCodecUL mxf_sound_essence_container_uls[] = {
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x06,0x01,0x00 }, CODEC_ID_PCM_S16LE, Frame }, /* BWF Frame wrapped */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x06,0x03,0x00 }, CODEC_ID_PCM_S16LE, Frame }, /* AES Frame wrapped */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0x40,0x01 }, CODEC_ID_MP2, Frame }, /* MPEG-ES Frame wrapped, 0x40 ??? stream id */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0xc0,0x01 }, CODEC_ID_MP2, Frame }, /* MPEG-ES Frame wrapped, 0xc0 MPA stream id */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x02,0x0D,0x01,0x03,0x01,0x02,0x04,0xc0,0x02 }, CODEC_ID_MP2, Clip }, /* MPEG-ES Clip wrapped, 0xc0 MPA stream id */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x05,0x01 }, CODEC_ID_PCM_S16BE, Frame }, /* D-10 Mapping 30Mbps PAL Extended Template */
+ { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x01,0x01,0x01 }, CODEC_ID_PCM_S16BE, Frame }, /* D-10 Mapping 50Mbps PAL Extended Template */
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, CODEC_ID_NONE, Frame },
+};
+
+static const MXFCodecUL *mxf_get_codec_ul(const MXFCodecUL *uls, UID *uid)
+{
+ while (uls->id != CODEC_ID_NONE) {
+ if(!memcmp(uls->uid, *uid, 16))
+ break;
+ uls++;
+ }
+ return uls;
+}
+
+static enum CodecType mxf_get_codec_type(const MXFDataDefinitionUL *uls, UID *uid)
+{
+ while (uls->type != CODEC_TYPE_DATA) {
+ if(!memcmp(uls->uid, *uid, 16))
+ break;
+ uls++;
+ }
+ return uls->type;
+}
+
+static void *mxf_resolve_strong_ref(MXFContext *mxf, UID *strong_ref)
+{
+ int i;
+
+ if (!strong_ref)
+ return NULL;
+ for (i = 0; i < mxf->metadata_sets_count; i++) {
+ if (!memcmp(*strong_ref, mxf->metadata_sets[i]->uid, 16)) {
+ return mxf->metadata_sets[i];
+ }
+ }
+ return NULL;
+}
+
+static int mxf_parse_structural_metadata(MXFContext *mxf)
+{
+ MXFPackage *material_package = NULL;
+ MXFPackage *temp_package = NULL;
+ int i, j, k;
+
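+ /* Walk the tracks of the (single) material package: follow each SourceClip
+ * to its source package and track, then use that package's descriptor (or
+ * the matching sub-descriptor) to fill codec parameters for the new stream. */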
+ dprintf("metadata sets count %d\n", mxf->metadata_sets_count);
+ /* TODO: handle multiple material packages (OP3x) */
+ for (i = 0; i < mxf->packages_count; i++) {
+ if (!(temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[i]))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve package strong ref\n");
+ return -1;
+ }
+ if (temp_package->type == MaterialPackage) {
+ material_package = temp_package;
+ break;
+ }
+ }
+ if (!material_package) {
+ av_log(mxf->fc, AV_LOG_ERROR, "no material package found\n");
+ return -1;
+ }
+
+ for (i = 0; i < material_package->tracks_count; i++) {
+ MXFPackage *source_package = NULL;
+ MXFTrack *material_track = NULL;
+ MXFTrack *source_track = NULL;
+ MXFTrack *temp_track = NULL;
+ MXFDescriptor *descriptor = NULL;
+ MXFStructuralComponent *component = NULL;
+ const MXFCodecUL *codec_ul = NULL;
+ const MXFCodecUL *container_ul = NULL;
+ AVStream *st;
+
+ if (!(material_track = mxf_resolve_strong_ref(mxf, &material_package->tracks_refs[i]))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track strong ref\n");
+ continue;
+ }
+
+ if (!(material_track->sequence = mxf_resolve_strong_ref(mxf, &material_track->sequence_ref))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve material track sequence strong ref\n");
+ return -1;
+ }
+
+ /* TODO: handle multiple source clips */
+ for (j = 0; j < material_track->sequence->structural_components_count; j++) {
+ /* TODO: handle timecode component */
+ component = mxf_resolve_strong_ref(mxf, &material_track->sequence->structural_components_refs[j]);
+ if (!component || component->type != SourceClip)
+ continue;
+
+ for (k = 0; k < mxf->packages_count; k++) {
+ if (!(temp_package = mxf_resolve_strong_ref(mxf, &mxf->packages_refs[k]))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
+ return -1;
+ }
+ if (!memcmp(temp_package->package_uid, component->source_package_uid, 16)) {
+ source_package = temp_package;
+ break;
+ }
+ }
+ if (!source_package) {
+ av_log(mxf->fc, AV_LOG_ERROR, "material track %d: no corresponding source package found\n", material_track->track_id);
+ break;
+ }
+ for (k = 0; k < source_package->tracks_count; k++) {
+ if (!(temp_track = mxf_resolve_strong_ref(mxf, &source_package->tracks_refs[k]))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track strong ref\n");
+ return -1;
+ }
+ if (temp_track->track_id == component->source_track_id) {
+ source_track = temp_track;
+ break;
+ }
+ }
+ if (!source_track) {
+ av_log(mxf->fc, AV_LOG_ERROR, "material track %d: no corresponding source track found\n", material_track->track_id);
+ break;
+ }
+ }
+ if (!source_track)
+ continue;
+
+ st = av_new_stream(mxf->fc, source_track->track_id);
+ st->priv_data = source_track;
+ st->duration = component->duration;
+ if (st->duration == -1)
+ st->duration = AV_NOPTS_VALUE;
+ st->start_time = component->start_position;
+ av_set_pts_info(st, 64, material_track->edit_rate.num, material_track->edit_rate.den);
+
+ if (!(source_track->sequence = mxf_resolve_strong_ref(mxf, &source_track->sequence_ref))) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve source track sequence strong ref\n");
+ return -1;
+ }
+
+#ifdef DEBUG
+ PRINT_KEY("data definition ul", source_track->sequence->data_definition_ul);
+#endif
+ st->codec->codec_type = mxf_get_codec_type(mxf_data_definition_uls, &source_track->sequence->data_definition_ul);
+
+ source_package->descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor_ref);
+ if (source_package->descriptor) {
+ if (source_package->descriptor->type == MultipleDescriptor) {
+ for (j = 0; j < source_package->descriptor->sub_descriptors_count; j++) {
+ MXFDescriptor *sub_descriptor = mxf_resolve_strong_ref(mxf, &source_package->descriptor->sub_descriptors_refs[j]);
+
+ if (!sub_descriptor) {
+ av_log(mxf->fc, AV_LOG_ERROR, "could not resolve sub descriptor strong ref\n");
+ continue;
+ }
+ if (sub_descriptor->linked_track_id == source_track->track_id) {
+ descriptor = sub_descriptor;
+ break;
+ }
+ }
+ } else
+ descriptor = source_package->descriptor;
+ }
+ if (!descriptor) {
+ av_log(mxf->fc, AV_LOG_INFO, "source track %d: stream %d, no descriptor found\n", source_track->track_id, st->index);
+ continue;
+ }
+#ifdef DEBUG
+ PRINT_KEY("essence codec ul", descriptor->essence_codec_ul);
+ PRINT_KEY("essence container ul", descriptor->essence_container_ul);
+#endif
+ /* TODO: drop PictureEssenceCoding and SoundEssenceCompression, only check EssenceContainer */
+ codec_ul = mxf_get_codec_ul(mxf_codec_uls, &descriptor->essence_codec_ul);
+ st->codec->codec_id = codec_ul->id;
+ if (descriptor->extradata) {
+ st->codec->extradata = descriptor->extradata;
+ st->codec->extradata_size = descriptor->extradata_size;
+ }
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ container_ul = mxf_get_codec_ul(mxf_picture_essence_container_uls, &descriptor->essence_container_ul);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ st->codec->codec_id = container_ul->id;
+ st->codec->width = descriptor->width;
+ st->codec->height = descriptor->height;
+ st->codec->bits_per_sample = descriptor->bits_per_sample; /* Uncompressed */
+ st->need_parsing = 2; /* only parse headers */
+ } else if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ container_ul = mxf_get_codec_ul(mxf_sound_essence_container_uls, &descriptor->essence_container_ul);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ st->codec->codec_id = container_ul->id;
+ st->codec->channels = descriptor->channels;
+ st->codec->bits_per_sample = descriptor->bits_per_sample;
+ st->codec->sample_rate = descriptor->sample_rate.num / descriptor->sample_rate.den;
+ /* TODO: implement CODEC_ID_RAWAUDIO */
+ if (st->codec->codec_id == CODEC_ID_PCM_S16LE) {
+ if (descriptor->bits_per_sample == 24)
+ st->codec->codec_id = CODEC_ID_PCM_S24LE;
+ else if (descriptor->bits_per_sample == 32)
+ st->codec->codec_id = CODEC_ID_PCM_S32LE;
+ } else if (st->codec->codec_id == CODEC_ID_PCM_S16BE) {
+ if (descriptor->bits_per_sample == 24)
+ st->codec->codec_id = CODEC_ID_PCM_S24BE;
+ else if (descriptor->bits_per_sample == 32)
+ st->codec->codec_id = CODEC_ID_PCM_S32BE;
+ if (descriptor->essence_container_ul[13] == 0x01) /* D-10 Mapping */
+ st->codec->channels = 8; /* force channels to 8 */
+ } else if (st->codec->codec_id == CODEC_ID_MP2) {
+ st->need_parsing = 1;
+ }
+ }
+ if (container_ul && container_ul->wrapping == Clip) {
+ dprintf("stream %d: clip wrapped essence\n", st->index);
+ st->need_parsing = 1;
+ }
+ }
+ return 0;
+}
+
+static const MXFMetadataReadTableEntry mxf_metadata_read_table[] = {
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x2F,0x00 }, mxf_read_metadata_preface },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x18,0x00 }, mxf_read_metadata_content_storage },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x37,0x00 }, mxf_read_metadata_source_package },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x36,0x00 }, mxf_read_metadata_material_package },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x0F,0x00 }, mxf_read_metadata_sequence },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x11,0x00 }, mxf_read_metadata_source_clip },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x44,0x00 }, mxf_read_metadata_multiple_descriptor },
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x42,0x00 }, mxf_read_metadata_generic_descriptor }, /* Generic Sound */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x28,0x00 }, mxf_read_metadata_generic_descriptor }, /* CDCI */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x29,0x00 }, mxf_read_metadata_generic_descriptor }, /* RGBA */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x51,0x00 }, mxf_read_metadata_generic_descriptor }, /* MPEG 2 Video */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x48,0x00 }, mxf_read_metadata_generic_descriptor }, /* Wave */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x47,0x00 }, mxf_read_metadata_generic_descriptor }, /* AES3 */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3A,0x00 }, mxf_read_metadata_track }, /* Static Track */
+ { { 0x06,0x0E,0x2B,0x34,0x02,0x53,0x01,0x01,0x0d,0x01,0x01,0x01,0x01,0x01,0x3B,0x00 }, mxf_read_metadata_track }, /* Generic Track */
+ { { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }, NULL },
+};
+
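+/* Scan the stream byte by byte until the first 'size' bytes of 'key' occur
+ * consecutively; returns 1 with the stream positioned just past the match,
+ * or 0 on EOF. */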
+static int mxf_read_sync(ByteIOContext *pb, const uint8_t *key, unsigned size)
+{
+ int i, b;
+ for (i = 0; i < size && !url_feof(pb); i++) {
+ b = get_byte(pb);
+ if (b == key[0])
+ i = 0;
+ else if (b != key[i])
+ i = -1;
+ }
+ return i == size;
+}
+
+static int mxf_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ MXFContext *mxf = s->priv_data;
+ KLVPacket klv;
+
+ if (!mxf_read_sync(&s->pb, mxf_header_partition_pack_key, 14)) {
+ av_log(s, AV_LOG_ERROR, "could not find header partition pack key\n");
+ return -1;
+ }
+ url_fseek(&s->pb, -14, SEEK_CUR);
+ mxf->fc = s;
+ while (!url_feof(&s->pb)) {
+ const MXFMetadataReadTableEntry *function;
+
+ if (klv_read_packet(&klv, &s->pb) < 0) {
+ av_log(s, AV_LOG_ERROR, "error reading KLV packet\n");
+ return -1;
+ }
+#ifdef DEBUG
+ PRINT_KEY("read header", klv.key);
+#endif
+ if (IS_KLV_KEY(klv.key, mxf_essence_element_key)) {
+ /* FIXME avoid seek */
+ url_fseek(&s->pb, klv.offset, SEEK_SET);
+ break;
+ }
+
+ for (function = mxf_metadata_read_table; function->read; function++) {
+ if (IS_KLV_KEY(klv.key, function->key)) {
+ if (function->read(mxf, &klv) < 0) {
+ av_log(s, AV_LOG_ERROR, "error reading header metadata\n");
+ return -1;
+ }
+ break;
+ }
+ }
+ if (!function->read)
+ url_fskip(&s->pb, klv.length);
+ }
+ return mxf_parse_structural_metadata(mxf);
+}
+
+static int mxf_read_close(AVFormatContext *s)
+{
+ MXFContext *mxf = s->priv_data;
+ int i;
+
+ av_freep(&mxf->packages_refs);
+ av_freep(&mxf->essence_container_data_sets_refs);
+ av_freep(&mxf->essence_containers_uls);
+ for (i = 0; i < mxf->metadata_sets_count; i++) {
+ switch (mxf->metadata_sets[i]->type) {
+ case MultipleDescriptor:
+ av_freep(&((MXFDescriptor *)mxf->metadata_sets[i])->sub_descriptors_refs);
+ break;
+ case Sequence:
+ av_freep(&((MXFSequence *)mxf->metadata_sets[i])->structural_components_refs);
+ break;
+ case SourcePackage:
+ case MaterialPackage:
+ av_freep(&((MXFPackage *)mxf->metadata_sets[i])->tracks_refs);
+ break;
+ default:
+ break;
+ }
+ av_freep(&mxf->metadata_sets[i]);
+ }
+ av_freep(&mxf->metadata_sets);
+ return 0;
+}
+
+static int mxf_probe(AVProbeData *p) {
+ uint8_t *bufp = p->buf;
+ uint8_t *end = p->buf + p->buf_size;
+
+ if (p->buf_size < sizeof(mxf_header_partition_pack_key))
+ return 0;
+
+ /* Must skip Run-In Sequence and search for MXF header partition pack key SMPTE 377M 5.5 */
+ end -= sizeof(mxf_header_partition_pack_key);
+ for (; bufp < end; bufp++) {
+ if (IS_KLV_KEY(bufp, mxf_header_partition_pack_key))
+ return AVPROBE_SCORE_MAX;
+ }
+ return 0;
+}
+
+/* rudimentary binary seek */
+/* XXX: use MXF Index */
+static int mxf_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
+{
+ AVStream *st = s->streams[stream_index];
+ int64_t seconds;
+
+ if (!s->bit_rate)
+ return -1;
+ if (sample_time < 0)
+ sample_time = 0;
+ seconds = av_rescale(sample_time, st->time_base.num, st->time_base.den);
+ url_fseek(&s->pb, (s->bit_rate * seconds) >> 3, SEEK_SET);
+ if (!mxf_read_sync(&s->pb, mxf_essence_element_key, 12))
+ return -1;
+
+ /* found KLV key */
+ url_fseek(&s->pb, -12, SEEK_CUR);
+ av_update_cur_dts(s, st, sample_time);
+ return 0;
+}
+
+AVInputFormat mxf_demuxer = {
+ "mxf",
+ "MXF format",
+ sizeof(MXFContext),
+ mxf_probe,
+ mxf_read_header,
+ mxf_read_packet,
+ mxf_read_close,
+ mxf_read_seek,
+};
diff --git a/contrib/ffmpeg/libavformat/nsvdec.c b/contrib/ffmpeg/libavformat/nsvdec.c
new file mode 100644
index 000000000..9a5fe97f8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nsvdec.c
@@ -0,0 +1,763 @@
+/*
+ * NSV demuxer
+ * Copyright (c) 2004 The FFmpeg Project.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+
+//#define DEBUG
+//#define DEBUG_DUMP_INDEX // XXX dumbdriving-271.nsv breaks with it commented!!
+//#define DEBUG_SEEK
+#define CHECK_SUBSEQUENT_NSVS
+//#define DISABLE_AUDIO
+
+/* max bytes to crawl for trying to resync
+ * stupid streaming servers don't start at chunk boundaries...
+ */
+#define NSV_MAX_RESYNC (500*1024)
+#define NSV_MAX_RESYNC_TRIES 300
+
+/*
+ * First version by Francois Revol - revol@free.fr
+ * References:
+ * (1) http://www.multimedia.cx/nsv-format.txt
+ * seems someone came to the same conclusions as me, and updated it:
+ * (2) http://www.stud.ktu.lt/~vitslav/nsv/nsv-format.txt
+ * http://www.stud.ktu.lt/~vitslav/nsv/
+ * official docs
+ * (3) http://ultravox.aol.com/NSVFormat.rtf
+ * Sample files:
+ * (S1) http://www.nullsoft.com/nsv/samples/
+ * http://www.nullsoft.com/nsv/samples/faster.nsv
+ * http://streamripper.sourceforge.net/openbb/read.php?TID=492&page=4
+ */
+
+/*
+ * notes on the header (Francois Revol):
+ *
+ * It is followed by strings, then a table, but nothing tells
+ * where the table begins according to (1). After checking faster.nsv,
+ * I believe NSVf[16-19] gives the size of the strings data
+ * (that is, the offset of the data table after the header).
+ * Checking all samples from (S1) confirms this.
+ *
+ * Then, about NSVf[12-15]: faster.nsv has 179700. When viewing it in VLC,
+ * I noticed there was about 1 NSVs chunk/s, so I ran
+ * strings faster.nsv | grep NSVs | wc -l
+ * which gave me 180. That leads me to think that NSVf[12-15] might be the
+ * file length in milliseconds.
+ * Let's try that:
+ * for f in *.nsv; do HTIME="$(od -t x4 "$f" | head -1 | sed 's/.* //')"; echo "'$f' $((0x$HTIME))s = $((0x$HTIME/1000/60)):$((0x$HTIME/1000%60))"; done
+ * except for nsvtrailer.nsv (which doesn't have an NSVf header), it reports the correct time.
+ *
+ * nsvtrailer.nsv (S1) does not have any NSVf header, only NSVs chunks,
+ * so the header does not seem to be mandatory (for streaming).
+ *
+ * index slice duration check (except nsvtrailer.nsv):
+ * for f in [^n]*.nsv; do DUR="$(ffmpeg -i "$f" 2>/dev/null | grep 'NSVf duration' | cut -d ' ' -f 4)"; IC="$(ffmpeg -i "$f" 2>/dev/null | grep 'INDEX ENTRIES' | cut -d ' ' -f 2)"; echo "duration $DUR, slice time $(($DUR/$IC))"; done
+ */
+
+/*
+ * TODO:
+ * - handle timestamps !!!
+ * - use index
+ * - mime-type in probe()
+ * - seek
+ */
+
+#ifdef DEBUG
+#define PRINT(_v) printf _v
+#else
+#define PRINT(_v)
+#endif
+
+#if 0
+struct NSVf_header {
+ uint32_t chunk_tag; /* 'NSVf' */
+ uint32_t chunk_size;
+ uint32_t file_size; /* max 4GB ??? no one learns anything it seems :^) */
+ uint32_t file_length; //unknown1; /* what about MSB of file_size ? */
+ uint32_t info_strings_size; /* size of the info strings */ //unknown2;
+ uint32_t table_entries;
+ uint32_t table_entries_used; /* the left ones should be -1 */
+};
+
+struct NSVs_header {
+ uint32_t chunk_tag; /* 'NSVs' */
+ uint32_t v4cc; /* or 'NONE' */
+ uint32_t a4cc; /* or 'NONE' */
+ uint16_t vwidth; /* assert(vwidth%16==0) */
+ uint16_t vheight; /* assert(vheight%16==0) */
+ uint8_t framerate; /* value = (framerate&0x80)?frtable[framerate&0x7f]:framerate */
+ uint16_t unknown;
+};
+
+struct nsv_avchunk_header {
+ uint8_t vchunk_size_lsb;
+ uint16_t vchunk_size_msb; /* value = (vchunk_size_msb << 4) | (vchunk_size_lsb >> 4) */
+ uint16_t achunk_size;
+};
+
+struct nsv_pcm_header {
+ uint8_t bits_per_sample;
+ uint8_t channel_count;
+ uint16_t sample_rate;
+};
+#endif
+
+/* variation from avi.h */
+/*typedef struct CodecTag {
+ int id;
+ unsigned int tag;
+} CodecTag;*/
+
+/* tags */
+
+#define T_NSVF MKTAG('N', 'S', 'V', 'f') /* file header */
+#define T_NSVS MKTAG('N', 'S', 'V', 's') /* chunk header */
+#define T_TOC2 MKTAG('T', 'O', 'C', '2') /* extra index marker */
+#define T_NONE MKTAG('N', 'O', 'N', 'E') /* null a/v 4CC */
+#define T_SUBT MKTAG('S', 'U', 'B', 'T') /* subtitle aux data */
+#define T_ASYN MKTAG('A', 'S', 'Y', 'N') /* async a/v aux marker */
+#define T_KEYF MKTAG('K', 'E', 'Y', 'F') /* video keyframe aux marker (addition) */
+
+#define TB_NSVF MKBETAG('N', 'S', 'V', 'f')
+#define TB_NSVS MKBETAG('N', 'S', 'V', 's')
+
+/* hardcoded stream indices */
+#define NSV_ST_VIDEO 0
+#define NSV_ST_AUDIO 1
+#define NSV_ST_SUBT 2
+
+enum NSVStatus {
+ NSV_UNSYNC,
+ NSV_FOUND_NSVF,
+ NSV_HAS_READ_NSVF,
+ NSV_FOUND_NSVS,
+ NSV_HAS_READ_NSVS,
+ NSV_FOUND_BEEF,
+ NSV_GOT_VIDEO,
+ NSV_GOT_AUDIO,
+};
+
+typedef struct NSVStream {
+ int frame_offset; /* current frame (video) or byte (audio) counter
+ (used to compute the pts) */
+ int scale;
+ int rate;
+ int sample_size; /* audio only data */
+ int start;
+
+ int new_frame_offset; /* temporary storage (used during seek) */
+ int cum_len; /* temporary storage (used during seek) */
+} NSVStream;
+
+typedef struct {
+ int base_offset;
+ int NSVf_end;
+ uint32_t *nsvf_index_data;
+ int index_entries;
+ enum NSVStatus state;
+ AVPacket ahead[2]; /* [v, a]: if .data is non-NULL, a packet is buffered */
+ /* cached */
+ int64_t duration;
+ uint32_t vtag, atag;
+ uint16_t vwidth, vheight;
+ int16_t avsync;
+ //DVDemuxContext* dv_demux;
+} NSVContext;
+
+static const CodecTag nsv_codec_video_tags[] = {
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', ' ') },
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
+ { CODEC_ID_VP5, MKTAG('V', 'P', '5', ' ') },
+ { CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
+/*
+ { CODEC_ID_VP4, MKTAG('V', 'P', '4', ' ') },
+ { CODEC_ID_VP4, MKTAG('V', 'P', '4', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', ' ') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '1') },
+*/
+ { CODEC_ID_XVID, MKTAG('X', 'V', 'I', 'D') }, /* cf sample xvid decoder from nsv_codec_sdk.zip */
+ { CODEC_ID_RAWVIDEO, MKTAG('R', 'G', 'B', '3') },
+ { 0, 0 },
+};
+
+static const CodecTag nsv_codec_audio_tags[] = {
+ { CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') },
+ { CODEC_ID_AAC, MKTAG('A', 'A', 'C', ' ') },
+ { CODEC_ID_AAC, MKTAG('A', 'A', 'C', 'P') }, /* _CUTTED__MUXED_2 Heads - Out Of The City.nsv */
+ { CODEC_ID_PCM_U16LE, MKTAG('P', 'C', 'M', ' ') },
+ { 0, 0 },
+};
+
+//static int nsv_load_index(AVFormatContext *s);
+static int nsv_read_chunk(AVFormatContext *s, int fill_header);
+
+#ifdef DEBUG
+static void print_tag(const char *str, unsigned int tag, int size)
+{
+ printf("%s: tag=%c%c%c%c\n",
+ str, tag & 0xff,
+ (tag >> 8) & 0xff,
+ (tag >> 16) & 0xff,
+ (tag >> 24) & 0xff);
+}
+#endif
+
+/* try to find something we recognize, and set the state accordingly */
+static int nsv_resync(AVFormatContext *s)
+{
+ NSVContext *nsv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t v = 0;
+ int i;
+
+ PRINT(("%s(), offset = %"PRId64", state = %d\n", __FUNCTION__, url_ftell(pb), nsv->state));
+
+ //nsv->state = NSV_UNSYNC;
+
+ for (i = 0; i < NSV_MAX_RESYNC; i++) {
+ if (url_feof(pb)) {
+ PRINT(("NSV EOF\n"));
+ nsv->state = NSV_UNSYNC;
+ return -1;
+ }
+ v <<= 8;
+ v |= get_byte(pb);
+/*
+ if (i < 8) {
+ PRINT(("NSV resync: [%d] = %02x\n", i, v & 0x0FF));
+ }
+*/
+
+ if ((v & 0x0000ffff) == 0xefbe) { /* BEEF */
+ PRINT(("NSV resynced on BEEF after %d bytes\n", i+1));
+ nsv->state = NSV_FOUND_BEEF;
+ return 0;
+ }
+ /* we read as big endian, thus the MK*BE* */
+ if (v == TB_NSVF) { /* NSVf */
+ PRINT(("NSV resynced on NSVf after %d bytes\n", i+1));
+ nsv->state = NSV_FOUND_NSVF;
+ return 0;
+ }
+ if (v == MKBETAG('N', 'S', 'V', 's')) { /* NSVs */
+ PRINT(("NSV resynced on NSVs after %d bytes\n", i+1));
+ nsv->state = NSV_FOUND_NSVS;
+ return 0;
+ }
+
+ }
+ PRINT(("NSV sync lost\n"));
+ return -1;
+}
+
+static int nsv_parse_NSVf_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NSVContext *nsv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned int file_size, size;
+ int64_t duration;
+ int strings_size;
+ int table_entries;
+ int table_entries_used;
+
+ PRINT(("%s()\n", __FUNCTION__));
+
+ nsv->state = NSV_UNSYNC; /* in case we fail */
+
+ size = get_le32(pb);
+ if (size < 28)
+ return -1;
+ nsv->NSVf_end = size;
+
+ //s->file_size = (uint32_t)get_le32(pb);
+ file_size = (uint32_t)get_le32(pb);
+ PRINT(("NSV NSVf chunk_size %u\n", size));
+ PRINT(("NSV NSVf file_size %u\n", file_size));
+
+ nsv->duration = duration = get_le32(pb); /* in ms */
+ PRINT(("NSV NSVf duration %"PRId64" ms\n", duration));
+ // XXX: store it in AVStreams
+
+ strings_size = get_le32(pb);
+ table_entries = get_le32(pb);
+ table_entries_used = get_le32(pb);
+ PRINT(("NSV NSVf info-strings size: %d, table entries: %d, bis %d\n",
+ strings_size, table_entries, table_entries_used));
+ if (url_feof(pb))
+ return -1;
+
+ PRINT(("NSV got header; filepos %"PRId64"\n", url_ftell(pb)));
+
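+ /* The info area is a list of TOKEN='value' pairs (the quote character is
+ * whatever follows '='); a few well-known tokens are copied into the
+ * AVFormatContext string fields below. */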
+ if (strings_size > 0) {
+ char *strings; /* last byte will be '\0' to play safe with str*() */
+ char *p, *endp;
+ char *token, *value;
+ char quote;
+
+ p = strings = av_mallocz(strings_size + 1);
+ endp = strings + strings_size;
+ get_buffer(pb, strings, strings_size);
+ while (p < endp) {
+ while (*p == ' ')
+ p++; /* strip out spaces */
+ if (p >= endp-2)
+ break;
+ token = p;
+ p = strchr(p, '=');
+ if (!p || p >= endp-2)
+ break;
+ *p++ = '\0';
+ quote = *p++;
+ value = p;
+ p = strchr(p, quote);
+ if (!p || p >= endp)
+ break;
+ *p++ = '\0';
+ PRINT(("NSV NSVf INFO: %s='%s'\n", token, value));
+ if (!strcmp(token, "ASPECT")) {
+ /* don't care */
+ } else if (!strcmp(token, "CREATOR") || !strcmp(token, "Author")) {
+ strncpy(s->author, value, 512-1);
+ } else if (!strcmp(token, "Copyright")) {
+ strncpy(s->copyright, value, 512-1);
+ } else if (!strcmp(token, "TITLE") || !strcmp(token, "Title")) {
+ strncpy(s->title, value, 512-1);
+ }
+ }
+ av_free(strings);
+ }
+ if (url_feof(pb))
+ return -1;
+
+ PRINT(("NSV got infos; filepos %"PRId64"\n", url_ftell(pb)));
+
+ if (table_entries_used > 0) {
+ nsv->index_entries = table_entries_used;
+ if((unsigned)table_entries >= UINT_MAX / sizeof(uint32_t))
+ return -1;
+ nsv->nsvf_index_data = av_malloc(table_entries * sizeof(uint32_t));
+#warning "FIXME: Byteswap buffer as needed"
+ get_buffer(pb, (unsigned char *)nsv->nsvf_index_data, table_entries * sizeof(uint32_t));
+ }
+
+ PRINT(("NSV got index; filepos %"PRId64"\n", url_ftell(pb)));
+
+#ifdef DEBUG_DUMP_INDEX
+#define V(v) ((v<0x20 || v > 127)?'.':v)
+ /* dump index */
+ PRINT(("NSV %d INDEX ENTRIES:\n", table_entries));
+ PRINT(("NSV [dataoffset][fileoffset]\n", table_entries));
+ for (i = 0; i < table_entries; i++) {
+ unsigned char b[8];
+ url_fseek(pb, size + nsv->nsvf_index_data[i], SEEK_SET);
+ get_buffer(pb, b, 8);
+ PRINT(("NSV [0x%08lx][0x%08lx]: %02x %02x %02x %02x %02x %02x %02x %02x"
+ "%c%c%c%c%c%c%c%c\n",
+ nsv->nsvf_index_data[i], size + nsv->nsvf_index_data[i],
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+ V(b[0]), V(b[1]), V(b[2]), V(b[3]), V(b[4]), V(b[5]), V(b[6]), V(b[7]) ));
+ }
+ //url_fseek(pb, size, SEEK_SET); /* go back to end of header */
+#undef V
+#endif
+
+ url_fseek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */
+
+ if (url_feof(pb))
+ return -1;
+ nsv->state = NSV_HAS_READ_NSVF;
+ return 0;
+}
+
+static int nsv_parse_NSVs_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NSVContext *nsv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint32_t vtag, atag;
+ uint16_t vwidth, vheight;
+ AVRational framerate;
+ int i;
+ AVStream *st;
+ NSVStream *nst;
+ PRINT(("%s()\n", __FUNCTION__));
+
+ vtag = get_le32(pb);
+ atag = get_le32(pb);
+ vwidth = get_le16(pb);
+ vheight = get_le16(pb);
+ i = get_byte(pb);
+
+ PRINT(("NSV NSVs framerate code %2x\n", i));
+ if(i&0x80) { /* odd way of giving native framerates from docs */
+ int t=(i & 0x7F)>>2;
+ if(t<16) framerate = (AVRational){1, t+1};
+ else framerate = (AVRational){t-15, 1};
+
+ if(i&1){
+ framerate.num *= 1000;
+ framerate.den *= 1001;
+ }
+
+ if((i&3)==3) framerate.num *= 24;
+ else if((i&3)==2) framerate.num *= 25;
+ else framerate.num *= 30;
+ }
+ else
+ framerate= (AVRational){i, 1};
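+ /* e.g. code 0x81: t = 0 gives a 1/1 base rate, the low bit scales it by
+ * 1000/1001 and (i&3)==1 picks the x30 multiplier, yielding 30000/1001
+ * (~29.97 fps); a code without the high bit, such as 25, simply means
+ * 25/1 fps. */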
+
+ nsv->avsync = get_le16(pb);
+#ifdef DEBUG
+ print_tag("NSV NSVs vtag", vtag, 0);
+ print_tag("NSV NSVs atag", atag, 0);
+ PRINT(("NSV NSVs vsize %dx%d\n", vwidth, vheight));
+#endif
+
+ /* XXX change to ap != NULL ? */
+ if (s->nb_streams == 0) { /* streams not yet published, let's do that */
+ nsv->vtag = vtag;
+ nsv->atag = atag;
+ nsv->vwidth = vwidth;
+ nsv->vheight = vheight;
+ if (vtag != T_NONE) {
+ st = av_new_stream(s, NSV_ST_VIDEO);
+ if (!st)
+ goto fail;
+
+ nst = av_mallocz(sizeof(NSVStream));
+ if (!nst)
+ goto fail;
+ st->priv_data = nst;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_tag = vtag;
+ st->codec->codec_id = codec_get_id(nsv_codec_video_tags, vtag);
+ st->codec->width = vwidth;
+ st->codec->height = vheight;
+ st->codec->bits_per_sample = 24; /* depth XXX */
+
+ av_set_pts_info(st, 64, framerate.den, framerate.num);
+ st->start_time = 0;
+ st->duration = av_rescale(nsv->duration, framerate.num, 1000*framerate.den);
+ }
+ if (atag != T_NONE) {
+#ifndef DISABLE_AUDIO
+ st = av_new_stream(s, NSV_ST_AUDIO);
+ if (!st)
+ goto fail;
+
+ nst = av_mallocz(sizeof(NSVStream));
+ if (!nst)
+ goto fail;
+ st->priv_data = nst;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = atag;
+ st->codec->codec_id = codec_get_id(nsv_codec_audio_tags, atag);
+
+ st->need_parsing = 1; /* for PCM we will read a chunk later and put correct info */
+
+ /* set timebase to common denominator of ms and framerate */
+ av_set_pts_info(st, 64, 1, framerate.num*1000);
+ st->start_time = 0;
+ st->duration = (int64_t)nsv->duration * framerate.num;
+#endif
+ }
+#ifdef CHECK_SUBSEQUENT_NSVS
+ } else {
+ if (nsv->vtag != vtag || nsv->atag != atag || nsv->vwidth != vwidth || nsv->vheight != vheight) {
+ PRINT(("NSV NSVs header values differ from the first one!!!\n"));
+ //return -1;
+ }
+#endif /* CHECK_SUBSEQUENT_NSVS */
+ }
+
+ nsv->state = NSV_HAS_READ_NSVS;
+ return 0;
+fail:
+ /* XXX */
+ nsv->state = NSV_UNSYNC;
+ return -1;
+}
+
+static int nsv_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NSVContext *nsv = s->priv_data;
+ int i, err;
+
+ PRINT(("%s()\n", __FUNCTION__));
+ PRINT(("filename '%s'\n", s->filename));
+
+ nsv->state = NSV_UNSYNC;
+ nsv->ahead[0].data = nsv->ahead[1].data = NULL;
+
+ for (i = 0; i < NSV_MAX_RESYNC_TRIES; i++) {
+ if (nsv_resync(s) < 0)
+ return -1;
+ if (nsv->state == NSV_FOUND_NSVF)
+ err = nsv_parse_NSVf_header(s, ap);
+ /* we need the first NSVs also... */
+ if (nsv->state == NSV_FOUND_NSVS) {
+ err = nsv_parse_NSVs_header(s, ap);
+ break; /* we just want the first one */
+ }
+ }
+ if (s->nb_streams < 1) /* no luck so far */
+ return -1;
+ /* now read the first chunk, so we can attempt to decode more info */
+ err = nsv_read_chunk(s, 1);
+
+ PRINT(("parsed header\n"));
+ return 0;
+}
+
+static int nsv_read_chunk(AVFormatContext *s, int fill_header)
+{
+ NSVContext *nsv = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st[2] = {NULL, NULL};
+ NSVStream *nst;
+ AVPacket *pkt;
+ int i, err = 0;
+ uint8_t auxcount; /* number of aux metadata, also 4 bits of vsize */
+ uint32_t vsize;
+ uint16_t asize;
+ uint16_t auxsize;
+ uint32_t auxtag;
+
+ PRINT(("%s(%d)\n", __FUNCTION__, fill_header));
+
+ if (nsv->ahead[0].data || nsv->ahead[1].data)
+ return 0; //-1; /* hey! eat what's on your plate first! */
+
+null_chunk_retry:
+ if (url_feof(pb))
+ return -1;
+
+ for (i = 0; i < NSV_MAX_RESYNC_TRIES && nsv->state < NSV_FOUND_NSVS && !err; i++)
+ err = nsv_resync(s);
+ if (err < 0)
+ return err;
+ if (nsv->state == NSV_FOUND_NSVS)
+ err = nsv_parse_NSVs_header(s, NULL);
+ if (err < 0)
+ return err;
+ if (nsv->state != NSV_HAS_READ_NSVS && nsv->state != NSV_FOUND_BEEF)
+ return -1;
+
+ auxcount = get_byte(pb);
+ vsize = get_le16(pb);
+ asize = get_le16(pb);
+ vsize = (vsize << 4) | (auxcount >> 4);
+ auxcount &= 0x0f;
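+ /* vsize is split between the 16-bit field and the high nibble of the
+ * auxcount byte: e.g. auxcount 0x25 with a vsize field of 0x0100 gives
+ * vsize = 0x1002 and 5 aux records. */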
+ PRINT(("NSV CHUNK %d aux, %u bytes video, %d bytes audio\n", auxcount, vsize, asize));
+ /* skip aux stuff */
+ for (i = 0; i < auxcount; i++) {
+ auxsize = get_le16(pb);
+ auxtag = get_le32(pb);
+ PRINT(("NSV aux data: '%c%c%c%c', %d bytes\n",
+ (auxtag & 0x0ff),
+ ((auxtag >> 8) & 0x0ff),
+ ((auxtag >> 16) & 0x0ff),
+ ((auxtag >> 24) & 0x0ff),
+ auxsize));
+ url_fskip(pb, auxsize);
+ vsize -= auxsize + sizeof(uint16_t) + sizeof(uint32_t); /* that's becoming braindead */
+ }
+
+ if (url_feof(pb))
+ return -1;
+ if (!vsize && !asize) {
+ nsv->state = NSV_UNSYNC;
+ goto null_chunk_retry;
+ }
+
+ /* map back streams to v,a */
+ if (s->streams[0])
+ st[s->streams[0]->id] = s->streams[0];
+ if (s->streams[1])
+ st[s->streams[1]->id] = s->streams[1];
+
+ if (vsize/* && st[NSV_ST_VIDEO]*/) {
+ nst = st[NSV_ST_VIDEO]->priv_data;
+ pkt = &nsv->ahead[NSV_ST_VIDEO];
+ av_get_packet(pb, pkt, vsize);
+ pkt->stream_index = st[NSV_ST_VIDEO]->index;//NSV_ST_VIDEO;
+ pkt->dts = nst->frame_offset++;
+ pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
+/*
+ for (i = 0; i < MIN(8, vsize); i++)
+ PRINT(("NSV video: [%d] = %02x\n", i, pkt->data[i]));
+*/
+ }
+ if (asize/*st[NSV_ST_AUDIO]*/) {
+ nst = st[NSV_ST_AUDIO]->priv_data;
+ pkt = &nsv->ahead[NSV_ST_AUDIO];
+ /* read raw audio specific header on the first audio chunk... */
+ /* on ALL audio chunks ?? seems so! */
+ if (asize && st[NSV_ST_AUDIO]->codec->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) {
+ uint8_t bps;
+ uint8_t channels;
+ uint16_t samplerate;
+ bps = get_byte(pb);
+ channels = get_byte(pb);
+ samplerate = get_le16(pb);
+ asize-=4;
+ PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate));
+ if (fill_header) {
+ st[NSV_ST_AUDIO]->need_parsing = 0; /* we know everything */
+ if (bps != 16) {
+ PRINT(("NSV AUDIO bit/sample != 16 (%d)!!!\n", bps));
+ }
+ bps /= channels; // ???
+ if (bps == 8)
+ st[NSV_ST_AUDIO]->codec->codec_id = CODEC_ID_PCM_U8;
+ samplerate /= 4;/* UGH ??? XXX */
+ channels = 1;
+ st[NSV_ST_AUDIO]->codec->channels = channels;
+ st[NSV_ST_AUDIO]->codec->sample_rate = samplerate;
+ PRINT(("NSV RAWAUDIO: bps %d, nchan %d, srate %d\n", bps, channels, samplerate));
+ }
+ }
+ av_get_packet(pb, pkt, asize);
+ pkt->stream_index = st[NSV_ST_AUDIO]->index;//NSV_ST_AUDIO;
+ pkt->flags |= nsv->state == NSV_HAS_READ_NSVS ? PKT_FLAG_KEY : 0; /* keyframe only likely on a sync frame */
+ if( nsv->state == NSV_HAS_READ_NSVS && st[NSV_ST_VIDEO] ) {
+ /* on a nsvs frame we have new information on a/v sync */
+ pkt->dts = (((NSVStream*)st[NSV_ST_VIDEO]->priv_data)->frame_offset-1);
+ pkt->dts *= (int64_t)1000 * st[NSV_ST_VIDEO]->time_base.num;
+ pkt->dts += (int64_t)nsv->avsync * st[NSV_ST_VIDEO]->time_base.den;
+ PRINT(("NSV AUDIO: sync:%d, dts:%"PRId64, nsv->avsync, pkt->dts));
+ }
+ nst->frame_offset++;
+ }
+
+ nsv->state = NSV_UNSYNC;
+ return 0;
+}
+
+
+static int nsv_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ NSVContext *nsv = s->priv_data;
+ int i, err = 0;
+
+ PRINT(("%s()\n", __FUNCTION__));
+
+ /* in case we don't already have something to eat ... */
+ if (nsv->ahead[0].data == NULL && nsv->ahead[1].data == NULL)
+ err = nsv_read_chunk(s, 0);
+ if (err < 0)
+ return err;
+
+ /* now pick one of the plates */
+ for (i = 0; i < 2; i++) {
+ if (nsv->ahead[i].data) {
+ PRINT(("%s: using cached packet[%d]\n", __FUNCTION__, i));
+ /* avoid the cost of new_packet + memcpy(->data) */
+ memcpy(pkt, &nsv->ahead[i], sizeof(AVPacket));
+ nsv->ahead[i].data = NULL; /* we ate that one */
+ return pkt->size;
+ }
+ }
+
+ /* this restaurant is not provisioned :^] */
+ return -1;
+}
+
+static int nsv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+#if 0
+ NSVContext *avi = s->priv_data;
+ AVStream *st;
+ NSVStream *ast;
+ int frame_number, i;
+ int64_t pos;
+#endif
+
+ return -1;
+}
+
+static int nsv_read_close(AVFormatContext *s)
+{
+/* int i; */
+ NSVContext *nsv = s->priv_data;
+
+ if (nsv->index_entries)
+ av_free(nsv->nsvf_index_data);
+
+#if 0
+
+ for(i=0;i<s->nb_streams;i++) {
+ AVStream *st = s->streams[i];
+ NSVStream *ast = st->priv_data;
+ if(ast){
+ av_free(ast->index_entries);
+ av_free(ast);
+ }
+ av_free(st->codec->palctrl);
+ }
+
+#endif
+ return 0;
+}
+
+static int nsv_probe(AVProbeData *p)
+{
+ int i;
+// PRINT(("nsv_probe(), buf_size %d\n", p->buf_size));
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'N' && p->buf[1] == 'S' &&
+ p->buf[2] == 'V' && p->buf[3] == 'f')
+ return AVPROBE_SCORE_MAX;
+ /* streamed files might not have any header */
+ if (p->buf[0] == 'N' && p->buf[1] == 'S' &&
+ p->buf[2] == 'V' && p->buf[3] == 's')
+ return AVPROBE_SCORE_MAX;
+ /* XXX: do streamed files always start at chunk boundary ?? */
+ /* or do we need to search NSVs in the byte stream ? */
+ /* seems the servers don't bother starting clean chunks... */
+ /* sometimes even the first header is at 9KB or something :^) */
+ for (i = 1; i < p->buf_size - 3; i++) {
+ if (p->buf[i+0] == 'N' && p->buf[i+1] == 'S' &&
+ p->buf[i+2] == 'V' && p->buf[i+3] == 's')
+ return AVPROBE_SCORE_MAX-20;
+ }
+ /* so we'll have more luck on extension... */
+ if (match_ext(p->filename, "nsv"))
+ return AVPROBE_SCORE_MAX-20;
+ /* FIXME: add mime-type check */
+ return 0;
+}
+
+AVInputFormat nsv_demuxer = {
+ "nsv",
+ "NullSoft Video format",
+ sizeof(NSVContext),
+ nsv_probe,
+ nsv_read_header,
+ nsv_read_packet,
+ nsv_read_close,
+ nsv_read_seek,
+};
diff --git a/contrib/ffmpeg/libavformat/nut.c b/contrib/ffmpeg/libavformat/nut.c
new file mode 100644
index 000000000..df64caf15
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nut.c
@@ -0,0 +1,1457 @@
+/*
+ * "NUT" Container Format muxer and demuxer (DRAFT-200403??)
+ * Copyright (c) 2003 Alex Beregszaszi
+ * Copyright (c) 2004 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
+ * Visit the official site at http://www.nut.hu/
+ *
+ */
+
+/*
+ * TODO:
+ * - index writing
+ * - index packet reading support
+*/
+
+//#define DEBUG 1
+
+#include <limits.h>
+#include "avformat.h"
+#include "mpegaudio.h"
+#include "riff.h"
+#include "adler32.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+//#define TRACE
+
+//from /dev/random
+
+#define MAIN_STARTCODE (0x7A561F5F04ADULL + (((uint64_t)('N'<<8) + 'M')<<48))
+#define STREAM_STARTCODE (0x11405BF2F9DBULL + (((uint64_t)('N'<<8) + 'S')<<48))
+#define KEYFRAME_STARTCODE (0xE4ADEECA4569ULL + (((uint64_t)('N'<<8) + 'K')<<48))
+#define INDEX_STARTCODE (0xDD672F23E64EULL + (((uint64_t)('N'<<8) + 'X')<<48))
+#define INFO_STARTCODE (0xAB68B596BA78ULL + (((uint64_t)('N'<<8) + 'I')<<48))
+
+#define ID_STRING "nut/multimedia container\0"
+
+#define MAX_DISTANCE (1024*16-1)
+#define MAX_SHORT_DISTANCE (1024*4-1)
+
+#define FLAG_DATA_SIZE 1
+#define FLAG_KEY_FRAME 2
+#define FLAG_INVALID 4
+
+typedef struct {
+ uint8_t flags;
+ uint8_t stream_id_plus1;
+ uint16_t size_mul;
+ uint16_t size_lsb;
+ int16_t timestamp_delta;
+ uint8_t reserved_count;
+} FrameCode;
+
+typedef struct {
+ int last_key_frame;
+ int msb_timestamp_shift;
+ int rate_num;
+ int rate_den;
+ int64_t last_pts;
+ int64_t last_sync_pos; ///<pos of last 1/2 type frame
+ int decode_delay;
+} StreamContext;
+
+typedef struct {
+ AVFormatContext *avf;
+ int written_packet_size;
+ int64_t packet_start[3]; //0 -> startcodeless, 1 -> short startcode, 2 -> long startcode
+ FrameCode frame_code[256];
+ unsigned int stream_count;
+ uint64_t next_startcode; ///< stores the next startcode if it has already been parsed but the stream isn't seekable
+ StreamContext *stream;
+ int max_distance;
+ int max_short_distance;
+ int rate_num;
+ int rate_den;
+ int short_startcode;
+} NUTContext;
+
+static char *info_table[][2]={
+ {NULL , NULL }, // end
+ {NULL , NULL },
+ {NULL , "UTF8"},
+ {NULL , "v"},
+ {NULL , "s"},
+ {"StreamId" , "v"},
+ {"SegmentId" , "v"},
+ {"StartTimestamp" , "v"},
+ {"EndTimestamp" , "v"},
+ {"Author" , "UTF8"},
+ {"Title" , "UTF8"},
+ {"Description" , "UTF8"},
+ {"Copyright" , "UTF8"},
+ {"Encoder" , "UTF8"},
+ {"Keyword" , "UTF8"},
+ {"Cover" , "JPEG"},
+ {"Cover" , "PNG"},
+};
+
+static void update(NUTContext *nut, int stream_index, int64_t frame_start, int frame_type, int frame_code, int key_frame, int size, int64_t pts){
+ StreamContext *stream= &nut->stream[stream_index];
+
+ stream->last_key_frame= key_frame;
+ nut->packet_start[ frame_type ]= frame_start;
+ stream->last_pts= pts;
+}
+
+static void reset(AVFormatContext *s, int64_t global_ts){
+ NUTContext *nut = s->priv_data;
+ int i;
+
+ for(i=0; i<s->nb_streams; i++){
+ StreamContext *stream= &nut->stream[i];
+
+ stream->last_key_frame= 1;
+
+ stream->last_pts= av_rescale(global_ts, stream->rate_num*(int64_t)nut->rate_den, stream->rate_den*(int64_t)nut->rate_num);
+ }
+}
+
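+/* Pre-compute the 256-entry frame_code table: the code space is partitioned
+ * per stream and keyframe flag, with dedicated codes for typical audio frame
+ * sizes and for the timestamp deltas in pred_table; code 'N' is then kept
+ * invalid so a frame code byte can never look like the start of a startcode. */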
+static void build_frame_code(AVFormatContext *s){
+ NUTContext *nut = s->priv_data;
+ int key_frame, index, pred, stream_id;
+ int start=0;
+ int end= 255;
+ int keyframe_0_esc= s->nb_streams > 2;
+ int pred_table[10];
+
+ if(keyframe_0_esc){
+ /* keyframe = 0 escape */
+ FrameCode *ft= &nut->frame_code[start];
+ ft->flags= FLAG_DATA_SIZE;
+ ft->stream_id_plus1= 0;
+ ft->size_mul=1;
+ ft->timestamp_delta=0;
+ start++;
+ }
+
+ for(stream_id= 0; stream_id<s->nb_streams; stream_id++){
+ int start2= start + (end-start)*stream_id / s->nb_streams;
+ int end2 = start + (end-start)*(stream_id+1) / s->nb_streams;
+ AVCodecContext *codec = s->streams[stream_id]->codec;
+ int is_audio= codec->codec_type == CODEC_TYPE_AUDIO;
+ int intra_only= /*codec->intra_only || */is_audio;
+ int pred_count;
+
+ for(key_frame=0; key_frame<2; key_frame++){
+ if(intra_only && keyframe_0_esc && key_frame==0)
+ continue;
+
+ {
+ FrameCode *ft= &nut->frame_code[start2];
+ ft->flags= FLAG_KEY_FRAME*key_frame;
+ ft->flags|= FLAG_DATA_SIZE;
+ ft->stream_id_plus1= stream_id + 1;
+ ft->size_mul=1;
+ ft->timestamp_delta=0;
+ start2++;
+ }
+ }
+
+ key_frame= intra_only;
+#if 1
+ if(is_audio){
+ int frame_bytes= codec->frame_size*(int64_t)codec->bit_rate / (8*codec->sample_rate);
+ int pts;
+ for(pts=0; pts<2; pts++){
+ for(pred=0; pred<2; pred++){
+ FrameCode *ft= &nut->frame_code[start2];
+ ft->flags= FLAG_KEY_FRAME*key_frame;
+ ft->stream_id_plus1= stream_id + 1;
+ ft->size_mul=frame_bytes + 2;
+ ft->size_lsb=frame_bytes + pred;
+ ft->timestamp_delta=pts;
+ start2++;
+ }
+ }
+ }else{
+ FrameCode *ft= &nut->frame_code[start2];
+ ft->flags= FLAG_KEY_FRAME | FLAG_DATA_SIZE;
+ ft->stream_id_plus1= stream_id + 1;
+ ft->size_mul=1;
+ ft->timestamp_delta=1;
+ start2++;
+ }
+#endif
+
+ if(codec->has_b_frames){
+ pred_count=5;
+ pred_table[0]=-2;
+ pred_table[1]=-1;
+ pred_table[2]=1;
+ pred_table[3]=3;
+ pred_table[4]=4;
+ }else if(codec->codec_id == CODEC_ID_VORBIS){
+ pred_count=3;
+ pred_table[0]=2;
+ pred_table[1]=9;
+ pred_table[2]=16;
+ }else{
+ pred_count=1;
+ pred_table[0]=1;
+ }
+
+ for(pred=0; pred<pred_count; pred++){
+ int start3= start2 + (end2-start2)*pred / pred_count;
+ int end3 = start2 + (end2-start2)*(pred+1) / pred_count;
+
+ for(index=start3; index<end3; index++){
+ FrameCode *ft= &nut->frame_code[index];
+ ft->flags= FLAG_KEY_FRAME*key_frame;
+ ft->flags|= FLAG_DATA_SIZE;
+ ft->stream_id_plus1= stream_id + 1;
+//FIXME use single byte size and pred from last
+ ft->size_mul= end3-start3;
+ ft->size_lsb= index - start3;
+ ft->timestamp_delta= pred_table[pred];
+ }
+ }
+ }
+ memmove(&nut->frame_code['N'+1], &nut->frame_code['N'], sizeof(FrameCode)*(255-'N'));
+ nut->frame_code['N'].flags= FLAG_INVALID;
+}
+
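+/* Read a variable-length value ('v'): 7 payload bits per byte, MSB set on
+ * every byte except the last, most significant group first; e.g. the bytes
+ * 0x81 0x23 decode to (1<<7) + 0x23 = 163. */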
+static uint64_t get_v(ByteIOContext *bc)
+{
+ uint64_t val = 0;
+
+ for(;;)
+ {
+ int tmp = get_byte(bc);
+
+ if (tmp&0x80)
+ val= (val<<7) + tmp - 0x80;
+ else{
+//av_log(NULL, AV_LOG_DEBUG, "get_v()= %"PRId64"\n", (val<<7) + tmp);
+ return (val<<7) + tmp;
+ }
+ }
+ return -1;
+}
+
+static int get_str(ByteIOContext *bc, char *string, unsigned int maxlen){
+ unsigned int len= get_v(bc);
+
+ if(len && maxlen)
+ get_buffer(bc, string, FFMIN(len, maxlen));
+ while(len > maxlen){
+ get_byte(bc);
+ len--;
+ }
+
+ if(maxlen)
+ string[FFMIN(len, maxlen-1)]= 0;
+
+ if(maxlen == len)
+ return -1;
+ else
+ return 0;
+}
+
+static int64_t get_s(ByteIOContext *bc){
+ int64_t v = get_v(bc) + 1;
+
+ if (v&1) return -(v>>1);
+ else return (v>>1);
+}
+
+static uint64_t get_vb(ByteIOContext *bc){
+ uint64_t val=0;
+ unsigned int i= get_v(bc);
+
+ if(i>8)
+ return UINT64_MAX;
+
+ while(i--)
+ val = (val<<8) + get_byte(bc);
+
+//av_log(NULL, AV_LOG_DEBUG, "get_vb()= %"PRId64"\n", val);
+ return val;
+}
+
+#ifdef TRACE
+static inline uint64_t get_v_trace(ByteIOContext *bc, char *file, char *func, int line){
+ uint64_t v= get_v(bc);
+
+ printf("get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+
+static inline int64_t get_s_trace(ByteIOContext *bc, char *file, char *func, int line){
+ int64_t v= get_s(bc);
+
+ printf("get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+
+static inline uint64_t get_vb_trace(ByteIOContext *bc, char *file, char *func, int line){
+ uint64_t v= get_vb(bc);
+
+ printf("get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+#define get_v(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_vb(bc) get_vb_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#endif
+
+
+static int get_packetheader(NUTContext *nut, ByteIOContext *bc, int calculate_checksum)
+{
+ int64_t start, size;
+ start= url_ftell(bc) - 8;
+
+ size= get_v(bc);
+
+ init_checksum(bc, calculate_checksum ? av_adler32_update : NULL, 1);
+
+ nut->packet_start[2] = start;
+ nut->written_packet_size= size;
+
+ return size;
+}
+
+static int check_checksum(ByteIOContext *bc){
+ unsigned long checksum= get_checksum(bc);
+ return checksum != get_be32(bc);
+}
+
+/**
+ * Return the number of payload bits (a multiple of 7, at least 7) needed to
+ * store val in the variable-length 'v' coding.
+ */
+static int get_length(uint64_t val){
+ int i;
+
+ for (i=7; val>>i; i+=7);
+
+ return i;
+}
+
+static uint64_t find_any_startcode(ByteIOContext *bc, int64_t pos){
+ uint64_t state=0;
+
+ if(pos >= 0)
+ url_fseek(bc, pos, SEEK_SET); //note, this may fail if the stream isn't seekable, but that shouldn't matter, as in this case we simply start where we are currently
+
+ while(!url_feof(bc)){
+ state= (state<<8) | get_byte(bc);
+ if((state>>56) != 'N')
+ continue;
+ switch(state){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case KEYFRAME_STARTCODE:
+ case INFO_STARTCODE:
+ case INDEX_STARTCODE:
+ return state;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * find the given startcode.
+ * @param code the startcode
+ * @param pos the start position of the search, or -1 to search from the current position
+ * @returns the position of the startcode or -1 if not found
+ */
+static int64_t find_startcode(ByteIOContext *bc, uint64_t code, int64_t pos){
+ for(;;){
+ uint64_t startcode= find_any_startcode(bc, pos);
+ if(startcode == code)
+ return url_ftell(bc) - 8;
+ else if(startcode == 0)
+ return -1;
+ pos=-1;
+ }
+}
+
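+/* Expand timestamp LSBs back to a full pts by picking the value closest to
+ * the stream's last_pts; the LSBs wrap every 1<<msb_timestamp_shift ticks. */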
+static int64_t lsb2full(StreamContext *stream, int64_t lsb){
+ int64_t mask = (1<<stream->msb_timestamp_shift)-1;
+ int64_t delta= stream->last_pts - mask/2;
+ return ((lsb - delta)&mask) + delta;
+}
+
+#ifdef CONFIG_MUXERS
+
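+/* Write val in the same variable-length format that get_v() reads: most
+ * significant 7-bit group first, continuation bit 0x80 on all but the last
+ * byte. */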
+static void put_v(ByteIOContext *bc, uint64_t val)
+{
+ int i;
+
+//av_log(NULL, AV_LOG_DEBUG, "put_v()= %"PRId64"\n", val);
+ val &= 0x7FFFFFFFFFFFFFFFULL; // FIXME can only encode up to 63 bits currently
+ i= get_length(val);
+
+ for (i-=7; i>0; i-=7){
+ put_byte(bc, 0x80 | (val>>i));
+ }
+
+ put_byte(bc, val&0x7f);
+}
+
+/**
+ * stores a string as vb.
+ */
+static void put_str(ByteIOContext *bc, const char *string){
+ int len= strlen(string);
+
+ put_v(bc, len);
+ put_buffer(bc, string, len);
+}
+
+static void put_s(ByteIOContext *bc, int64_t val){
+ if (val<=0) put_v(bc, -2*val );
+ else put_v(bc, 2*val-1);
+}
+
+static void put_vb(ByteIOContext *bc, uint64_t val){
+ int i;
+
+ for (i=8; val>>i; i+=8);
+
+ put_v(bc, i>>3);
+ for(i-=8; i>=0; i-=8)
+ put_byte(bc, (val>>i)&0xFF);
+}
+
+#ifdef TRACE
+static inline void put_v_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
+ printf("get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+
+ put_v(bc, v);
+}
+
+static inline void put_s_trace(ByteIOContext *bc, int64_t v, char *file, char *func, int line){
+ printf("get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+
+ put_s(bc, v);
+}
+
+static inline void put_vb_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
+ printf("get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+
+ put_vb(bc, v);
+}
+#define put_v(bc, v) put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define put_s(bc, v) put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define put_vb(bc, v) put_vb_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#endif
+
+static int put_packetheader(NUTContext *nut, ByteIOContext *bc, int max_size, int calculate_checksum)
+{
+ put_flush_packet(bc);
+ nut->packet_start[2]= url_ftell(bc) - 8;
+ nut->written_packet_size = max_size;
+
+ /* packet header */
+ put_v(bc, nut->written_packet_size); /* forward ptr */
+
+ if(calculate_checksum)
+ init_checksum(bc, av_adler32_update, 1);
+
+ return 0;
+}
+
+/**
+ * Update the forward pointer of the current packet once its final size is
+ * known; must not be called more than once per packet.
+ */
+static int update_packetheader(NUTContext *nut, ByteIOContext *bc, int additional_size, int calculate_checksum){
+ int64_t start= nut->packet_start[2];
+ int64_t cur= url_ftell(bc);
+ int size= cur - start - get_length(nut->written_packet_size)/7 - 8;
+
+ if(calculate_checksum)
+ size += 4;
+
+ if(size != nut->written_packet_size){
+ int i;
+
+ assert( size <= nut->written_packet_size );
+
+ url_fseek(bc, start + 8, SEEK_SET);
+ for(i=get_length(size); i < get_length(nut->written_packet_size); i+=7)
+ put_byte(bc, 0x80);
+ put_v(bc, size);
+
+ url_fseek(bc, cur, SEEK_SET);
+ nut->written_packet_size= size; //FIXME may fail if multiple updates with differing sizes, as get_length may differ
+
+ if(calculate_checksum)
+ put_be32(bc, get_checksum(bc));
+ }
+
+ return 0;
+}
+
+static int nut_write_header(AVFormatContext *s)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ AVCodecContext *codec;
+ int i, j, tmp_time, tmp_flags,tmp_stream, tmp_mul, tmp_size, tmp_fields;
+
+ if (strcmp(s->filename, "./data/b-libav.nut")) {
+ av_log(s, AV_LOG_ERROR, " libavformat NUT is non-compliant and disabled\n");
+ return -1;
+ }
+
+ nut->avf= s;
+
+ nut->stream =
+ av_mallocz(sizeof(StreamContext)*s->nb_streams);
+
+
+ put_buffer(bc, ID_STRING, strlen(ID_STRING));
+ put_byte(bc, 0);
+ nut->packet_start[2]= url_ftell(bc);
+
+ /* main header */
+ put_be64(bc, MAIN_STARTCODE);
+ put_packetheader(nut, bc, 120+5*256, 1);
+ put_v(bc, 2); /* version */
+ put_v(bc, s->nb_streams);
+ put_v(bc, MAX_DISTANCE);
+ put_v(bc, MAX_SHORT_DISTANCE);
+
+ put_v(bc, nut->rate_num=1);
+ put_v(bc, nut->rate_den=2);
+ put_v(bc, nut->short_startcode=0x4EFE79);
+
+ build_frame_code(s);
+ assert(nut->frame_code['N'].flags == FLAG_INVALID);
+
+ tmp_time= tmp_flags= tmp_stream= tmp_mul= tmp_size= /*tmp_res=*/ INT_MAX;
+ for(i=0; i<256;){
+ tmp_fields=0;
+ tmp_size= 0;
+ if(tmp_time != nut->frame_code[i].timestamp_delta) tmp_fields=1;
+ if(tmp_mul != nut->frame_code[i].size_mul ) tmp_fields=2;
+ if(tmp_stream != nut->frame_code[i].stream_id_plus1) tmp_fields=3;
+ if(tmp_size != nut->frame_code[i].size_lsb ) tmp_fields=4;
+// if(tmp_res != nut->frame_code[i].res ) tmp_fields=5;
+
+ tmp_time = nut->frame_code[i].timestamp_delta;
+ tmp_flags = nut->frame_code[i].flags;
+ tmp_stream= nut->frame_code[i].stream_id_plus1;
+ tmp_mul = nut->frame_code[i].size_mul;
+ tmp_size = nut->frame_code[i].size_lsb;
+// tmp_res = nut->frame_code[i].res;
+
+ for(j=0; i<256; j++,i++){
+ if(nut->frame_code[i].timestamp_delta != tmp_time ) break;
+ if(nut->frame_code[i].flags != tmp_flags ) break;
+ if(nut->frame_code[i].stream_id_plus1 != tmp_stream) break;
+ if(nut->frame_code[i].size_mul != tmp_mul ) break;
+ if(nut->frame_code[i].size_lsb != tmp_size+j) break;
+// if(nut->frame_code[i].res != tmp_res ) break;
+ }
+ if(j != tmp_mul - tmp_size) tmp_fields=6;
+
+ put_v(bc, tmp_flags);
+ put_v(bc, tmp_fields);
+ if(tmp_fields>0) put_s(bc, tmp_time);
+ if(tmp_fields>1) put_v(bc, tmp_mul);
+ if(tmp_fields>2) put_v(bc, tmp_stream);
+ if(tmp_fields>3) put_v(bc, tmp_size);
+ if(tmp_fields>4) put_v(bc, 0 /*tmp_res*/);
+ if(tmp_fields>5) put_v(bc, j);
+ }
+
+ update_packetheader(nut, bc, 0, 1);
+
+ /* stream headers */
+ for (i = 0; i < s->nb_streams; i++)
+ {
+ int nom, denom, ssize;
+
+ codec = s->streams[i]->codec;
+
+ put_be64(bc, STREAM_STARTCODE);
+ put_packetheader(nut, bc, 120 + codec->extradata_size, 1);
+ put_v(bc, i /*s->streams[i]->index*/);
+ switch(codec->codec_type){
+ case CODEC_TYPE_VIDEO: put_v(bc, 0); break;
+ case CODEC_TYPE_AUDIO: put_v(bc, 1); break;
+// case CODEC_TYPE_TEXT : put_v(bc, 2); break;
+ case CODEC_TYPE_DATA : put_v(bc, 3); break;
+ default: return -1;
+ }
+ if (codec->codec_tag)
+ put_vb(bc, codec->codec_tag);
+ else if (codec->codec_type == CODEC_TYPE_VIDEO)
+ {
+ put_vb(bc, codec_get_bmp_tag(codec->codec_id));
+ }
+ else if (codec->codec_type == CODEC_TYPE_AUDIO)
+ {
+ put_vb(bc, codec_get_wav_tag(codec->codec_id));
+ }
+ else
+ put_vb(bc, 0);
+
+ ff_parse_specific_params(codec, &nom, &ssize, &denom);
+
+ nut->stream[i].rate_num= nom;
+ nut->stream[i].rate_den= denom;
+ av_set_pts_info(s->streams[i], 60, denom, nom);
+
+ put_v(bc, codec->bit_rate);
+ put_vb(bc, 0); /* no language code */
+ put_v(bc, nom);
+ put_v(bc, denom);
+ if(nom / denom < 1000)
+ nut->stream[i].msb_timestamp_shift = 7;
+ else
+ nut->stream[i].msb_timestamp_shift = 14;
+ put_v(bc, nut->stream[i].msb_timestamp_shift);
+ put_v(bc, codec->has_b_frames);
+ put_byte(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */
+
+ if(codec->extradata_size){
+ put_v(bc, 1);
+ put_v(bc, codec->extradata_size);
+ put_buffer(bc, codec->extradata, codec->extradata_size);
+ }
+ put_v(bc, 0); /* end of codec specific headers */
+
+ switch(codec->codec_type)
+ {
+ case CODEC_TYPE_AUDIO:
+ put_v(bc, codec->sample_rate);
+ put_v(bc, 1);
+ put_v(bc, codec->channels);
+ break;
+ case CODEC_TYPE_VIDEO:
+ put_v(bc, codec->width);
+ put_v(bc, codec->height);
+ put_v(bc, codec->sample_aspect_ratio.num);
+ put_v(bc, codec->sample_aspect_ratio.den);
+ put_v(bc, 0); /* csp type -- unknown */
+ break;
+ default:
+ break;
+ }
+ update_packetheader(nut, bc, 0, 1);
+ }
+
+ /* info header */
+ put_be64(bc, INFO_STARTCODE);
+ put_packetheader(nut, bc, 30+strlen(s->author)+strlen(s->title)+
+ strlen(s->comment)+strlen(s->copyright)+strlen(LIBAVFORMAT_IDENT), 1);
+ if (s->author[0])
+ {
+ put_v(bc, 9); /* type */
+ put_str(bc, s->author);
+ }
+ if (s->title[0])
+ {
+ put_v(bc, 10); /* type */
+ put_str(bc, s->title);
+ }
+ if (s->comment[0])
+ {
+ put_v(bc, 11); /* type */
+ put_str(bc, s->comment);
+ }
+ if (s->copyright[0])
+ {
+ put_v(bc, 12); /* type */
+ put_str(bc, s->copyright);
+ }
+ /* encoder */
+ if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)){
+ put_v(bc, 13); /* type */
+ put_str(bc, LIBAVFORMAT_IDENT);
+ }
+
+ put_v(bc, 0); /* eof info */
+ update_packetheader(nut, bc, 0, 1);
+
+ put_flush_packet(bc);
+
+ return 0;
+}
+
+static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ NUTContext *nut = s->priv_data;
+ StreamContext *stream= &nut->stream[pkt->stream_index];
+ ByteIOContext *bc = &s->pb;
+ int key_frame = 0, full_pts=0;
+ AVCodecContext *enc;
+ int64_t coded_pts;
+ int frame_type, best_length, frame_code, flags, i, size_mul, size_lsb, time_delta;
+ const int64_t frame_start= url_ftell(bc);
+ int64_t pts= pkt->pts;
+ int size= pkt->size;
+ int stream_index= pkt->stream_index;
+
+ enc = s->streams[stream_index]->codec;
+ key_frame = !!(pkt->flags & PKT_FLAG_KEY);
+
+ frame_type=0;
+ if(frame_start + size + 20 - FFMAX(nut->packet_start[1], nut->packet_start[2]) > MAX_DISTANCE)
+ frame_type=2;
+ if(key_frame && !stream->last_key_frame)
+ frame_type=2;
+
+ if(frame_type>1){
+ int64_t global_ts= av_rescale(pts, stream->rate_den*(int64_t)nut->rate_num, stream->rate_num*(int64_t)nut->rate_den);
+ reset(s, global_ts);
+ put_be64(bc, KEYFRAME_STARTCODE);
+ put_v(bc, global_ts);
+ }
+ assert(stream->last_pts != AV_NOPTS_VALUE);
+ coded_pts = pts & ((1<<stream->msb_timestamp_shift)-1);
+ if(lsb2full(stream, coded_pts) != pts)
+ full_pts=1;
+
+ if(full_pts)
+ coded_pts= pts + (1<<stream->msb_timestamp_shift);
+
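+ /* pick the frame code that can represent this packet (stream, keyframe flag,
+ size, pts) with the smallest amount of explicitly coded header data */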
+ best_length=INT_MAX;
+ frame_code= -1;
+ for(i=0; i<256; i++){
+ int stream_id_plus1= nut->frame_code[i].stream_id_plus1;
+ int fc_key_frame;
+ int length=0;
+ size_mul= nut->frame_code[i].size_mul;
+ size_lsb= nut->frame_code[i].size_lsb;
+ time_delta= nut->frame_code[i].timestamp_delta;
+ flags= nut->frame_code[i].flags;
+
+ assert(size_mul > size_lsb);
+
+ if(stream_id_plus1 == 0) length+= get_length(stream_index);
+ else if(stream_id_plus1 - 1 != stream_index)
+ continue;
+ fc_key_frame= !!(flags & FLAG_KEY_FRAME);
+
+ assert(key_frame==0 || key_frame==1);
+ if(fc_key_frame != key_frame)
+ continue;
+
+ if(flags & FLAG_DATA_SIZE){
+ if(size % size_mul != size_lsb)
+ continue;
+ length += get_length(size / size_mul);
+ }else if(size != size_lsb)
+ continue;
+
+ if(full_pts && time_delta)
+ continue;
+
+ if(!time_delta){
+ length += get_length(coded_pts);
+ }else{
+ if(time_delta != pts - stream->last_pts)
+ continue;
+ }
+
+ if(length < best_length){
+ best_length= length;
+ frame_code=i;
+ }
+// av_log(s, AV_LOG_DEBUG, "%d %d %d %d %d %d %d %d %d %d\n", key_frame, frame_type, full_pts, size, stream_index, flags, size_mul, size_lsb, stream_id_plus1, length);
+ }
+
+ assert(frame_code != -1);
+ flags= nut->frame_code[frame_code].flags;
+ size_mul= nut->frame_code[frame_code].size_mul;
+ size_lsb= nut->frame_code[frame_code].size_lsb;
+ time_delta= nut->frame_code[frame_code].timestamp_delta;
+#ifdef TRACE
+ best_length /= 7;
+ best_length ++; //frame_code
+ if(frame_type==2){
+ best_length += 8; // startcode
+ }
+ av_log(s, AV_LOG_DEBUG, "kf:%d ft:%d pt:%d fc:%2X len:%2d size:%d stream:%d flag:%d mul:%d lsb:%d s+1:%d pts_delta:%d pts:%"PRId64" fs:%"PRId64"\n", key_frame, frame_type, full_pts ? 1 : 0, frame_code, best_length, size, stream_index, flags, size_mul, size_lsb, nut->frame_code[frame_code].stream_id_plus1,(int)(pts - stream->last_pts), pts, frame_start);
+// av_log(s, AV_LOG_DEBUG, "%d %d %d\n", stream->lru_pts_delta[0], stream->lru_pts_delta[1], stream->lru_pts_delta[2]);
+#endif
+
+ assert(frame_type != 1); //short startcode not implemented yet
+ put_byte(bc, frame_code);
+
+ if(nut->frame_code[frame_code].stream_id_plus1 == 0)
+ put_v(bc, stream_index);
+ if (!time_delta){
+ put_v(bc, coded_pts);
+ }
+ if(flags & FLAG_DATA_SIZE)
+ put_v(bc, size / size_mul);
+ else
+ assert(size == size_lsb);
+ if(size > MAX_DISTANCE){
+ assert(frame_type > 1);
+ }
+
+ put_buffer(bc, pkt->data, size);
+
+ update(nut, stream_index, frame_start, frame_type, frame_code, key_frame, size, pts);
+
+ return 0;
+}
+
+static int nut_write_trailer(AVFormatContext *s)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+
+#if 0
+ int i;
+
+ /* WRITE INDEX */
+
+ for (i = 0; i < s->nb_streams; i++)
+ {
+ put_be64(bc, INDEX_STARTCODE);
+ put_packetheader(nut, bc, 64, 1);
+ put_v(bc, s->streams[i]->id);
+ put_v(bc, ...);
+ update_packetheader(nut, bc, 0, 1);
+ }
+#endif
+
+ put_flush_packet(bc);
+
+ av_freep(&nut->stream);
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+static int nut_probe(AVProbeData *p)
+{
+ int i;
+ uint64_t code= 0xff;
+
+ for (i = 0; i < p->buf_size; i++) {
+ code = (code << 8) | p->buf[i];
+ if (code == MAIN_STARTCODE)
+ return AVPROBE_SCORE_MAX;
+ }
+ return 0;
+}
+
+static int decode_main_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ uint64_t tmp;
+ int i, j, tmp_stream, tmp_mul, tmp_time, tmp_size, count, tmp_res;
+
+ get_packetheader(nut, bc, 1);
+
+ tmp = get_v(bc);
+ if (tmp != 2){
+ av_log(s, AV_LOG_ERROR, "bad version (%"PRId64")\n", tmp);
+ return -1;
+ }
+
+ nut->stream_count = get_v(bc);
+ if(nut->stream_count > MAX_STREAMS){
+ av_log(s, AV_LOG_ERROR, "too many streams\n");
+ return -1;
+ }
+ nut->max_distance = get_v(bc);
+ nut->max_short_distance = get_v(bc);
+ nut->rate_num= get_v(bc);
+ nut->rate_den= get_v(bc);
+ nut->short_startcode= get_v(bc);
+ if(nut->short_startcode>>16 != 'N'){
+ av_log(s, AV_LOG_ERROR, "invalid short startcode %X\n", nut->short_startcode);
+ return -1;
+ }
+
+ for(i=0; i<256;){
+ int tmp_flags = get_v(bc);
+ int tmp_fields= get_v(bc);
+ if(tmp_fields>0) tmp_time = get_s(bc);
+ if(tmp_fields>1) tmp_mul = get_v(bc);
+ if(tmp_fields>2) tmp_stream= get_v(bc);
+ if(tmp_fields>3) tmp_size = get_v(bc);
+ else tmp_size = 0;
+ if(tmp_fields>4) tmp_res = get_v(bc);
+ else tmp_res = 0;
+ if(tmp_fields>5) count = get_v(bc);
+ else count = tmp_mul - tmp_size;
+
+ while(tmp_fields-- > 6)
+ get_v(bc);
+
+ if(count == 0 || i+count > 256){
+ av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i);
+ return -1;
+ }
+ if(tmp_stream > nut->stream_count + 1){
+ av_log(s, AV_LOG_ERROR, "illegal stream number\n");
+ return -1;
+ }
+
+ for(j=0; j<count; j++,i++){
+ nut->frame_code[i].flags = tmp_flags ;
+ nut->frame_code[i].timestamp_delta = tmp_time ;
+ nut->frame_code[i].stream_id_plus1 = tmp_stream;
+ nut->frame_code[i].size_mul = tmp_mul ;
+ nut->frame_code[i].size_lsb = tmp_size+j;
+ nut->frame_code[i].reserved_count = tmp_res ;
+ }
+ }
+ if(nut->frame_code['N'].flags != FLAG_INVALID){
+ av_log(s, AV_LOG_ERROR, "illegal frame_code table\n");
+ return -1;
+ }
+
+ if(check_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Main header checksum mismatch\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int decode_stream_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ int class, nom, denom, stream_id;
+ uint64_t tmp;
+ AVStream *st;
+
+ get_packetheader(nut, bc, 1);
+ stream_id= get_v(bc);
+ if(stream_id >= nut->stream_count || s->streams[stream_id])
+ return -1;
+
+ st = av_new_stream(s, stream_id);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ class = get_v(bc);
+ tmp = get_vb(bc);
+ st->codec->codec_tag= tmp;
+ switch(class)
+ {
+ case 0:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = codec_get_bmp_id(tmp);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_ERROR, "Unknown codec?!\n");
+ break;
+ case 1:
+ case 32: //compatibility
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = codec_get_wav_id(tmp);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_ERROR, "Unknown codec?!\n");
+ break;
+ case 2:
+// st->codec->codec_type = CODEC_TYPE_TEXT;
+// break;
+ case 3:
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ break;
+ default:
+ av_log(s, AV_LOG_ERROR, "Unknown stream class (%d)\n", class);
+ return -1;
+ }
+ s->bit_rate += get_v(bc);
+ get_vb(bc); /* language code */
+ nom = get_v(bc);
+ denom = get_v(bc);
+ nut->stream[stream_id].msb_timestamp_shift = get_v(bc);
+ st->codec->has_b_frames=
+ nut->stream[stream_id].decode_delay= get_v(bc);
+ get_byte(bc); /* flags */
+
+ /* codec specific data headers */
+ while(get_v(bc) != 0){
+ st->codec->extradata_size= get_v(bc);
+ if((unsigned)st->codec->extradata_size > (1<<30))
+ return -1;
+ st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(bc, st->codec->extradata, st->codec->extradata_size);
+// url_fskip(bc, get_v(bc));
+ }
+
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) /* VIDEO */
+ {
+ st->codec->width = get_v(bc);
+ st->codec->height = get_v(bc);
+ st->codec->sample_aspect_ratio.num= get_v(bc);
+ st->codec->sample_aspect_ratio.den= get_v(bc);
+ get_v(bc); /* csp type */
+ }
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) /* AUDIO */
+ {
+ st->codec->sample_rate = get_v(bc);
+ get_v(bc); // samplerate_den
+ st->codec->channels = get_v(bc);
+ }
+ if(check_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Stream header %d checksum mismatch\n", stream_id);
+ return -1;
+ }
+ av_set_pts_info(s->streams[stream_id], 60, denom, nom);
+ nut->stream[stream_id].rate_num= nom;
+ nut->stream[stream_id].rate_den= denom;
+ return 0;
+}
+
+static int decode_info_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+
+ get_packetheader(nut, bc, 1);
+
+ for(;;){
+ int id= get_v(bc);
+ char *name, *type, custom_name[256], custom_type[256];
+
+ if(!id)
+ break;
+ else if(id >= sizeof(info_table)/sizeof(info_table[0])){
+ av_log(s, AV_LOG_ERROR, "info id is too large %d %zd\n", id, sizeof(info_table)/sizeof(info_table[0]));
+ return -1;
+ }
+
+ type= info_table[id][1];
+ name= info_table[id][0];
+//av_log(s, AV_LOG_DEBUG, "%d %s %s\n", id, type, name);
+
+ if(!type){
+ get_str(bc, custom_type, sizeof(custom_type));
+ type= custom_type;
+ }
+ if(!name){
+ get_str(bc, custom_name, sizeof(custom_name));
+ name= custom_name;
+ }
+
+ if(!strcmp(type, "v")){
+ get_v(bc);
+ }else{
+ if(!strcmp(name, "Author"))
+ get_str(bc, s->author, sizeof(s->author));
+ else if(!strcmp(name, "Title"))
+ get_str(bc, s->title, sizeof(s->title));
+ else if(!strcmp(name, "Copyright"))
+ get_str(bc, s->copyright, sizeof(s->copyright));
+ else if(!strcmp(name, "Description"))
+ get_str(bc, s->comment, sizeof(s->comment));
+ else
+ get_str(bc, NULL, 0);
+ }
+ }
+ if(check_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Info header checksum mismatch\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int nut_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos;
+ int inited_stream_count;
+
+ nut->avf= s;
+
+ /* main header */
+ pos=0;
+ for(;;){
+ pos= find_startcode(bc, MAIN_STARTCODE, pos)+1;
+ if (pos<0+1){
+ av_log(s, AV_LOG_ERROR, "no main startcode found\n");
+ return -1;
+ }
+ if(decode_main_header(nut) >= 0)
+ break;
+ }
+
+
+ s->bit_rate = 0;
+
+ nut->stream = av_malloc(sizeof(StreamContext)*nut->stream_count);
+
+ /* stream headers */
+ pos=0;
+ for(inited_stream_count=0; inited_stream_count < nut->stream_count;){
+ pos= find_startcode(bc, STREAM_STARTCODE, pos)+1;
+ if (pos<0+1){
+ av_log(s, AV_LOG_ERROR, "not all stream headers found\n");
+ return -1;
+ }
+ if(decode_stream_header(nut) >= 0)
+ inited_stream_count++;
+ }
+
+ /* info headers */
+ pos=0;
+ for(;;){
+ uint64_t startcode= find_any_startcode(bc, pos);
+ pos= url_ftell(bc);
+
+ if(startcode==0){
+ av_log(s, AV_LOG_ERROR, "EOF before video frames\n");
+ return -1;
+ }else if(startcode == KEYFRAME_STARTCODE){
+ nut->next_startcode= startcode;
+ break;
+ }else if(startcode != INFO_STARTCODE){
+ continue;
+ }
+
+ decode_info_header(nut);
+ }
+
+ return 0;
+}
+
+static int decode_frame_header(NUTContext *nut, int *key_frame_ret, int64_t *pts_ret, int *stream_id_ret, int frame_code, int frame_type, int64_t frame_start){
+ AVFormatContext *s= nut->avf;
+ StreamContext *stream;
+ ByteIOContext *bc = &s->pb;
+ int size, flags, size_mul, size_lsb, stream_id, time_delta;
+ int64_t pts = 0;
+
+ if(frame_type < 2 && frame_start - nut->packet_start[2] > nut->max_distance){
+ av_log(s, AV_LOG_ERROR, "last frame must have been damaged\n");
+ return -1;
+ }
+
+ if(frame_type)
+ nut->packet_start[ frame_type ]= frame_start; //otherwise 1 goto 1 may happen
+
+ flags= nut->frame_code[frame_code].flags;
+ size_mul= nut->frame_code[frame_code].size_mul;
+ size_lsb= nut->frame_code[frame_code].size_lsb;
+ stream_id= nut->frame_code[frame_code].stream_id_plus1 - 1;
+ time_delta= nut->frame_code[frame_code].timestamp_delta;
+
+ if(stream_id==-1)
+ stream_id= get_v(bc);
+ if(stream_id >= s->nb_streams){
+ av_log(s, AV_LOG_ERROR, "illegal stream_id\n");
+ return -1;
+ }
+ stream= &nut->stream[stream_id];
+
+// av_log(s, AV_LOG_DEBUG, "ft:%d ppts:%d %d %d\n", frame_type, stream->lru_pts_delta[0], stream->lru_pts_delta[1], stream->lru_pts_delta[2]);
+
+ *key_frame_ret= !!(flags & FLAG_KEY_FRAME);
+
+ if(!time_delta){
+ int64_t mask = (1<<stream->msb_timestamp_shift)-1;
+ pts= get_v(bc);
+ if(pts > mask){
+ pts -= mask+1;
+ }else{
+ if(stream->last_pts == AV_NOPTS_VALUE){
+ av_log(s, AV_LOG_ERROR, "no reference pts available\n");
+ return -1;
+ }
+ pts= lsb2full(stream, pts);
+ }
+ }else{
+ if(stream->last_pts == AV_NOPTS_VALUE){
+ av_log(s, AV_LOG_ERROR, "no reference pts available\n");
+ return -1;
+ }
+ pts= stream->last_pts + time_delta;
+ }
+
+ if(*key_frame_ret){
+// av_log(s, AV_LOG_DEBUG, "stream:%d start:%"PRId64" pts:%"PRId64" length:%"PRId64"\n",stream_id, frame_start, av_pts, frame_start - nut->stream[stream_id].last_sync_pos);
+ av_add_index_entry(
+ s->streams[stream_id],
+ frame_start,
+ pts,
+ 0,
+ frame_start - nut->stream[stream_id].last_sync_pos,
+ AVINDEX_KEYFRAME);
+ nut->stream[stream_id].last_sync_pos= frame_start;
+// assert(nut->packet_start == frame_start);
+ }
+
+ assert(size_mul > size_lsb);
+ size= size_lsb;
+ if(flags & FLAG_DATA_SIZE)
+ size+= size_mul*get_v(bc);
+
+#ifdef TRACE
+av_log(s, AV_LOG_DEBUG, "fs:%"PRId64" fc:%d ft:%d kf:%d pts:%"PRId64" size:%d mul:%d lsb:%d flags:%d delta:%d\n", frame_start, frame_code, frame_type, *key_frame_ret, pts, size, size_mul, size_lsb, flags, time_delta);
+#endif
+
+ if(frame_type==0 && url_ftell(bc) - nut->packet_start[2] + size > nut->max_distance){
+ av_log(s, AV_LOG_ERROR, "frame size too large\n");
+ return -1;
+ }
+
+ *stream_id_ret = stream_id;
+ *pts_ret = pts;
+
+ update(nut, stream_id, frame_start, frame_type, frame_code, *key_frame_ret, size, pts);
+
+ return size;
+}
+
+static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code, int frame_type, int64_t frame_start){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ int size, stream_id, key_frame, discard;
+ int64_t pts, last_IP_pts;
+
+ size= decode_frame_header(nut, &key_frame, &pts, &stream_id, frame_code, frame_type, frame_start);
+ if(size < 0)
+ return -1;
+
+ discard= s->streams[ stream_id ]->discard;
+ last_IP_pts= s->streams[ stream_id ]->last_IP_pts;
+ if( (discard >= AVDISCARD_NONKEY && !key_frame)
+ ||(discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && last_IP_pts > pts)
+ || discard >= AVDISCARD_ALL){
+ url_fskip(bc, size);
+ return 1;
+ }
+
+ av_get_packet(bc, pkt, size);
+ pkt->stream_index = stream_id;
+ if (key_frame)
+ pkt->flags |= PKT_FLAG_KEY;
+ pkt->pts = pts;
+
+ return 0;
+}
+
+static int nut_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int i, frame_code=0, ret;
+
+ for(;;){
+ int64_t pos= url_ftell(bc);
+ int frame_type= 0;
+ uint64_t tmp= nut->next_startcode;
+ nut->next_startcode=0;
+
+ if (url_feof(bc))
+ return -1;
+
+ if(tmp){
+ pos-=8;
+ }else{
+ frame_code = get_byte(bc);
+ if(frame_code == 'N'){
+ tmp= frame_code;
+ for(i=1; i<8; i++)
+ tmp = (tmp<<8) + get_byte(bc);
+ }
+ }
+ switch(tmp){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case INDEX_STARTCODE:
+ get_packetheader(nut, bc, 0);
+ assert(nut->packet_start[2] == pos);
+ url_fseek(bc, nut->written_packet_size, SEEK_CUR);
+ break;
+ case INFO_STARTCODE:
+ if(decode_info_header(nut)<0)
+ goto resync;
+ break;
+ case KEYFRAME_STARTCODE:
+ frame_type = 2;
+ reset(s, get_v(bc));
+ frame_code = get_byte(bc);
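+ /* fall through to frame decoding */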
+ case 0:
+ ret= decode_frame(nut, pkt, frame_code, frame_type, pos);
+ if(ret==0)
+ return 0;
+ else if(ret==1) //ok but discard packet
+ break;
+ default:
+resync:
+av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", nut->packet_start[2]+1);
+ tmp= find_any_startcode(bc, nut->packet_start[2]+1);
+ if(tmp==0)
+ return -1;
+av_log(s, AV_LOG_DEBUG, "sync\n");
+ nut->next_startcode= tmp;
+ }
+ }
+}
+
+static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos_arg, int64_t pos_limit){
+ NUTContext *nut = s->priv_data;
+ StreamContext *stream;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos, pts;
+ uint64_t code;
+ int frame_code,step, stream_id, i,size, key_frame;
+av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", stream_index, *pos_arg, pos_limit);
+
+ if(*pos_arg < 0)
+ return AV_NOPTS_VALUE;
+
+ pos= *pos_arg;
+ step= FFMIN(16*1024, pos);
+ do{
+ pos-= step;
+ code= find_any_startcode(bc, pos);
+
+ if(code && url_ftell(bc) - 8 <= *pos_arg)
+ break;
+ step= FFMIN(2*step, pos);
+ }while(step);
+
+ if(!code) //nothing found, not even after pos_arg
+ return AV_NOPTS_VALUE;
+
+ url_fseek(bc, -8, SEEK_CUR);
+ for(i=0; i<s->nb_streams; i++)
+ nut->stream[i].last_sync_pos= url_ftell(bc);
+
+ for(;;){
+ int frame_type=0;
+ int64_t pos= url_ftell(bc);
+ uint64_t tmp=0;
+
+ if(pos > pos_limit || url_feof(bc))
+ return AV_NOPTS_VALUE;
+
+ frame_code = get_byte(bc);
+ if(frame_code == 'N'){
+ tmp= frame_code;
+ for(i=1; i<8; i++)
+ tmp = (tmp<<8) + get_byte(bc);
+ }
+//av_log(s, AV_LOG_DEBUG, "before switch %"PRIX64" at=%"PRId64"\n", tmp, pos);
+
+ switch(tmp){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case INDEX_STARTCODE:
+ case INFO_STARTCODE:
+ get_packetheader(nut, bc, 0);
+ assert(nut->packet_start[2]==pos);
+ url_fseek(bc, nut->written_packet_size, SEEK_CUR);
+ break;
+ case KEYFRAME_STARTCODE:
+ frame_type=2;
+ reset(s, get_v(bc));
+ frame_code = get_byte(bc);
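+ /* fall through to frame decoding */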
+ case 0:
+ size= decode_frame_header(nut, &key_frame, &pts, &stream_id, frame_code, frame_type, pos);
+ if(size < 0)
+ goto resync;
+
+ stream= &nut->stream[stream_id];
+ if(stream_id != stream_index || !key_frame || pos < *pos_arg){
+ url_fseek(bc, size, SEEK_CUR);
+ break;
+ }
+
+ *pos_arg= pos;
+ return pts;
+ default:
+resync:
+av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", nut->packet_start[2]+1);
+ if(!find_any_startcode(bc, nut->packet_start[2]+1))
+ return AV_NOPTS_VALUE;
+
+ url_fseek(bc, -8, SEEK_CUR);
+ }
+ }
+ return AV_NOPTS_VALUE;
+}
+
+static int nut_read_seek(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
+// NUTContext *nut = s->priv_data;
+ int64_t pos;
+
+ if(av_seek_frame_binary(s, stream_index, target_ts, flags) < 0)
+ return -1;
+
+ pos= url_ftell(&s->pb);
+ nut_read_timestamp(s, stream_index, &pos, pos-1);
+
+ return 0;
+}
+
+static int nut_read_close(AVFormatContext *s)
+{
+ NUTContext *nut = s->priv_data;
+
+ av_freep(&nut->stream);
+
+ return 0;
+}
+
+#ifdef CONFIG_NUT_DEMUXER
+AVInputFormat nut_demuxer = {
+ "nut",
+ "nut format",
+ sizeof(NUTContext),
+ nut_probe,
+ nut_read_header,
+ nut_read_packet,
+ nut_read_close,
+ nut_read_seek,
+ nut_read_timestamp,
+ .extensions = "nut",
+};
+#endif
+#ifdef CONFIG_NUT_MUXER
+AVOutputFormat nut_muxer = {
+ "nut",
+ "nut format",
+ "video/x-nut",
+ "nut",
+ sizeof(NUTContext),
+#ifdef CONFIG_LIBVORBIS
+ CODEC_ID_VORBIS,
+#elif defined(CONFIG_MP3LAME)
+ CODEC_ID_MP3,
+#else
+ CODEC_ID_MP2, /* AC3 needs liba52 decoder */
+#endif
+ CODEC_ID_MPEG4,
+ nut_write_header,
+ nut_write_packet,
+ nut_write_trailer,
+ .flags = AVFMT_GLOBALHEADER,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/nut.h b/contrib/ffmpeg/libavformat/nut.h
new file mode 100644
index 000000000..82bbf6f17
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nut.h
@@ -0,0 +1,97 @@
+/*
+ * "NUT" Container Format (de)muxer
+ * Copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+//#include <limits.h>
+#include "avformat.h"
+#include "crc.h"
+//#include "mpegaudio.h"
+#include "riff.h"
+//#include "adler32.h"
+
+#define MAIN_STARTCODE (0x7A561F5F04ADULL + (((uint64_t)('N'<<8) + 'M')<<48))
+#define STREAM_STARTCODE (0x11405BF2F9DBULL + (((uint64_t)('N'<<8) + 'S')<<48))
+#define SYNCPOINT_STARTCODE (0xE4ADEECA4569ULL + (((uint64_t)('N'<<8) + 'K')<<48))
+#define INDEX_STARTCODE (0xDD672F23E64EULL + (((uint64_t)('N'<<8) + 'X')<<48))
+#define INFO_STARTCODE (0xAB68B596BA78ULL + (((uint64_t)('N'<<8) + 'I')<<48))
+
+#define ID_STRING "nut/multimedia container\0"
+
+#define MAX_DISTANCE (1024*32-1)
+
+typedef enum{
+ FLAG_KEY = 1, ///<if set, frame is keyframe
+ FLAG_EOR = 2, ///<if set, stream has no relevance on presentation. (EOR)
+ FLAG_CODED_PTS = 8, ///<if set, coded_pts is in the frame header
+ FLAG_STREAM_ID = 16, ///<if set, stream_id is coded in the frame header
+ FLAG_SIZE_MSB = 32, ///<if set, data_size_msb is at frame header, otherwise data_size_msb is 0
+ FLAG_CHECKSUM = 64, ///<if set then the frame header contains a checksum
+ FLAG_RESERVED = 128, ///<if set, reserved_count is coded in the frame header
+ FLAG_CODED =4096, ///<if set, coded_flags are stored in the frame header.
+ FLAG_INVALID =8192, ///<if set, frame_code is invalid.
+}flag_t;
+
+typedef struct {
+ uint64_t pos;
+ uint64_t back_ptr;
+// uint64_t global_key_pts;
+ int64_t ts;
+} syncpoint_t;
+
+typedef struct {
+ uint16_t flags;
+ uint8_t stream_id;
+ uint16_t size_mul;
+ uint16_t size_lsb;
+ int16_t pts_delta;
+ uint8_t reserved_count;
+} FrameCode; // maybe s/FrameCode/framecode_t/ or change all to java style but don't mix
+
+typedef struct {
+ int last_flags;
+ int skip_until_key_frame;
+ int64_t last_pts;
+ int time_base_id;
+ AVRational time_base;
+ int msb_pts_shift;
+ int max_pts_distance;
+ int decode_delay; //FIXME duplicate of has_b_frames
+} StreamContext;// maybe s/StreamContext/streamcontext_t/
+
+typedef struct {
+ AVFormatContext *avf;
+// int written_packet_size;
+// int64_t packet_start[3]; //0-> startcode less, 1-> short startcode 2-> long startcodes
+ FrameCode frame_code[256];
+ uint64_t next_startcode; ///< stores the next startcode if it has already been parsed but the stream isn't seekable
+ StreamContext *stream;
+ unsigned int max_distance;
+ unsigned int time_base_count;
+ int64_t last_syncpoint_pos;
+ AVRational *time_base;
+ struct AVTreeNode *syncpoints;
+} NUTContext;
+
+
+//FIXME move to a common spot, like crc.c/h
+static unsigned long av_crc04C11DB7_update(unsigned long checksum, const uint8_t *buf, unsigned int len){
+ return av_crc(av_crc04C11DB7, checksum, buf, len);
+}
diff --git a/contrib/ffmpeg/libavformat/nutdec.c b/contrib/ffmpeg/libavformat/nutdec.c
new file mode 100644
index 000000000..7e0f8cd93
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nutdec.c
@@ -0,0 +1,889 @@
+/*
+ * "NUT" Container Format demuxer
+ * Copyright (c) 2004-2006 Michael Niedermayer
+ * Copyright (c) 2003 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "tree.h"
+#include "nut.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+static uint64_t get_v(ByteIOContext *bc){
+ uint64_t val = 0;
+
+ for(;;)
+ {
+ int tmp = get_byte(bc);
+
+ if (tmp&0x80)
+ val= (val<<7) + tmp - 0x80;
+ else{
+ return (val<<7) + tmp;
+ }
+ }
+ return -1;
+}
+
+static int get_str(ByteIOContext *bc, char *string, unsigned int maxlen){
+ unsigned int len= get_v(bc);
+
+ if(len && maxlen)
+ get_buffer(bc, string, FFMIN(len, maxlen));
+ while(len > maxlen){
+ get_byte(bc);
+ len--;
+ }
+
+ if(maxlen)
+ string[FFMIN(len, maxlen-1)]= 0;
+
+ if(maxlen == len)
+ return -1;
+ else
+ return 0;
+}
+
+static int64_t get_s(ByteIOContext *bc){
+ int64_t v = get_v(bc) + 1;
+
+ if (v&1) return -(v>>1);
+ else return (v>>1);
+}
+
+static uint64_t get_fourcc(ByteIOContext *bc){
+ unsigned int len= get_v(bc);
+
+ if (len==2) return get_le16(bc);
+ else if(len==4) return get_le32(bc);
+ else return -1;
+}
+
+#ifdef TRACE
+static inline uint64_t get_v_trace(ByteIOContext *bc, char *file, char *func, int line){
+ uint64_t v= get_v(bc);
+
+ printf("get_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+
+static inline int64_t get_s_trace(ByteIOContext *bc, char *file, char *func, int line){
+ int64_t v= get_s(bc);
+
+ printf("get_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+
+static inline uint64_t get_vb_trace(ByteIOContext *bc, char *file, char *func, int line){
+ uint64_t v= get_vb(bc);
+
+ printf("get_vb %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
+ return v;
+}
+#define get_v(bc) get_v_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_s(bc) get_s_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#define get_vb(bc) get_vb_trace(bc, __FILE__, __PRETTY_FUNCTION__, __LINE__)
+#endif
+
+static int get_packetheader(NUTContext *nut, ByteIOContext *bc, int calculate_checksum)
+{
+ int64_t start, size;
+// start= url_ftell(bc) - 8;
+
+ size= get_v(bc);
+
+ init_checksum(bc, calculate_checksum ? av_crc04C11DB7_update : NULL, 0);
+
+// nut->packet_start[2] = start;
+// nut->written_packet_size= size;
+
+ return size;
+}
+
+static uint64_t find_any_startcode(ByteIOContext *bc, int64_t pos){
+ uint64_t state=0;
+
+ if(pos >= 0)
+ url_fseek(bc, pos, SEEK_SET); //note: this may fail if the stream isn't seekable, but that shouldn't matter, as in that case we simply start where we currently are
+
+ while(!url_feof(bc)){
+ state= (state<<8) | get_byte(bc);
+ if((state>>56) != 'N')
+ continue;
+ switch(state){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case SYNCPOINT_STARTCODE:
+ case INFO_STARTCODE:
+ case INDEX_STARTCODE:
+ return state;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Find the given startcode.
+ * @param code the startcode
+ * @param pos the start position of the search, or -1 to search from the current position
+ * @return the position of the startcode, or -1 if it was not found
+ */
+static int64_t find_startcode(ByteIOContext *bc, uint64_t code, int64_t pos){
+ for(;;){
+ uint64_t startcode= find_any_startcode(bc, pos);
+ if(startcode == code)
+ return url_ftell(bc) - 8;
+ else if(startcode == 0)
+ return -1;
+ pos=-1;
+ }
+}
+
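+/* recover a full pts from its coded least significant bits: pick the value
+ * congruent to lsb modulo 2^msb_pts_shift that lies in a window roughly
+ * centered on the stream's last_pts */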
+static int64_t lsb2full(StreamContext *stream, int64_t lsb){
+ int64_t mask = (1<<stream->msb_pts_shift)-1;
+ int64_t delta= stream->last_pts - mask/2;
+ return ((lsb - delta)&mask) + delta;
+}
+
+static int nut_probe(AVProbeData *p){
+ int i;
+ uint64_t code= 0;
+
+ for (i = 0; i < p->buf_size; i++) {
+ code = (code << 8) | p->buf[i];
+ if (code == MAIN_STARTCODE)
+ return AVPROBE_SCORE_MAX;
+ }
+ return 0;
+}
+
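+/* GET_V(dst, check): read a variable length value into the local variable tmp,
+ * validate it with the given condition and assign it to dst; on failure an
+ * error is logged and the enclosing function returns -1 */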
+#define GET_V(dst, check) \
+ tmp= get_v(bc);\
+ if(!(check)){\
+ av_log(s, AV_LOG_ERROR, "Error " #dst " is (%"PRId64")\n", tmp);\
+ return -1;\
+ }\
+ dst= tmp;
+
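+/* skip reserved bytes up to the absolute position pos; if that position has
+ * already been passed, seek back to it and return -1 */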
+static int skip_reserved(ByteIOContext *bc, int64_t pos){
+ pos -= url_ftell(bc);
+ if(pos<0){
+ url_fseek(bc, pos, SEEK_CUR);
+ return -1;
+ }else{
+ while(pos--)
+ get_byte(bc);
+ return 0;
+ }
+}
+
+static int decode_main_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ uint64_t tmp, end;
+ unsigned int stream_count;
+ int i, j, tmp_stream, tmp_mul, tmp_pts, tmp_size, count, tmp_res;
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ GET_V(tmp , tmp >=2 && tmp <= 3)
+ GET_V(stream_count , tmp > 0 && tmp <=MAX_STREAMS)
+
+ nut->max_distance = get_v(bc);
+ if(nut->max_distance > 65536){
+ av_log(s, AV_LOG_DEBUG, "max_distance %d\n", nut->max_distance);
+ nut->max_distance= 65536;
+ }
+
+ GET_V(nut->time_base_count, tmp>0 && tmp<INT_MAX / sizeof(AVRational))
+ nut->time_base= av_malloc(nut->time_base_count * sizeof(AVRational));
+
+ for(i=0; i<nut->time_base_count; i++){
+ GET_V(nut->time_base[i].num, tmp>0 && tmp<(1ULL<<31))
+ GET_V(nut->time_base[i].den, tmp>0 && tmp<(1ULL<<31))
+ if(ff_gcd(nut->time_base[i].num, nut->time_base[i].den) != 1){
+ av_log(s, AV_LOG_ERROR, "time base invalid\n");
+ return -1;
+ }
+ }
+ tmp_pts=0;
+ tmp_mul=1;
+ tmp_stream=0;
+ for(i=0; i<256;){
+ int tmp_flags = get_v(bc);
+ int tmp_fields= get_v(bc);
+ if(tmp_fields>0) tmp_pts = get_s(bc);
+ if(tmp_fields>1) tmp_mul = get_v(bc);
+ if(tmp_fields>2) tmp_stream= get_v(bc);
+ if(tmp_fields>3) tmp_size = get_v(bc);
+ else tmp_size = 0;
+ if(tmp_fields>4) tmp_res = get_v(bc);
+ else tmp_res = 0;
+ if(tmp_fields>5) count = get_v(bc);
+ else count = tmp_mul - tmp_size;
+
+ while(tmp_fields-- > 6)
+ get_v(bc);
+
+ if(count == 0 || i+count > 256){
+ av_log(s, AV_LOG_ERROR, "illegal count %d at %d\n", count, i);
+ return -1;
+ }
+ if(tmp_stream >= stream_count){
+ av_log(s, AV_LOG_ERROR, "illegal stream number\n");
+ return -1;
+ }
+
+ for(j=0; j<count; j++,i++){
+ if (i == 'N') {
+ nut->frame_code[i].flags= FLAG_INVALID;
+ j--;
+ continue;
+ }
+ nut->frame_code[i].flags = tmp_flags ;
+ nut->frame_code[i].pts_delta = tmp_pts ;
+ nut->frame_code[i].stream_id = tmp_stream;
+ nut->frame_code[i].size_mul = tmp_mul ;
+ nut->frame_code[i].size_lsb = tmp_size+j;
+ nut->frame_code[i].reserved_count = tmp_res ;
+ }
+ }
+ assert(nut->frame_code['N'].flags == FLAG_INVALID);
+
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Main header checksum mismatch\n");
+ return -1;
+ }
+
+ nut->stream = av_mallocz(sizeof(StreamContext)*stream_count);
+ for(i=0; i<stream_count; i++){
+ av_new_stream(s, i);
+ }
+
+ return 0;
+}
+
+static int decode_stream_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ StreamContext *stc;
+ int class, stream_id;
+ uint64_t tmp, end;
+ AVStream *st;
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ GET_V(stream_id, tmp < s->nb_streams && !nut->stream[tmp].time_base.num);
+ stc= &nut->stream[stream_id];
+
+ st = s->streams[stream_id];
+ if (!st)
+ return AVERROR_NOMEM;
+
+ class = get_v(bc);
+ tmp = get_fourcc(bc);
+ st->codec->codec_tag= tmp;
+ switch(class)
+ {
+ case 0:
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = codec_get_bmp_id(tmp);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_ERROR, "Unknown codec?!\n");
+ break;
+ case 1:
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = codec_get_wav_id(tmp);
+ if (st->codec->codec_id == CODEC_ID_NONE)
+ av_log(s, AV_LOG_ERROR, "Unknown codec?!\n");
+ break;
+ case 2:
+// st->codec->codec_type = CODEC_TYPE_TEXT;
+// break;
+ case 3:
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ break;
+ default:
+ av_log(s, AV_LOG_ERROR, "Unknown stream class (%d)\n", class);
+ return -1;
+ }
+ GET_V(stc->time_base_id , tmp < nut->time_base_count);
+ GET_V(stc->msb_pts_shift , tmp < 16);
+ stc->max_pts_distance= get_v(bc);
+ GET_V(stc->decode_delay , tmp < 1000); //sanity limit, raise this if Moore's law is true
+ st->codec->has_b_frames= stc->decode_delay;
+ get_v(bc); //stream flags
+
+ GET_V(st->codec->extradata_size, tmp < (1<<30));
+ if(st->codec->extradata_size){
+ st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(bc, st->codec->extradata, st->codec->extradata_size);
+ }
+
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO){
+ GET_V(st->codec->width , tmp > 0)
+ GET_V(st->codec->height, tmp > 0)
+ st->codec->sample_aspect_ratio.num= get_v(bc);
+ st->codec->sample_aspect_ratio.den= get_v(bc);
+ if((!st->codec->sample_aspect_ratio.num) != (!st->codec->sample_aspect_ratio.den)){
+ av_log(s, AV_LOG_ERROR, "invalid aspect ratio\n");
+ return -1;
+ }
+ get_v(bc); /* csp type */
+ }else if (st->codec->codec_type == CODEC_TYPE_AUDIO){
+ GET_V(st->codec->sample_rate , tmp > 0)
+ tmp= get_v(bc); // samplerate_den
+ if(tmp > st->codec->sample_rate){
+ av_log(s, AV_LOG_ERROR, "bleh, libnut muxed this ;)\n");
+ st->codec->sample_rate= tmp;
+ }
+ GET_V(st->codec->channels, tmp > 0)
+ }
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Stream header %d checksum mismatch\n", stream_id);
+ return -1;
+ }
+ stc->time_base= nut->time_base[stc->time_base_id];
+ av_set_pts_info(s->streams[stream_id], 63, stc->time_base.num, stc->time_base.den);
+ return 0;
+}
+
+static int decode_info_header(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ uint64_t tmp;
+ unsigned int stream_id_plus1, chapter_start, chapter_len, count;
+ int chapter_id, i;
+ int64_t value, end;
+ char name[256], str_value[1024], type_str[256], *type= type_str;
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ GET_V(stream_id_plus1, tmp <= s->nb_streams)
+ chapter_id = get_s(bc);
+ chapter_start= get_v(bc);
+ chapter_len = get_v(bc);
+ count = get_v(bc);
+ for(i=0; i<count; i++){
+ get_str(bc, name, sizeof(name));
+ value= get_s(bc);
+ if(value == -1){
+ type= "UTF-8";
+ get_str(bc, str_value, sizeof(str_value));
+ }else if(value == -2){
+ get_str(bc, type_str, sizeof(type_str));
+ type= type_str;
+ get_str(bc, str_value, sizeof(str_value));
+ }else if(value == -3){
+ type= "s";
+ value= get_s(bc);
+ }else if(value == -4){
+ type= "t";
+ value= get_v(bc);
+ }else if(value < -4){
+ type= "r";
+ get_s(bc);
+ }else{
+ type= "v";
+ }
+
+ if(chapter_id==0 && !strcmp(type, "UTF-8")){
+ if (!strcmp(name, "Author"))
+ pstrcpy(s->author , sizeof(s->author) , str_value);
+ else if(!strcmp(name, "Title"))
+ pstrcpy(s->title , sizeof(s->title) , str_value);
+ else if(!strcmp(name, "Copyright"))
+ pstrcpy(s->copyright, sizeof(s->copyright), str_value);
+ else if(!strcmp(name, "Description"))
+ pstrcpy(s->comment , sizeof(s->comment) , str_value);
+ }
+ }
+
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Info header checksum mismatch\n");
+ return -1;
+ }
+ return 0;
+}
+
+int sp_pos_cmp(syncpoint_t *a, syncpoint_t *b){
+ return ((a->pos - b->pos)>>32) - ((b->pos - a->pos)>>32);
+}
+
+int sp_pts_cmp(syncpoint_t *a, syncpoint_t *b){
+ return ((a->ts - b->ts)>>32) - ((b->ts - a->ts)>>32);
+}
+
+static void add_sp(NUTContext *nut, int64_t pos, int64_t back_ptr, int64_t ts){
+ syncpoint_t *sp2, *sp= av_mallocz(sizeof(syncpoint_t));
+
+ sp->pos= pos;
+ sp->back_ptr= back_ptr;
+ sp->ts= ts;
+ sp2= av_tree_insert(&nut->syncpoints, sp, sp_pos_cmp);
+ if(sp2 && sp2 != sp)
+ av_free(sp);
+}
+
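+/* parse a syncpoint: decode the global timestamp and back pointer, reset every
+ * stream's last_pts from it and remember the syncpoint for later seeking */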
+static int decode_syncpoint(NUTContext *nut, int64_t *ts, int64_t *back_ptr){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ int64_t end, tmp;
+ int i;
+ AVRational time_base;
+
+ nut->last_syncpoint_pos= url_ftell(bc)-8;
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ tmp= get_v(bc);
+ *back_ptr= nut->last_syncpoint_pos - 16*get_v(bc);
+ if(*back_ptr < 0)
+ return -1;
+
+ time_base= nut->time_base[tmp % nut->time_base_count];
+ for(i=0; i<s->nb_streams; i++){
+ nut->stream[i].last_pts= av_rescale_rnd(
+ tmp / nut->time_base_count,
+ time_base.num * (int64_t)nut->stream[i].time_base.den,
+ time_base.den * (int64_t)nut->stream[i].time_base.num,
+ AV_ROUND_DOWN);
+ //last_key_frame ?
+ }
+ //FIXME put this in a reset func maybe
+
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "sync point checksum mismatch\n");
+ return -1;
+ }
+
+ *ts= tmp / nut->time_base_count * av_q2d(nut->time_base[tmp % nut->time_base_count])*AV_TIME_BASE;
+ add_sp(nut, nut->last_syncpoint_pos, *back_ptr, *ts);
+
+ return 0;
+}
+
+static int find_and_decode_index(NUTContext *nut){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ uint64_t tmp, end;
+ int i, j, syncpoint_count;
+ int64_t filesize= url_fsize(bc);
+ int64_t *syncpoints;
+ int8_t *has_keyframe;
+
+ url_fseek(bc, filesize-12, SEEK_SET);
+ url_fseek(bc, filesize-get_be64(bc), SEEK_SET);
+ if(get_be64(bc) != INDEX_STARTCODE){
+ av_log(s, AV_LOG_ERROR, "no index at the end\n");
+ return -1;
+ }
+
+ end= get_packetheader(nut, bc, 1);
+ end += url_ftell(bc);
+
+ get_v(bc); //max_pts
+ GET_V(syncpoint_count, tmp < INT_MAX/8 && tmp > 0)
+ syncpoints= av_malloc(sizeof(int64_t)*syncpoint_count);
+ has_keyframe= av_malloc(sizeof(int8_t)*(syncpoint_count+1));
+ for(i=0; i<syncpoint_count; i++){
+ GET_V(syncpoints[i], tmp>0)
+ if(i)
+ syncpoints[i] += syncpoints[i-1];
+ }
+
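+ /* per stream the index stores run-length coded "keyframe at this syncpoint"
+ flags, followed for each flagged syncpoint by a pts delta (and an optional
+ EOR delta); turn these into index entries */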
+ for(i=0; i<s->nb_streams; i++){
+ int64_t last_pts= -1;
+ for(j=0; j<syncpoint_count;){
+ uint64_t x= get_v(bc);
+ int type= x&1;
+ int n= j;
+ x>>=1;
+ if(type){
+ int flag= x&1;
+ x>>=1;
+ if(n+x >= syncpoint_count + 1){
+ av_log(s, AV_LOG_ERROR, "index overflow A\n");
+ return -1;
+ }
+ while(x--)
+ has_keyframe[n++]= flag;
+ has_keyframe[n++]= !flag;
+ }else{
+ while(x != 1){
+ if(n>=syncpoint_count + 1){
+ av_log(s, AV_LOG_ERROR, "index overflow B\n");
+ return -1;
+ }
+ has_keyframe[n++]= x&1;
+ x>>=1;
+ }
+ }
+ if(has_keyframe[0]){
+ av_log(s, AV_LOG_ERROR, "keyframe before first syncpoint in index\n");
+ return -1;
+ }
+ assert(n<=syncpoint_count+1);
+ for(; j<n; j++){
+ if(has_keyframe[j]){
+ uint64_t B, A= get_v(bc);
+ if(!A){
+ A= get_v(bc);
+ B= get_v(bc);
+ //eor_pts[j][i] = last_pts + A + B
+ }else
+ B= 0;
+ av_add_index_entry(
+ s->streams[i],
+ 16*syncpoints[j-1],
+ last_pts + A,
+ 0,
+ 0,
+ AVINDEX_KEYFRAME);
+ last_pts += A + B;
+ }
+ }
+ }
+ }
+
+ if(skip_reserved(bc, end) || get_checksum(bc)){
+ av_log(s, AV_LOG_ERROR, "Index checksum mismatch\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int nut_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos;
+ int inited_stream_count;
+
+ nut->avf= s;
+
+ /* main header */
+ pos=0;
+ do{
+ pos= find_startcode(bc, MAIN_STARTCODE, pos)+1;
+ if (pos<0+1){
+ av_log(s, AV_LOG_ERROR, "no main startcode found\n");
+ return -1;
+ }
+ }while(decode_main_header(nut) < 0);
+
+ /* stream headers */
+ pos=0;
+ for(inited_stream_count=0; inited_stream_count < s->nb_streams;){
+ pos= find_startcode(bc, STREAM_STARTCODE, pos)+1;
+ if (pos<0+1){
+ av_log(s, AV_LOG_ERROR, "not all stream headers found\n");
+ return -1;
+ }
+ if(decode_stream_header(nut) >= 0)
+ inited_stream_count++;
+ }
+
+ /* info headers */
+ pos=0;
+ for(;;){
+ uint64_t startcode= find_any_startcode(bc, pos);
+ pos= url_ftell(bc);
+
+ if(startcode==0){
+ av_log(s, AV_LOG_ERROR, "EOF before video frames\n");
+ return -1;
+ }else if(startcode == SYNCPOINT_STARTCODE){
+ nut->next_startcode= startcode;
+ break;
+ }else if(startcode != INFO_STARTCODE){
+ continue;
+ }
+
+ decode_info_header(nut);
+ }
+
+ s->data_offset= pos-8;
+
+ if(!url_is_streamed(bc)){
+ int64_t orig_pos= url_ftell(bc);
+ find_and_decode_index(nut);
+ url_fseek(bc, orig_pos, SEEK_SET);
+ }
+ assert(nut->next_startcode == SYNCPOINT_STARTCODE);
+
+ return 0;
+}
+
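+/* decode a frame header, starting just after the frame_code byte: apply coded
+ * flag overrides, resolve the stream id and pts, skip reserved fields and
+ * return the size of the frame payload */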
+static int decode_frame_header(NUTContext *nut, int64_t *pts, int *stream_id, int frame_code){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ StreamContext *stc;
+ int size, flags, size_mul, pts_delta, i, reserved_count;
+ uint64_t tmp;
+
+ if(url_ftell(bc) > nut->last_syncpoint_pos + nut->max_distance){
+ av_log(s, AV_LOG_ERROR, "last frame must have been damaged %Ld > %Ld + %d\n", url_ftell(bc), nut->last_syncpoint_pos, nut->max_distance);
+ return -1;
+ }
+
+ flags = nut->frame_code[frame_code].flags;
+ size_mul = nut->frame_code[frame_code].size_mul;
+ size = nut->frame_code[frame_code].size_lsb;
+ *stream_id = nut->frame_code[frame_code].stream_id;
+ pts_delta = nut->frame_code[frame_code].pts_delta;
+ reserved_count = nut->frame_code[frame_code].reserved_count;
+
+ if(flags & FLAG_INVALID)
+ return -1;
+ if(flags & FLAG_CODED)
+ flags ^= get_v(bc);
+ if(flags & FLAG_STREAM_ID){
+ GET_V(*stream_id, tmp < s->nb_streams)
+ }
+ stc= &nut->stream[*stream_id];
+ if(flags&FLAG_CODED_PTS){
+ int coded_pts= get_v(bc);
+//FIXME check last_pts validity?
+ if(coded_pts < (1<<stc->msb_pts_shift)){
+ *pts=lsb2full(stc, coded_pts);
+ }else
+ *pts=coded_pts - (1<<stc->msb_pts_shift);
+ }else
+ *pts= stc->last_pts + pts_delta;
+ if(flags&FLAG_SIZE_MSB){
+ size += size_mul*get_v(bc);
+ }
+ if(flags&FLAG_RESERVED)
+ reserved_count= get_v(bc);
+ for(i=0; i<reserved_count; i++)
+ get_v(bc);
+ if(flags&FLAG_CHECKSUM){
+ get_be32(bc); //FIXME check this
+ }else if(size > 2*nut->max_distance || FFABS(stc->last_pts - *pts) > stc->max_pts_distance){
+ av_log(s, AV_LOG_ERROR, "frame size > 2max_distance and no checksum\n");
+ return -1;
+ }
+
+ stc->last_pts= *pts;
+ stc->last_flags= flags;
+
+ return size;
+}
+
+static int decode_frame(NUTContext *nut, AVPacket *pkt, int frame_code){
+ AVFormatContext *s= nut->avf;
+ ByteIOContext *bc = &s->pb;
+ int size, stream_id, discard;
+ int64_t pts, last_IP_pts;
+ StreamContext *stc;
+
+ size= decode_frame_header(nut, &pts, &stream_id, frame_code);
+ if(size < 0)
+ return -1;
+
+ stc= &nut->stream[stream_id];
+
+ if (stc->last_flags & FLAG_KEY)
+ stc->skip_until_key_frame=0;
+
+ discard= s->streams[ stream_id ]->discard;
+ last_IP_pts= s->streams[ stream_id ]->last_IP_pts;
+ if( (discard >= AVDISCARD_NONKEY && !(stc->last_flags & FLAG_KEY))
+ ||(discard >= AVDISCARD_BIDIR && last_IP_pts != AV_NOPTS_VALUE && last_IP_pts > pts)
+ || discard >= AVDISCARD_ALL
+ || stc->skip_until_key_frame){
+ url_fskip(bc, size);
+ return 1;
+ }
+
+ av_get_packet(bc, pkt, size);
+ pkt->stream_index = stream_id;
+ if (stc->last_flags & FLAG_KEY)
+ pkt->flags |= PKT_FLAG_KEY;
+ pkt->pts = pts;
+
+ return 0;
+}
+
+static int nut_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int i, frame_code=0, ret, skip;
+ int64_t ts, back_ptr;
+
+ for(;;){
+ int64_t pos= url_ftell(bc);
+ uint64_t tmp= nut->next_startcode;
+ nut->next_startcode=0;
+
+ if (url_feof(bc))
+ return -1;
+
+ if(tmp){
+ pos-=8;
+ }else{
+ frame_code = get_byte(bc);
+ if(frame_code == 'N'){
+ tmp= frame_code;
+ for(i=1; i<8; i++)
+ tmp = (tmp<<8) + get_byte(bc);
+ }
+ }
+ switch(tmp){
+ case MAIN_STARTCODE:
+ case STREAM_STARTCODE:
+ case INDEX_STARTCODE:
+ skip= get_packetheader(nut, bc, 0);
+ url_fseek(bc, skip, SEEK_CUR);
+ break;
+ case INFO_STARTCODE:
+ if(decode_info_header(nut)<0)
+ goto resync;
+ break;
+ case SYNCPOINT_STARTCODE:
+ if(decode_syncpoint(nut, &ts, &back_ptr)<0)
+ goto resync;
+ frame_code = get_byte(bc);
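+ /* fall through to frame decoding */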
+ case 0:
+ ret= decode_frame(nut, pkt, frame_code);
+ if(ret==0)
+ return 0;
+ else if(ret==1) //ok but discard packet
+ break;
+ default:
+resync:
+av_log(s, AV_LOG_DEBUG, "syncing from %"PRId64"\n", pos);
+ tmp= find_any_startcode(bc, nut->last_syncpoint_pos+1);
+ if(tmp==0)
+ return -1;
+av_log(s, AV_LOG_DEBUG, "sync\n");
+ nut->next_startcode= tmp;
+ }
+ }
+}
+
+static int64_t nut_read_timestamp(AVFormatContext *s, int stream_index, int64_t *pos_arg, int64_t pos_limit){
+ NUTContext *nut = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos, pts, back_ptr;
+av_log(s, AV_LOG_DEBUG, "read_timestamp(X,%d,%"PRId64",%"PRId64")\n", stream_index, *pos_arg, pos_limit);
+
+ pos= *pos_arg;
+resync:
+ do{
+ pos= find_startcode(bc, SYNCPOINT_STARTCODE, pos)+1;
+ if(pos < 1){
+ assert(nut->next_startcode == 0);
+ av_log(s, AV_LOG_ERROR, "read_timestamp failed\n");
+ return AV_NOPTS_VALUE;
+ }
+ }while(decode_syncpoint(nut, &pts, &back_ptr) < 0);
+ *pos_arg = pos-1;
+ assert(nut->last_syncpoint_pos == *pos_arg);
+
+ av_log(s, AV_LOG_DEBUG, "return %Ld %Ld\n", pts,back_ptr );
+ if (stream_index == -1) return pts;
+ else if(stream_index == -2) return back_ptr;
+
+assert(0);
+}
+
+static int read_seek(AVFormatContext *s, int stream_index, int64_t pts, int flags){
+ NUTContext *nut = s->priv_data;
+ AVStream *st= s->streams[stream_index];
+ syncpoint_t dummy={.ts= pts*av_q2d(st->time_base)*AV_TIME_BASE};
+ syncpoint_t nopts_sp= {.ts= AV_NOPTS_VALUE, .back_ptr= AV_NOPTS_VALUE};
+ syncpoint_t *sp, *next_node[2]= {&nopts_sp, &nopts_sp};
+ int64_t pos, pos2, ts;
+ int i;
+
+ if(st->index_entries){
+ int index= av_index_search_timestamp(st, pts, flags);
+ if(index<0)
+ return -1;
+
+ pos2= st->index_entries[index].pos;
+ ts = st->index_entries[index].timestamp;
+ }else{
+ av_tree_find(nut->syncpoints, &dummy, sp_pts_cmp, next_node);
+ av_log(s, AV_LOG_DEBUG, "%Ld-%Ld %Ld-%Ld\n", next_node[0]->pos, next_node[1]->pos,
+ next_node[0]->ts , next_node[1]->ts);
+ pos= av_gen_search(s, -1, dummy.ts, next_node[0]->pos, next_node[1]->pos, next_node[1]->pos,
+ next_node[0]->ts , next_node[1]->ts, AVSEEK_FLAG_BACKWARD, &ts, nut_read_timestamp);
+
+ if(!(flags & AVSEEK_FLAG_BACKWARD)){
+ dummy.pos= pos+16;
+ next_node[1]= &nopts_sp;
+ av_tree_find(nut->syncpoints, &dummy, sp_pos_cmp, next_node);
+ pos2= av_gen_search(s, -2, dummy.pos, next_node[0]->pos , next_node[1]->pos, next_node[1]->pos,
+ next_node[0]->back_ptr, next_node[1]->back_ptr, flags, &ts, nut_read_timestamp);
+ if(pos2>=0)
+ pos= pos2;
+ //FIXME dir, but I think it doesn't matter
+ }
+ dummy.pos= pos;
+ sp= av_tree_find(nut->syncpoints, &dummy, sp_pos_cmp, NULL);
+
+ assert(sp);
+ pos2= sp->back_ptr - 15;
+ }
+ av_log(NULL, AV_LOG_DEBUG, "SEEKTO: %"PRId64"\n", pos2);
+ pos= find_startcode(&s->pb, SYNCPOINT_STARTCODE, pos2);
+ url_fseek(&s->pb, pos, SEEK_SET);
+ av_log(NULL, AV_LOG_DEBUG, "SP: %"PRId64"\n", pos);
+ if(pos2 > pos || pos2 + 15 < pos){
+ av_log(NULL, AV_LOG_ERROR, "no syncpoint at backptr pos\n");
+ }
+ for(i=0; i<s->nb_streams; i++)
+ nut->stream[i].skip_until_key_frame=1;
+
+ return 0;
+}
+
+static int nut_read_close(AVFormatContext *s)
+{
+ NUTContext *nut = s->priv_data;
+
+ av_freep(&nut->time_base);
+ av_freep(&nut->stream);
+
+ return 0;
+}
+
+#ifdef CONFIG_NUT_DEMUXER
+AVInputFormat nut_demuxer = {
+ "nut",
+ "nut format",
+ sizeof(NUTContext),
+ nut_probe,
+ nut_read_header,
+ nut_read_packet,
+ nut_read_close,
+ read_seek,
+ .extensions = "nut",
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/nuv.c b/contrib/ffmpeg/libavformat/nuv.c
new file mode 100644
index 000000000..3b96eb940
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/nuv.c
@@ -0,0 +1,241 @@
+/*
+ * NuppelVideo demuxer.
+ * Copyright (c) 2006 Reimar Doeffinger.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "riff.h"
+
+typedef struct {
+ int v_id;
+ int a_id;
+} NUVContext;
+
+typedef enum {
+ NUV_VIDEO = 'V',
+ NUV_EXTRADATA = 'D',
+ NUV_AUDIO = 'A',
+ NUV_SEEKP = 'R',
+ NUV_MYTHEXT = 'X'
+} frametype_t;
+
+static int nuv_probe(AVProbeData *p) {
+ if (p->buf_size < 12)
+ return 0;
+ if (!memcmp(p->buf, "NuppelVideo", 12))
+ return AVPROBE_SCORE_MAX;
+ if (!memcmp(p->buf, "MythTVVideo", 12))
+ return AVPROBE_SCORE_MAX;
+ return 0;
+}
+
+//! little macro to sanitize packet size
+#define PKTSIZE(s) (s & 0xffffff)
+
+/**
+ * \brief read until all data needed for decoding has been found
+ * \param vst video stream of which to change parameters
+ * \param ast audio stream of which to change parameters
+ * \param myth set if this is a MythTVVideo format file
+ * \return 1 if all required codec data was found
+ */
+static int get_codec_data(ByteIOContext *pb, AVStream *vst,
+ AVStream *ast, int myth) {
+ frametype_t frametype;
+ if (!vst && !myth)
+ return 1; // no codec data needed
+ while (!url_feof(pb)) {
+ int size, subtype;
+ frametype = get_byte(pb);
+ switch (frametype) {
+ case NUV_EXTRADATA:
+ subtype = get_byte(pb);
+ url_fskip(pb, 6);
+ size = PKTSIZE(get_le32(pb));
+ if (vst && subtype == 'R') {
+ vst->codec->extradata_size = size;
+ vst->codec->extradata = av_malloc(size);
+ get_buffer(pb, vst->codec->extradata, size);
+ size = 0;
+ if (!myth)
+ return 1;
+ }
+ break;
+ case NUV_MYTHEXT:
+ url_fskip(pb, 7);
+ size = PKTSIZE(get_le32(pb));
+ if (size != 128 * 4)
+ break;
+ get_le32(pb); // version
+ if (vst) {
+ vst->codec->codec_tag = get_le32(pb);
+ vst->codec->codec_id =
+ codec_get_id(codec_bmp_tags, vst->codec->codec_tag);
+ } else
+ url_fskip(pb, 4);
+
+ if (ast) {
+ ast->codec->codec_tag = get_le32(pb);
+ ast->codec->sample_rate = get_le32(pb);
+ ast->codec->bits_per_sample = get_le32(pb);
+ ast->codec->channels = get_le32(pb);
+ ast->codec->codec_id =
+ wav_codec_get_id(ast->codec->codec_tag,
+ ast->codec->bits_per_sample);
+ } else
+ url_fskip(pb, 4 * 4);
+
+ size -= 6 * 4;
+ url_fskip(pb, size);
+ return 1;
+ case NUV_SEEKP:
+ size = 11;
+ break;
+ default:
+ url_fskip(pb, 7);
+ size = PKTSIZE(get_le32(pb));
+ break;
+ }
+ url_fskip(pb, size);
+ }
+ return 0;
+}
+
+static int nuv_header(AVFormatContext *s, AVFormatParameters *ap) {
+ NUVContext *ctx = (NUVContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ char id_string[12], version_string[5];
+ double aspect, fps;
+ int is_mythtv, width, height, v_packs, a_packs;
+ int stream_nr = 0;
+ AVStream *vst = NULL, *ast = NULL;
+ get_buffer(pb, id_string, 12);
+ is_mythtv = !memcmp(id_string, "MythTVVideo", 12);
+ get_buffer(pb, version_string, 5);
+ url_fskip(pb, 3); // padding
+ width = get_le32(pb);
+ height = get_le32(pb);
+ get_le32(pb); // unused, "desiredwidth"
+ get_le32(pb); // unused, "desiredheight"
+ get_byte(pb); // 'P' == progressive, 'I' == interlaced
+ url_fskip(pb, 3); // padding
+ aspect = av_int2dbl(get_le64(pb));
+ fps = av_int2dbl(get_le64(pb));
+
+ // number of packets per stream type, -1 means unknown, e.g. streaming
+ v_packs = get_le32(pb);
+ a_packs = get_le32(pb);
+ get_le32(pb); // text
+
+ get_le32(pb); // keyframe distance (?)
+
+ if (v_packs) {
+ ctx->v_id = stream_nr++;
+ vst = av_new_stream(s, ctx->v_id);
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = CODEC_ID_NUV;
+ vst->codec->codec_tag = MKTAG('R', 'J', 'P', 'G');
+ vst->codec->width = width;
+ vst->codec->height = height;
+ vst->codec->bits_per_sample = 10;
+ vst->codec->sample_aspect_ratio = av_d2q(aspect, 10000);
+ vst->r_frame_rate = av_d2q(1.0 / fps, 10000);
+ av_set_pts_info(vst, 32, 1, 1000);
+ } else
+ ctx->v_id = -1;
+
+ if (a_packs) {
+ ctx->a_id = stream_nr++;
+ ast = av_new_stream(s, ctx->a_id);
+ ast->codec->codec_type = CODEC_TYPE_AUDIO;
+ ast->codec->codec_id = CODEC_ID_PCM_S16LE;
+ ast->codec->channels = 2;
+ ast->codec->sample_rate = 44100;
+ ast->codec->bit_rate = 2 * 2 * 44100 * 8;
+ ast->codec->block_align = 2 * 2;
+ ast->codec->bits_per_sample = 16;
+ av_set_pts_info(ast, 32, 1, 1000);
+ } else
+ ctx->a_id = -1;
+
+ get_codec_data(pb, vst, ast, is_mythtv);
+ return 0;
+}
+
+#define HDRSIZE 12
+
+static int nuv_packet(AVFormatContext *s, AVPacket *pkt) {
+ NUVContext *ctx = (NUVContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ uint8_t hdr[HDRSIZE];
+ frametype_t frametype;
+ int ret, size;
+ while (!url_feof(pb)) {
+ ret = get_buffer(pb, hdr, HDRSIZE);
+ if (ret <= 0)
+ return ret ? ret : -1;
+ frametype = hdr[0];
+ size = PKTSIZE(LE_32(&hdr[8]));
+ switch (frametype) {
+ case NUV_VIDEO:
+ case NUV_EXTRADATA:
+ if (ctx->v_id < 0) {
+ av_log(s, AV_LOG_ERROR, "Video packet in file without video stream!\n");
+ url_fskip(pb, size);
+ break;
+ }
+ ret = av_new_packet(pkt, HDRSIZE + size);
+ if (ret < 0)
+ return ret;
+ pkt->pos = url_ftell(pb);
+ pkt->pts = LE_32(&hdr[4]);
+ pkt->stream_index = ctx->v_id;
+ memcpy(pkt->data, hdr, HDRSIZE);
+ ret = get_buffer(pb, pkt->data + HDRSIZE, size);
+ return ret;
+ case NUV_AUDIO:
+ if (ctx->a_id < 0) {
+ av_log(s, AV_LOG_ERROR, "Audio packet in file without audio stream!\n");
+ url_fskip(pb, size);
+ break;
+ }
+ ret = av_get_packet(pb, pkt, size);
+ pkt->pts = LE_32(&hdr[4]);
+ pkt->stream_index = ctx->a_id;
+ return ret;
+ case NUV_SEEKP:
+ // contains no data, size value is invalid
+ break;
+ default:
+ url_fskip(pb, size);
+ break;
+ }
+ }
+ return AVERROR_IO;
+}
+
+AVInputFormat nuv_demuxer = {
+ "nuv",
+ "NuppelVideo format",
+ sizeof(NUVContext),
+ nuv_probe,
+ nuv_header,
+ nuv_packet,
+ NULL,
+ NULL,
+};
diff --git a/contrib/ffmpeg/libavformat/ogg.c b/contrib/ffmpeg/libavformat/ogg.c
new file mode 100644
index 000000000..369fa4639
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ogg.c
@@ -0,0 +1,283 @@
+/*
+ * Ogg bitstream support
+ * Mark Hills <mark@pogo.org.uk>
+ *
+ * Uses libogg, but requires libvorbisenc to construct correct headers
+ * when containing a Vorbis stream -- currently the only format supported
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+
+#include <ogg/ogg.h>
+
+#include "avformat.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+#define DECODER_BUFFER_SIZE 4096
+
+
+typedef struct OggContext {
+ /* output */
+ ogg_stream_state os ;
+ int header_handled ;
+ ogg_packet op;
+
+ /* input */
+ ogg_sync_state oy ;
+} OggContext ;
+
+
+#ifdef CONFIG_MUXERS
+static int ogg_write_header(AVFormatContext *avfcontext)
+{
+ OggContext *context = avfcontext->priv_data;
+ ogg_packet *op= &context->op;
+ int n;
+
+ ogg_stream_init(&context->os, 31415);
+
+ for(n = 0 ; n < avfcontext->nb_streams ; n++) {
+ AVCodecContext *codec = avfcontext->streams[n]->codec;
+ uint8_t *headers = codec->extradata;
+ int headers_len = codec->extradata_size;
+ uint8_t *header_start[3];
+ int header_len[3];
+ int i, j;
+
+ av_set_pts_info(avfcontext->streams[n], 60, 1, AV_TIME_BASE);
+
+ for(j=1,i=0;i<2;++i, ++j) {
+ header_len[i]=0;
+ while(j<headers_len && headers[j]==0xff) {
+ header_len[i]+=0xff;
+ ++j;
+ }
+ header_len[i]+=headers[j];
+ }
+ header_len[2]=headers_len-header_len[0]-header_len[1]-j;
+ headers+=j;
+ header_start[0] = headers;
+ header_start[1] = header_start[0] + header_len[0];
+ header_start[2] = header_start[1] + header_len[1];
+
+ for(i=0; i < 3; ++i){
+ op->bytes = header_len[i];
+
+ op->packet= header_start[i];
+ op->b_o_s= op->packetno==0;
+
+ ogg_stream_packetin(&context->os, op);
+
+ op->packetno++; //FIXME multiple streams
+ }
+
+ context->header_handled = 0 ;
+ }
+
+ return 0 ;
+}
+
+static int ogg_write_packet(AVFormatContext *avfcontext, AVPacket *pkt)
+{
+ OggContext *context = avfcontext->priv_data ;
+ AVCodecContext *avctx= avfcontext->streams[pkt->stream_index]->codec;
+ ogg_packet *op= &context->op;
+ ogg_page og ;
+ int64_t pts;
+
+ pts= av_rescale(pkt->pts, avctx->sample_rate, AV_TIME_BASE);
+
+// av_log(avfcontext, AV_LOG_DEBUG, "M%d\n", size);
+
+ /* flush header packets so audio starts on a new page */
+
+ if(!context->header_handled) {
+ while(ogg_stream_flush(&context->os, &og)) {
+ put_buffer(&avfcontext->pb, og.header, og.header_len) ;
+ put_buffer(&avfcontext->pb, og.body, og.body_len) ;
+ put_flush_packet(&avfcontext->pb);
+ }
+ context->header_handled = 1 ;
+ }
+
+ op->packet = (uint8_t*) pkt->data;
+ op->bytes = pkt->size;
+ op->b_o_s = op->packetno == 0;
+ op->granulepos= pts;
+
+ /* correct the fields in the packet -- essential for streaming */
+
+ ogg_stream_packetin(&context->os, op);
+
+ while(ogg_stream_pageout(&context->os, &og)) {
+ put_buffer(&avfcontext->pb, og.header, og.header_len);
+ put_buffer(&avfcontext->pb, og.body, og.body_len);
+ put_flush_packet(&avfcontext->pb);
+ }
+ op->packetno++;
+
+ return 0;
+}
+
+
+static int ogg_write_trailer(AVFormatContext *avfcontext) {
+ OggContext *context = avfcontext->priv_data ;
+ ogg_page og ;
+
+ while(ogg_stream_flush(&context->os, &og)) {
+ put_buffer(&avfcontext->pb, og.header, og.header_len) ;
+ put_buffer(&avfcontext->pb, og.body, og.body_len) ;
+ put_flush_packet(&avfcontext->pb);
+ }
+
+ ogg_stream_clear(&context->os) ;
+ return 0 ;
+}
+
+
+AVOutputFormat ogg_muxer = {
+ "ogg",
+ "Ogg Vorbis",
+ "audio/x-vorbis",
+ "ogg",
+ sizeof(OggContext),
+ CODEC_ID_VORBIS,
+ 0,
+ ogg_write_header,
+ ogg_write_packet,
+ ogg_write_trailer,
+} ;
+#endif //CONFIG_MUXERS
+
+#if 0
+static int next_packet(AVFormatContext *avfcontext, ogg_packet *op) {
+ OggContext *context = avfcontext->priv_data ;
+ ogg_page og ;
+ char *buf ;
+
+ while(ogg_stream_packetout(&context->os, op) != 1) {
+
+ /* while no pages are available, read in more data to the sync */
+ while(ogg_sync_pageout(&context->oy, &og) != 1) {
+ buf = ogg_sync_buffer(&context->oy, DECODER_BUFFER_SIZE) ;
+ if(get_buffer(&avfcontext->pb, buf, DECODER_BUFFER_SIZE) <= 0)
+ return 1 ;
+ ogg_sync_wrote(&context->oy, DECODER_BUFFER_SIZE) ;
+ }
+
+ /* got a page. Feed it into the stream and get the packet */
+ if(ogg_stream_pagein(&context->os, &og) != 0)
+ return 1 ;
+ }
+
+ return 0 ;
+}
+
+
+static int ogg_read_header(AVFormatContext *avfcontext, AVFormatParameters *ap)
+{
+ OggContext *context = avfcontext->priv_data;
+ ogg_packet op ;
+ char *buf ;
+ ogg_page og ;
+ AVStream *ast ;
+ AVCodecContext *codec;
+ uint8_t *p;
+ int i;
+
+ ogg_sync_init(&context->oy) ;
+ buf = ogg_sync_buffer(&context->oy, DECODER_BUFFER_SIZE) ;
+
+ if(get_buffer(&avfcontext->pb, buf, DECODER_BUFFER_SIZE) <= 0)
+ return AVERROR_IO ;
+
+ ogg_sync_wrote(&context->oy, DECODER_BUFFER_SIZE) ;
+ ogg_sync_pageout(&context->oy, &og) ;
+ ogg_stream_init(&context->os, ogg_page_serialno(&og)) ;
+ ogg_stream_pagein(&context->os, &og) ;
+
+ /* currently only one vorbis stream supported */
+
+ ast = av_new_stream(avfcontext, 0) ;
+ if(!ast)
+ return AVERROR_NOMEM ;
+ av_set_pts_info(ast, 60, 1, AV_TIME_BASE);
+
+ codec= &ast->codec;
+ codec->codec_type = CODEC_TYPE_AUDIO;
+ codec->codec_id = CODEC_ID_VORBIS;
+ for(i=0; i<3; i++){
+ if(next_packet(avfcontext, &op)){
+ return -1;
+ }
+ if(op.bytes >= (1<<16) || op.bytes < 0)
+ return -1;
+ codec->extradata_size+= 2 + op.bytes;
+ codec->extradata= av_realloc(codec->extradata, codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ memset(codec->extradata + codec->extradata_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+ p= codec->extradata + codec->extradata_size - 2 - op.bytes;
+ *(p++)= op.bytes>>8;
+ *(p++)= op.bytes&0xFF;
+ memcpy(p, op.packet, op.bytes);
+ }
+
+ return 0 ;
+}
+
+
+static int ogg_read_packet(AVFormatContext *avfcontext, AVPacket *pkt) {
+ ogg_packet op ;
+
+ if(next_packet(avfcontext, &op))
+ return AVERROR_IO ;
+ if(av_new_packet(pkt, op.bytes) < 0)
+ return AVERROR_IO ;
+ pkt->stream_index = 0 ;
+ memcpy(pkt->data, op.packet, op.bytes);
+ if(avfcontext->streams[0]->codec.sample_rate && op.granulepos!=-1)
+ pkt->pts= av_rescale(op.granulepos, AV_TIME_BASE, avfcontext->streams[0]->codec.sample_rate);
+// printf("%"PRId64" %d %d\n", pkt->pts, (int)op.granulepos, avfcontext->streams[0]->codec.sample_rate);
+
+ return op.bytes;
+}
+
+
+static int ogg_read_close(AVFormatContext *avfcontext) {
+ OggContext *context = avfcontext->priv_data ;
+
+ ogg_stream_clear(&context->os) ;
+ ogg_sync_clear(&context->oy) ;
+
+ return 0 ;
+}
+
+
+static AVInputFormat ogg_iformat = {
+ "ogg",
+ "Ogg Vorbis",
+ sizeof(OggContext),
+ NULL,
+ ogg_read_header,
+ ogg_read_packet,
+ ogg_read_close,
+ .extensions = "ogg",
+} ;
+#endif
diff --git a/contrib/ffmpeg/libavformat/ogg2.c b/contrib/ffmpeg/libavformat/ogg2.c
new file mode 100644
index 000000000..1e5d38620
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ogg2.c
@@ -0,0 +1,697 @@
+/*
+ * Ogg bitstream support
+ * Luca Barbato <lu_zero@gentoo.org>
+ * Based on tcvp implementation
+ *
+ */
+
+/**
+ Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+
+#include <stdio.h>
+#include "ogg2.h"
+#include "avformat.h"
+
+#define MAX_PAGE_SIZE 65307
+#define DECODER_BUFFER_SIZE MAX_PAGE_SIZE
+
+static ogg_codec_t *ogg_codecs[] = {
+ &vorbis_codec,
+ &theora_codec,
+ &flac_codec,
+ &ogm_video_codec,
+ &ogm_audio_codec,
+ &ogm_old_codec,
+ NULL
+};
+
+#if 0 // CONFIG_MUXERS
+static int
+ogg_write_header (AVFormatContext * avfcontext)
+{
+}
+
+static int
+ogg_write_packet (AVFormatContext * avfcontext, AVPacket * pkt)
+{
+}
+
+
+static int
+ogg_write_trailer (AVFormatContext * avfcontext)
+{
+}
+
+
+AVOutputFormat ogg_muxer = {
+ "ogg",
+ "Ogg Vorbis",
+ "audio/x-vorbis",
+ "ogg",
+ sizeof (OggContext),
+ CODEC_ID_VORBIS,
+ 0,
+ ogg_write_header,
+ ogg_write_packet,
+ ogg_write_trailer,
+};
+#endif //CONFIG_MUXERS
+
+//FIXME We could avoid some structure duplication
+static int
+ogg_save (AVFormatContext * s)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_state_t *ost =
+ av_malloc(sizeof (*ost) + (ogg->nstreams-1) * sizeof (*ogg->streams));
+ int i;
+ ost->pos = url_ftell (&s->pb);
+ ost->curidx = ogg->curidx;
+ ost->next = ogg->state;
+ memcpy(ost->streams, ogg->streams, ogg->nstreams * sizeof(*ogg->streams));
+
+ for (i = 0; i < ogg->nstreams; i++){
+ ogg_stream_t *os = ogg->streams + i;
+ os->buf = av_malloc (os->bufsize);
+ memset (os->buf, 0, os->bufsize);
+ memcpy (os->buf, ost->streams[i].buf, os->bufpos);
+ }
+
+ ogg->state = ost;
+
+ return 0;
+}
+
+static int
+ogg_restore (AVFormatContext * s, int discard)
+{
+ ogg_t *ogg = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ ogg_state_t *ost = ogg->state;
+ int i;
+
+ if (!ost)
+ return 0;
+
+ ogg->state = ost->next;
+
+ if (!discard){
+ for (i = 0; i < ogg->nstreams; i++)
+ av_free (ogg->streams[i].buf);
+
+ url_fseek (bc, ost->pos, SEEK_SET);
+ ogg->curidx = ost->curidx;
+ memcpy (ogg->streams, ost->streams,
+ ogg->nstreams * sizeof (*ogg->streams));
+ }
+
+ av_free (ost);
+
+ return 0;
+}
+
+static int
+ogg_reset (ogg_t * ogg)
+{
+ int i;
+
+ for (i = 0; i < ogg->nstreams; i++){
+ ogg_stream_t *os = ogg->streams + i;
+ os->bufpos = 0;
+ os->pstart = 0;
+ os->psize = 0;
+ os->granule = -1;
+ os->lastgp = -1;
+ os->nsegs = 0;
+ os->segp = 0;
+ }
+
+ ogg->curidx = -1;
+
+ return 0;
+}
+
+static ogg_codec_t *
+ogg_find_codec (uint8_t * buf, int size)
+{
+ int i;
+
+ for (i = 0; ogg_codecs[i]; i++)
+ if (size >= ogg_codecs[i]->magicsize &&
+ !memcmp (buf, ogg_codecs[i]->magic, ogg_codecs[i]->magicsize))
+ return ogg_codecs[i];
+
+ return NULL;
+}
+
+static int
+ogg_find_stream (ogg_t * ogg, int serial)
+{
+ int i;
+
+ for (i = 0; i < ogg->nstreams; i++)
+ if (ogg->streams[i].serial == serial)
+ return i;
+
+ return -1;
+}
+
+static int
+ogg_new_stream (AVFormatContext * s, uint32_t serial)
+{
+
+ ogg_t *ogg = s->priv_data;
+ int idx = ogg->nstreams++;
+ AVStream *st;
+ ogg_stream_t *os;
+
+ ogg->streams = av_realloc (ogg->streams,
+ ogg->nstreams * sizeof (*ogg->streams));
+ memset (ogg->streams + idx, 0, sizeof (*ogg->streams));
+ os = ogg->streams + idx;
+ os->serial = serial;
+ os->bufsize = DECODER_BUFFER_SIZE;
+ os->buf = av_malloc(os->bufsize);
+ os->header = -1;
+
+ st = av_new_stream (s, idx);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 64, 1, 1000000);
+
+ return idx;
+}
+
+static int
+ogg_new_buf(ogg_t *ogg, int idx)
+{
+ ogg_stream_t *os = ogg->streams + idx;
+ uint8_t *nb = av_malloc(os->bufsize);
+ int size = os->bufpos - os->pstart;
+ if(os->buf){
+ memcpy(nb, os->buf + os->pstart, size);
+ av_free(os->buf);
+ }
+ os->buf = nb;
+ os->bufpos = size;
+ os->pstart = 0;
+
+ return 0;
+}
+
+static int
+ogg_read_page (AVFormatContext * s, int *str)
+{
+ ByteIOContext *bc = &s->pb;
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os;
+ int i = 0;
+ int flags, nsegs;
+ uint64_t gp;
+ uint32_t serial;
+ uint32_t seq;
+ uint32_t crc;
+ int size, idx;
+ uint8_t sync[4];
+ int sp = 0;
+
+ if (get_buffer (bc, sync, 4) < 4)
+ return -1;
+
+ do{
+ int c;
+
+ if (sync[sp & 3] == 'O' &&
+ sync[(sp + 1) & 3] == 'g' &&
+ sync[(sp + 2) & 3] == 'g' && sync[(sp + 3) & 3] == 'S')
+ break;
+
+ c = url_fgetc (bc);
+ if (c < 0)
+ return -1;
+ sync[sp++ & 3] = c;
+ }while (i++ < MAX_PAGE_SIZE);
+
+ if (i >= MAX_PAGE_SIZE){
+ av_log (s, AV_LOG_INFO, "ogg, can't find sync word\n");
+ return -1;
+ }
+
+ if (url_fgetc (bc) != 0) /* version */
+ return -1;
+
+ flags = url_fgetc (bc);
+ gp = get_le64 (bc);
+ serial = get_le32 (bc);
+ seq = get_le32 (bc);
+ crc = get_le32 (bc);
+ nsegs = url_fgetc (bc);
+
+ idx = ogg_find_stream (ogg, serial);
+ if (idx < 0){
+ idx = ogg_new_stream (s, serial);
+ if (idx < 0)
+ return -1;
+ }
+
+ os = ogg->streams + idx;
+
+ if(os->psize > 0)
+ ogg_new_buf(ogg, idx);
+
+ if (get_buffer (bc, os->segments, nsegs) < nsegs)
+ return -1;
+
+ os->nsegs = nsegs;
+ os->segp = 0;
+
+ size = 0;
+ for (i = 0; i < nsegs; i++)
+ size += os->segments[i];
+
+ if (flags & OGG_FLAG_CONT){
+ if (!os->psize){
+ while (os->segp < os->nsegs){
+ int seg = os->segments[os->segp++];
+ os->pstart += seg;
+ if (seg < 255)
+ break;
+ }
+ }
+ }else{
+ os->psize = 0;
+ }
+
+ if (os->bufsize - os->bufpos < size){
+ uint8_t *nb = av_malloc (os->bufsize *= 2);
+ memcpy (nb, os->buf, os->bufpos);
+ av_free (os->buf);
+ os->buf = nb;
+ }
+
+ if (get_buffer (bc, os->buf + os->bufpos, size) < size)
+ return -1;
+
+ os->lastgp = os->granule;
+ os->bufpos += size;
+ os->granule = gp;
+ os->flags = flags;
+
+ if (str)
+ *str = idx;
+
+ return 0;
+}
+
+static int
+ogg_packet (AVFormatContext * s, int *str, int *dstart, int *dsize)
+{
+ ogg_t *ogg = s->priv_data;
+ int idx;
+ ogg_stream_t *os;
+ int complete = 0;
+ int segp = 0, psize = 0;
+
+#if 0
+ av_log (s, AV_LOG_DEBUG, "ogg_packet: curidx=%i\n", ogg->curidx);
+#endif
+
+ do{
+ idx = ogg->curidx;
+
+ while (idx < 0){
+ if (ogg_read_page (s, &idx) < 0)
+ return -1;
+ }
+
+ os = ogg->streams + idx;
+
+#if 0
+ av_log (s, AV_LOG_DEBUG,
+ "ogg_packet: idx=%d pstart=%d psize=%d segp=%d nsegs=%d\n",
+ idx, os->pstart, os->psize, os->segp, os->nsegs);
+#endif
+
+ if (!os->codec){
+ if (os->header < 0){
+ os->codec = ogg_find_codec (os->buf, os->bufpos);
+ if (!os->codec){
+ os->header = 0;
+ return 0;
+ }
+ }else{
+ return 0;
+ }
+ }
+
+ segp = os->segp;
+ psize = os->psize;
+
+ while (os->segp < os->nsegs){
+ int ss = os->segments[os->segp++];
+ os->psize += ss;
+ if (ss < 255){
+ complete = 1;
+ break;
+ }
+ }
+
+ if (!complete && os->segp == os->nsegs){
+ ogg->curidx = -1;
+ }
+ }while (!complete);
+
+#if 0
+ av_log (s, AV_LOG_DEBUG,
+ "ogg_packet: idx %i, frame size %i, start %i\n",
+ idx, os->psize, os->pstart);
+#endif
+
+ ogg->curidx = idx;
+
+ if (os->header < 0){
+ int hdr = os->codec->header (s, idx);
+ if (!hdr){
+ os->header = os->seq;
+ os->segp = segp;
+ os->psize = psize;
+ ogg->headers = 1;
+ }else{
+ os->pstart += os->psize;
+ os->psize = 0;
+ }
+ }
+
+ if (os->header > -1 && os->seq > os->header){
+ if (os->codec && os->codec->packet)
+ os->codec->packet (s, idx);
+ if (str)
+ *str = idx;
+ if (dstart)
+ *dstart = os->pstart;
+ if (dsize)
+ *dsize = os->psize;
+ os->pstart += os->psize;
+ os->psize = 0;
+ }
+
+ os->seq++;
+ if (os->segp == os->nsegs)
+ ogg->curidx = -1;
+
+ return 0;
+}
+
+static int
+ogg_get_headers (AVFormatContext * s)
+{
+ ogg_t *ogg = s->priv_data;
+
+ do{
+ if (ogg_packet (s, NULL, NULL, NULL) < 0)
+ return -1;
+ }while (!ogg->headers);
+
+#if 0
+ av_log (s, AV_LOG_DEBUG, "found headers\n");
+#endif
+
+ return 0;
+}
+
+static uint64_t
+ogg_gptopts (AVFormatContext * s, int i, uint64_t gp)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + i;
+ uint64_t pts = AV_NOPTS_VALUE;
+
+ if(os->codec->gptopts){
+ pts = os->codec->gptopts(s, i, gp);
+ } else {
+ pts = gp;
+ }
+
+ return pts;
+}
+
+
+static int
+ogg_get_length (AVFormatContext * s)
+{
+ ogg_t *ogg = s->priv_data;
+ int idx = -1, i;
+ offset_t size, end;
+
+ if(s->pb.is_streamed)
+ return 0;
+
+// already set
+ if (s->duration != AV_NOPTS_VALUE)
+ return 0;
+
+ size = url_fsize(&s->pb);
+ if(size < 0)
+ return 0;
+ end = size > MAX_PAGE_SIZE? size - MAX_PAGE_SIZE: size;
+
+ ogg_save (s);
+ url_fseek (&s->pb, end, SEEK_SET);
+
+ while (!ogg_read_page (s, &i)){
+ if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)
+ idx = i;
+ }
+
+ if (idx != -1){
+ s->streams[idx]->duration =
+ ogg_gptopts (s, idx, ogg->streams[idx].granule);
+ }
+
+ ogg->size = size;
+ ogg_restore (s, 0);
+ ogg_save (s);
+ while (!ogg_read_page (s, &i)) {
+ if (i == idx && ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)
+ break;
+ }
+ if (i == idx) {
+ s->streams[idx]->start_time = ogg_gptopts (s, idx, ogg->streams[idx].granule);
+ s->streams[idx]->duration -= s->streams[idx]->start_time;
+ }
+ ogg_restore (s, 0);
+
+ return 0;
+}
+
+
+static int
+ogg_read_header (AVFormatContext * s, AVFormatParameters * ap)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg->curidx = -1;
+ //linear headers seek from start
+ if (ogg_get_headers (s) < 0){
+ return -1;
+ }
+
+ //linear granulepos seek from end
+ ogg_get_length (s);
+
+ //fill the extradata in the per codec callbacks
+ return 0;
+}
+
+
+static int
+ogg_read_packet (AVFormatContext * s, AVPacket * pkt)
+{
+ ogg_t *ogg;
+ ogg_stream_t *os;
+ int idx = -1;
+ int pstart, psize;
+
+ //Get an ogg packet
+ do{
+ if (ogg_packet (s, &idx, &pstart, &psize) < 0)
+ return AVERROR_IO;
+ }while (idx < 0 || !s->streams[idx]);
+
+ ogg = s->priv_data;
+ os = ogg->streams + idx;
+
+ //Alloc a pkt
+ if (av_new_packet (pkt, psize) < 0)
+ return AVERROR_IO;
+ pkt->stream_index = idx;
+ memcpy (pkt->data, os->buf + pstart, psize);
+ if (os->lastgp != -1LL){
+ pkt->pts = ogg_gptopts (s, idx, os->lastgp);
+ os->lastgp = -1;
+ }
+
+ return psize;
+}
+
+
+static int
+ogg_read_close (AVFormatContext * s)
+{
+ ogg_t *ogg = s->priv_data;
+ int i;
+
+ for (i = 0; i < ogg->nstreams; i++){
+ av_free (ogg->streams[i].buf);
+ av_free (ogg->streams[i].private);
+ }
+ av_free (ogg->streams);
+ return 0;
+}
+
+
+static int
+ogg_read_seek (AVFormatContext * s, int stream_index, int64_t target_ts,
+ int flags)
+{
+ AVStream *st = s->streams[stream_index];
+ ogg_t *ogg = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ uint64_t min = 0, max = ogg->size;
+ uint64_t tmin = st->start_time, tmax = st->start_time + st->duration;
+ int64_t pts = AV_NOPTS_VALUE;
+
+ ogg_save (s);
+
+ if ((uint64_t)target_ts < tmin || target_ts < 0)
+ target_ts = tmin;
+ while (min <= max && tmin < tmax){
+ uint64_t p = min + (max - min) * (target_ts - tmin) / (tmax - tmin);
+ int i = -1;
+
+ url_fseek (bc, p, SEEK_SET);
+
+ while (!ogg_read_page (s, &i)){
+ if (i == stream_index && ogg->streams[i].granule != 0 &&
+ ogg->streams[i].granule != -1)
+ break;
+ }
+
+ if (i == -1)
+ break;
+
+ pts = ogg_gptopts (s, i, ogg->streams[i].granule);
+ p = url_ftell (bc);
+
+ if (FFABS (pts - target_ts) * st->time_base.num < st->time_base.den)
+ break;
+
+ if (pts > target_ts){
+ if (max == p && tmax == pts) {
+ // probably our tmin is wrong, causing us to always end up too late in the file
+ tmin = (target_ts + tmin + 1) / 2;
+ if (tmin == target_ts) {
+ url_fseek(bc, min, SEEK_SET);
+ break;
+ }
+ }
+ max = p;
+ tmax = pts;
+ }else{
+ if (min == p && tmin == pts) {
+ // probably our tmax is wrong, causing us to always end up too early in the file
+ tmax = (target_ts + tmax) / 2;
+ if (tmax == target_ts) {
+ url_fseek(bc, max, SEEK_SET);
+ break;
+ }
+ }
+ min = p;
+ tmin = pts;
+ }
+ }
+
+ if (FFABS (pts - target_ts) * st->time_base.num < st->time_base.den){
+ ogg_restore (s, 1);
+ ogg_reset (ogg);
+ }else{
+ ogg_restore (s, 0);
+ pts = AV_NOPTS_VALUE;
+ }
+
+ av_update_cur_dts(s, st, pts);
+ return 0;
+
+#if 0
+ //later...
+ int64_t pos;
+ if (av_seek_frame_binary (s, stream_index, target_ts, flags) < 0)
+ return -1;
+ pos = url_ftell (&s->pb);
+ ogg_read_timestamp (s, stream_index, &pos, pos - 1);
+#endif
+
+}
+
+#if 0
+static int64_t
+ogg_read_timestamp (AVFormatContext * s, int stream_index, int64_t * pos_arg,
+ int64_t pos_limit)
+{
+ ogg_t *ogg = s->priv_data;
+ ByteIOContext *bc = &s->pb;
+ int64_t pos, pts;
+
+ if (*pos_arg < 0)
+ return AV_NOPTS_VALUE;
+
+ pos = *pos_arg;
+}
+#endif
+
+static int ogg_probe(AVProbeData *p)
+{
+ if (p->buf_size < 6)
+ return 0;
+ if (p->buf[0] == 'O' && p->buf[1] == 'g' &&
+ p->buf[2] == 'g' && p->buf[3] == 'S' &&
+ p->buf[4] == 0x0 && p->buf[5] <= 0x7 )
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+AVInputFormat ogg_demuxer = {
+ "ogg",
+ "Ogg",
+ sizeof (ogg_t),
+ ogg_probe,
+ ogg_read_header,
+ ogg_read_packet,
+ ogg_read_close,
+ ogg_read_seek,
+// ogg_read_timestamp,
+ .extensions = "ogg",
+};
diff --git a/contrib/ffmpeg/libavformat/ogg2.h b/contrib/ffmpeg/libavformat/ogg2.h
new file mode 100644
index 000000000..dd6f24aab
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/ogg2.h
@@ -0,0 +1,85 @@
+/**
+ Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+#ifndef OGG_H
+#define OGG_H
+
+#include "avformat.h"
+
+typedef struct ogg_codec {
+ int8_t *magic;
+ uint8_t magicsize;
+ int8_t *name;
+ int (*header)(AVFormatContext *, int);
+ int (*packet)(AVFormatContext *, int);
+ uint64_t (*gptopts)(AVFormatContext *, int, uint64_t);
+} ogg_codec_t;
+
+typedef struct ogg_stream {
+ uint8_t *buf;
+ unsigned int bufsize;
+ unsigned int bufpos;
+ unsigned int pstart;
+ unsigned int psize;
+ uint32_t serial;
+ uint32_t seq;
+ uint64_t granule, lastgp;
+ int flags;
+ ogg_codec_t *codec;
+ int header;
+ int nsegs, segp;
+ uint8_t segments[255];
+ void *private;
+} ogg_stream_t;
+
+typedef struct ogg_state {
+ uint64_t pos;
+ int curidx;
+ struct ogg_state *next;
+ ogg_stream_t streams[1];
+} ogg_state_t;
+
+typedef struct ogg {
+ ogg_stream_t *streams;
+ int nstreams;
+ int headers;
+ int curidx;
+ uint64_t size;
+ ogg_state_t *state;
+} ogg_t;
+
+#define OGG_FLAG_CONT 1
+#define OGG_FLAG_BOS 2
+#define OGG_FLAG_EOS 4
+
+extern ogg_codec_t vorbis_codec;
+extern ogg_codec_t theora_codec;
+extern ogg_codec_t flac_codec;
+extern ogg_codec_t ogm_video_codec;
+extern ogg_codec_t ogm_audio_codec;
+extern ogg_codec_t ogm_old_codec;
+
+extern int vorbis_comment(AVFormatContext *ms, uint8_t *buf, int size);
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/oggparseflac.c b/contrib/ffmpeg/libavformat/oggparseflac.c
new file mode 100644
index 000000000..8960088d8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/oggparseflac.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2005 Matthieu CASTET
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+#include "avformat.h"
+#include "bitstream.h"
+#include "ogg2.h"
+
+#define FLAC_STREAMINFO_SIZE 0x22
+
+static int
+flac_header (AVFormatContext * s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ GetBitContext gb;
+ int mdt;
+
+ if (os->buf[os->pstart] == 0xff)
+ return 0;
+
+ init_get_bits(&gb, os->buf + os->pstart, os->psize*8);
+ get_bits(&gb, 1); /* metadata_last */
+ mdt = get_bits(&gb, 7);
+
+ if (mdt == 0x7f) {
+ skip_bits(&gb, 4*8); /* "FLAC" */
+ if(get_bits(&gb, 8) != 1) /* unsupported major version */
+ return -1;
+ skip_bits(&gb, 8 + 16); /* minor version + header count */
+ skip_bits(&gb, 4*8); /* "fLaC" */
+
+ /* METADATA_BLOCK_HEADER */
+ if (get_bits(&gb, 32) != FLAC_STREAMINFO_SIZE)
+ return -1;
+
+ skip_bits(&gb, 16*2+24*2);
+
+ st->codec->sample_rate = get_bits_long(&gb, 20);
+ st->codec->channels = get_bits(&gb, 3) + 1;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_FLAC;
+
+ st->codec->extradata =
+ av_malloc(FLAC_STREAMINFO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
+ memcpy (st->codec->extradata, os->buf + os->pstart + 5 + 4 + 4 + 4,
+ FLAC_STREAMINFO_SIZE);
+ st->codec->extradata_size = FLAC_STREAMINFO_SIZE;
+
+ st->time_base.num = 1;
+ st->time_base.den = st->codec->sample_rate;
+ } else if (mdt == 4) {
+ vorbis_comment (s, os->buf + os->pstart + 4, os->psize - 4);
+ }
+
+ return 1;
+}
+
+ogg_codec_t flac_codec = {
+ .magic = "\177FLAC",
+ .magicsize = 5,
+ .header = flac_header
+};
diff --git a/contrib/ffmpeg/libavformat/oggparseogm.c b/contrib/ffmpeg/libavformat/oggparseogm.c
new file mode 100644
index 000000000..8788e5d41
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/oggparseogm.c
@@ -0,0 +1,166 @@
+/**
+ Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+#include <stdlib.h>
+#include "avformat.h"
+#include "bitstream.h"
+#include "bswap.h"
+#include "ogg2.h"
+#include "riff.h"
+
+static int
+ogm_header(AVFormatContext *s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ uint8_t *p = os->buf + os->pstart;
+ uint64_t time_unit;
+ uint64_t spu;
+ uint32_t default_len;
+
+ if(!(*p & 1))
+ return 0;
+ if(*p != 1)
+ return 1;
+
+ p++;
+
+ if(*p == 'v'){
+ int tag;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ p += 8;
+ tag = le2me_32(unaligned32(p));
+ st->codec->codec_id = codec_get_bmp_id(tag);
+ st->codec->codec_tag = tag;
+ } else {
+ int cid;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ p += 8;
+ p[4] = 0;
+ cid = strtol(p, NULL, 16);
+ st->codec->codec_id = codec_get_wav_id(cid);
+ }
+
+ p += 4;
+ p += 4; /* useless size field */
+
+ time_unit = le2me_64(unaligned64(p));
+ p += 8;
+ spu = le2me_64(unaligned64(p));
+ p += 8;
+ default_len = le2me_32(unaligned32(p));
+ p += 4;
+
+ p += 8; /* buffersize + bits_per_sample */
+
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO){
+ st->codec->width = le2me_32(unaligned32(p));
+ p += 4;
+ st->codec->height = le2me_32(unaligned32(p));
+ st->codec->time_base.den = spu * 10000000;
+ st->codec->time_base.num = time_unit;
+ st->time_base = st->codec->time_base;
+ } else {
+ st->codec->channels = le2me_16(unaligned16(p));
+ p += 2;
+ p += 2; /* block_align */
+ st->codec->bit_rate = le2me_32(unaligned32(p)) * 8;
+ st->codec->sample_rate = spu * 10000000 / time_unit;
+ st->time_base.num = 1;
+ st->time_base.den = st->codec->sample_rate;
+ }
+
+ return 1;
+}
+
+static int
+ogm_dshow_header(AVFormatContext *s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ uint8_t *p = os->buf + os->pstart;
+ uint32_t t;
+
+ if(!(*p & 1))
+ return 0;
+ if(*p != 1)
+ return 1;
+
+ t = le2me_32(unaligned32(p + 96));
+
+ if(t == 0x05589f80){
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = codec_get_bmp_id(le2me_32(unaligned32(p + 68)));
+ st->codec->time_base.den = 10000000;
+ st->codec->time_base.num = le2me_64(unaligned64(p + 164));
+ st->codec->width = le2me_32(unaligned32(p + 176));
+ st->codec->height = le2me_32(unaligned32(p + 180));
+ } else if(t == 0x05589f81){
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = codec_get_wav_id(le2me_16(unaligned16(p+124)));
+ st->codec->channels = le2me_16(unaligned16(p + 126));
+ st->codec->sample_rate = le2me_32(unaligned32(p + 128));
+ st->codec->bit_rate = le2me_32(unaligned32(p + 132)) * 8;
+ }
+
+ return 1;
+}
+
+static int
+ogm_packet(AVFormatContext *s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ uint8_t *p = os->buf + os->pstart;
+ int lb;
+
+ lb = ((*p & 2) << 1) | ((*p >> 6) & 3);
+ os->pstart += lb + 1;
+ os->psize -= lb + 1;
+
+ return 0;
+}
+
+ogg_codec_t ogm_video_codec = {
+ .magic = "\001video",
+ .magicsize = 6,
+ .header = ogm_header,
+ .packet = ogm_packet
+};
+
+ogg_codec_t ogm_audio_codec = {
+ .magic = "\001audio",
+ .magicsize = 6,
+ .header = ogm_header,
+ .packet = ogm_packet
+};
+
+ogg_codec_t ogm_old_codec = {
+ .magic = "\001Direct Show Samples embedded in Ogg",
+ .magicsize = 35,
+ .header = ogm_dshow_header,
+ .packet = ogm_packet
+};
diff --git a/contrib/ffmpeg/libavformat/oggparsetheora.c b/contrib/ffmpeg/libavformat/oggparsetheora.c
new file mode 100644
index 000000000..9052bbbea
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/oggparsetheora.c
@@ -0,0 +1,129 @@
+/**
+ Copyright (C) 2005 Matthieu CASTET, Alex Beregszaszi
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+#include <stdlib.h>
+#include "avformat.h"
+#include "bitstream.h"
+#include "bswap.h"
+#include "ogg2.h"
+
+typedef struct theora_params {
+ int gpshift;
+ int gpmask;
+} theora_params_t;
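+
+/* For reference: a Theora granule position packs the frame number of the
+   last keyframe in its upper bits and the number of frames since that
+   keyframe in the lower gpshift bits, so the absolute frame index used as
+   a timestamp is (gp >> gpshift) + (gp & gpmask), which is exactly what
+   theora_gptopts() below computes. */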
+
+static int
+theora_header (AVFormatContext * s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ theora_params_t *thp = os->private;
+ int cds = st->codec->extradata_size + os->psize + 2;
+ uint8_t *cdp;
+
+ if(!(os->buf[os->pstart] & 0x80))
+ return 0;
+
+ if(!thp){
+ thp = av_mallocz(sizeof(*thp));
+ os->private = thp;
+ }
+
+ if (os->buf[os->pstart] == 0x80) {
+ GetBitContext gb;
+ int version;
+
+ init_get_bits(&gb, os->buf + os->pstart, os->psize*8);
+
+ skip_bits(&gb, 7*8); /* 0x80"theora" */
+
+ version = get_bits(&gb, 8) << 16;
+ version |= get_bits(&gb, 8) << 8;
+ version |= get_bits(&gb, 8);
+
+ if (version < 0x030100)
+ {
+ av_log(s, AV_LOG_ERROR,
+ "Too old or unsupported Theora (%x)\n", version);
+ return -1;
+ }
+
+ st->codec->width = get_bits(&gb, 16) << 4;
+ st->codec->height = get_bits(&gb, 16) << 4;
+
+ if (version >= 0x030400)
+ skip_bits(&gb, 164);
+ else if (version >= 0x030200)
+ skip_bits(&gb, 64);
+ st->codec->time_base.den = get_bits(&gb, 32);
+ st->codec->time_base.num = get_bits(&gb, 32);
+ st->time_base = st->codec->time_base;
+
+ st->codec->sample_aspect_ratio.num = get_bits(&gb, 24);
+ st->codec->sample_aspect_ratio.den = get_bits(&gb, 24);
+
+ if (version >= 0x030200)
+ skip_bits(&gb, 38);
+ if (version >= 0x304000)
+ skip_bits(&gb, 2);
+
+ thp->gpshift = get_bits(&gb, 5);
+ thp->gpmask = (1 << thp->gpshift) - 1;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_THEORA;
+
+ } else if (os->buf[os->pstart] == 0x83) {
+ vorbis_comment (s, os->buf + os->pstart + 7, os->psize - 8);
+ }
+
+ st->codec->extradata = av_realloc (st->codec->extradata, cds);
+ cdp = st->codec->extradata + st->codec->extradata_size;
+ *cdp++ = os->psize >> 8;
+ *cdp++ = os->psize & 0xff;
+ memcpy (cdp, os->buf + os->pstart, os->psize);
+ st->codec->extradata_size = cds;
+
+ return 1;
+}
+
+static uint64_t
+theora_gptopts(AVFormatContext *ctx, int idx, uint64_t gp)
+{
+ ogg_t *ogg = ctx->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ theora_params_t *thp = os->private;
+ uint64_t iframe = gp >> thp->gpshift;
+ uint64_t pframe = gp & thp->gpmask;
+
+ return iframe + pframe;
+}
+
+ogg_codec_t theora_codec = {
+ .magic = "\200theora",
+ .magicsize = 7,
+ .header = theora_header,
+ .gptopts = theora_gptopts
+};
diff --git a/contrib/ffmpeg/libavformat/oggparsevorbis.c b/contrib/ffmpeg/libavformat/oggparsevorbis.c
new file mode 100644
index 000000000..5de221cb4
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/oggparsevorbis.c
@@ -0,0 +1,205 @@
+/**
+ Copyright (C) 2005 Michael Ahlberg, Måns Rullgård
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use, copy,
+ modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+**/
+
+#include <stdlib.h>
+#include "avformat.h"
+#include "bitstream.h"
+#include "bswap.h"
+#include "ogg2.h"
+
+extern int
+vorbis_comment (AVFormatContext * as, uint8_t *buf, int size)
+{
+ char *p = buf;
+ int s, n, j;
+
+ if (size < 4)
+ return -1;
+
+ s = le2me_32 (unaligned32 (p));
+ p += 4;
+ size -= 4;
+
+ if (size < s + 4)
+ return -1;
+
+ p += s;
+ size -= s;
+
+ n = le2me_32 (unaligned32 (p));
+ p += 4;
+ size -= 4;
+
+ while (size >= 4){
+ char *t, *v;
+ int tl, vl;
+
+ s = le2me_32 (unaligned32 (p));
+ p += 4;
+ size -= 4;
+
+ if (size < s)
+ break;
+
+ t = p;
+ p += s;
+ size -= s;
+ n--;
+
+ v = memchr (t, '=', s);
+ if (!v)
+ continue;
+
+ tl = v - t;
+ vl = s - tl - 1;
+ v++;
+
+ if (tl && vl){
+ char tt[tl + 1];
+ char ct[vl + 1];
+
+ for (j = 0; j < tl; j++)
+ tt[j] = toupper (t[j]);
+ tt[tl] = 0;
+
+ memcpy (ct, v, vl);
+ ct[vl] = 0;
+
+ // taken from Vorbis_I_spec
+ if (!strcmp (tt, "AUTHOR"))
+ strncpy (as->author, ct, FFMIN(sizeof (as->author), vl));
+ else if (!strcmp (tt, "TITLE"))
+ strncpy (as->title, ct, FFMIN(sizeof (as->title), vl));
+ else if (!strcmp (tt, "COPYRIGHT"))
+ strncpy (as->copyright, ct, FFMIN(sizeof (as->copyright), vl));
+ else if (!strcmp (tt, "DESCRIPTION"))
+ strncpy (as->comment, ct, FFMIN(sizeof (as->comment), vl));
+ else if (!strcmp (tt, "GENRE"))
+ strncpy (as->genre, ct, FFMIN(sizeof (as->genre), vl));
+ else if (!strcmp (tt, "TRACKNUMBER"))
+ as->track = atoi (ct);
+ //Too bored to add others for today
+ }
+ }
+
+ if (size > 0)
+ av_log (as, AV_LOG_INFO, "%i bytes of comment header remain\n", size);
+ if (n > 0)
+ av_log (as, AV_LOG_INFO,
+ "truncated comment header, %i comments not found\n", n);
+
+ return 0;
+}
+
+
+/** Parse the vorbis header
+ * Vorbis Identification header from Vorbis_I_spec.html#vorbis-spec-codec
+ * [vorbis_version] = read 32 bits as unsigned integer | Not used
+ * [audio_channels] = read 8 bit integer as unsigned | Used
+ * [audio_sample_rate] = read 32 bits as unsigned integer | Used
+ * [bitrate_maximum] = read 32 bits as signed integer | Not used yet
+ * [bitrate_nominal] = read 32 bits as signed integer | Used as bitrate
+ * [bitrate_minimum] = read 32 bits as signed integer | Not used yet
+ * [blocksize_0] = read 4 bits as unsigned integer | Not Used
+ * [blocksize_1] = read 4 bits as unsigned integer | Not Used
+ * [framing_flag] = read one bit | Not Used
+ * */
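+
+/* For reference (Vorbis I spec): the fields listed above start at byte 7 of
+   the identification header, after the packet-type byte and the six-byte
+   "vorbis" magic; skipping the 32-bit vorbis_version as well puts
+   audio_channels at byte 11, which is why vorbis_header() below starts
+   reading at os->buf + os->pstart + 11. */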
+
+typedef struct {
+ unsigned int len[3];
+ unsigned char *packet[3];
+} oggvorbis_private_t;
+
+
+static unsigned int
+fixup_vorbis_headers(AVFormatContext * as, oggvorbis_private_t *priv,
+ void **buf)
+{
+ int i,offset, len;
+ unsigned char *ptr;
+
+ len = priv->len[0] + priv->len[1] + priv->len[2];
+ ptr = *buf = av_mallocz(len + len/255 + 64);
+
+ ptr[0] = 2;
+ offset = 1;
+ offset += av_xiphlacing(&ptr[offset], priv->len[0]);
+ offset += av_xiphlacing(&ptr[offset], priv->len[1]);
+ for(i = 0; i < 3; i++) {
+ memcpy(&ptr[offset], priv->packet[i], priv->len[i]);
+ offset += priv->len[i];
+ }
+ *buf = av_realloc(*buf, offset);
+ return offset;
+}
+
+
+static int
+vorbis_header (AVFormatContext * s, int idx)
+{
+ ogg_t *ogg = s->priv_data;
+ ogg_stream_t *os = ogg->streams + idx;
+ AVStream *st = s->streams[idx];
+ oggvorbis_private_t *priv;
+
+ if (os->seq > 2)
+ return 0;
+
+ if(os->seq == 0) {
+ os->private = av_mallocz(sizeof(oggvorbis_private_t));
+ if(!os->private)
+ return 0;
+ }
+
+ priv = os->private;
+ priv->len[os->seq] = os->psize;
+ priv->packet[os->seq] = av_mallocz(os->psize);
+ memcpy(priv->packet[os->seq], os->buf + os->pstart, os->psize);
+ if (os->buf[os->pstart] == 1) {
+ uint8_t *p = os->buf + os->pstart + 11; //skip up to the audio channels
+ st->codec->channels = *p++;
+ st->codec->sample_rate = le2me_32 (unaligned32 (p));
+ p += 8; //skip the sample rate just read and the maximum bitrate
+ st->codec->bit_rate = le2me_32 (unaligned32 (p)); //nominal bitrate
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_VORBIS;
+
+ st->time_base.num = 1;
+ st->time_base.den = st->codec->sample_rate;
+ } else if (os->buf[os->pstart] == 3) {
+ vorbis_comment (s, os->buf + os->pstart + 7, os->psize - 8);
+ } else {
+ st->codec->extradata_size =
+ fixup_vorbis_headers(s, priv, &st->codec->extradata);
+ }
+
+ return os->seq < 3;
+}
+
+ogg_codec_t vorbis_codec = {
+ .magic = "\001vorbis",
+ .magicsize = 7,
+ .header = vorbis_header
+};
diff --git a/contrib/ffmpeg/libavformat/os_support.c b/contrib/ffmpeg/libavformat/os_support.c
new file mode 100644
index 000000000..a66c867f0
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/os_support.c
@@ -0,0 +1,96 @@
+/*
+ * Various utilities for ffmpeg system
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * copyright (c) 2002 Francois Revol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "config.h"
+#include "avformat.h"
+#if defined(CONFIG_WINCE)
+/* Skip includes on WinCE. */
+#elif defined(__MINGW32__)
+#include <sys/types.h>
+#include <sys/timeb.h>
+#elif defined(CONFIG_OS2)
+#include <string.h>
+#include <sys/time.h>
+#else
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/time.h>
+#endif
+#include <time.h>
+
+/**
+ * gets the current time in microseconds.
+ */
+int64_t av_gettime(void)
+{
+#if defined(CONFIG_WINCE)
+ return timeGetTime() * int64_t_C(1000);
+#elif defined(__MINGW32__)
+ struct timeb tb;
+ _ftime(&tb);
+ return ((int64_t)tb.time * int64_t_C(1000) + (int64_t)tb.millitm) * int64_t_C(1000);
+#else
+ struct timeval tv;
+ gettimeofday(&tv,NULL);
+ return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+#endif
+}
+
+#if !defined(CONFIG_WINCE) && !defined(HAVE_LOCALTIME_R)
+struct tm *localtime_r(const time_t *t, struct tm *tp)
+{
+ struct tm *l;
+
+ l = localtime(t);
+ if (!l)
+ return 0;
+ *tp = *l;
+ return tp;
+}
+#endif /* !defined(CONFIG_WINCE) && !defined(HAVE_LOCALTIME_R) */
+
+#if !defined(HAVE_INET_ATON) && defined(CONFIG_NETWORK)
+#include <stdlib.h>
+#include <strings.h>
+#include "barpainet.h"
+
+int inet_aton (const char * str, struct in_addr * add)
+{
+ const char * pch = str;
+ unsigned int add1 = 0, add2 = 0, add3 = 0, add4 = 0;
+
+ add1 = atoi(pch);
+ pch = strpbrk(pch,".");
+ if (pch == 0 || ++pch == 0) goto done;
+ add2 = atoi(pch);
+ pch = strpbrk(pch,".");
+ if (pch == 0 || ++pch == 0) goto done;
+ add3 = atoi(pch);
+ pch = strpbrk(pch,".");
+ if (pch == 0 || ++pch == 0) goto done;
+ add4 = atoi(pch);
+
+done:
+ add->s_addr=(add4<<24)+(add3<<16)+(add2<<8)+add1;
+
+ return 1;
+}
+#endif /* !defined(HAVE_INET_ATON) && defined(CONFIG_NETWORK) */
diff --git a/contrib/ffmpeg/libavformat/os_support.h b/contrib/ffmpeg/libavformat/os_support.h
new file mode 100644
index 000000000..e76a9aaaf
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/os_support.h
@@ -0,0 +1,53 @@
+/*
+ * various utilities for ffmpeg system
+ * copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _OS_SUPPORT_H
+#define _OS_SUPPORT_H
+
+/**
+ * @file os_support.h
+ * miscellaneous OS support macros and functions.
+ *
+ * - usleep() (Win32, BeOS, OS/2)
+ * - floatf() (OS/2)
+ * - strcasecmp() (OS/2)
+ */
+
+#ifdef __MINGW32__
+__declspec(dllimport) void __stdcall Sleep(unsigned long dwMilliseconds);
+// # include <windows.h>
+# define usleep(t) Sleep((t) / 1000)
+#endif
+
+#ifdef __BEOS__
+# ifndef usleep
+# include <OS.h>
+# define usleep(t) snooze((bigtime_t)(t))
+# endif
+#endif
+
+#if defined(CONFIG_OS2)
+#include <stdlib.h>
+static inline int usleep(unsigned int t) { return _sleep2(t / 1000); }
+static inline int strcasecmp(const char* s1, const char* s2) { return stricmp(s1,s2); }
+#endif
+
+#endif /* _OS_SUPPORT_H */
diff --git a/contrib/ffmpeg/libavformat/png.c b/contrib/ffmpeg/libavformat/png.c
new file mode 100644
index 000000000..d62bf540a
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/png.c
@@ -0,0 +1,889 @@
+/*
+ * PNG image format
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+/* TODO:
+ * - add 2, 4 and 16 bit depth support
+ * - use filters when generating a png (better compression)
+ */
+
+#ifdef CONFIG_ZLIB
+#include <zlib.h>
+
+//#define DEBUG
+
+#define PNG_COLOR_MASK_PALETTE 1
+#define PNG_COLOR_MASK_COLOR 2
+#define PNG_COLOR_MASK_ALPHA 4
+
+#define PNG_COLOR_TYPE_GRAY 0
+#define PNG_COLOR_TYPE_PALETTE (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_PALETTE)
+#define PNG_COLOR_TYPE_RGB (PNG_COLOR_MASK_COLOR)
+#define PNG_COLOR_TYPE_RGB_ALPHA (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_ALPHA)
+#define PNG_COLOR_TYPE_GRAY_ALPHA (PNG_COLOR_MASK_ALPHA)
+
+#define PNG_FILTER_VALUE_NONE 0
+#define PNG_FILTER_VALUE_SUB 1
+#define PNG_FILTER_VALUE_UP 2
+#define PNG_FILTER_VALUE_AVG 3
+#define PNG_FILTER_VALUE_PAETH 4
+
+#define PNG_IHDR 0x0001
+#define PNG_IDAT 0x0002
+#define PNG_ALLIMAGE 0x0004
+#define PNG_PLTE 0x0008
+
+#define NB_PASSES 7
+
+#define IOBUF_SIZE 4096
+
+typedef struct PNGDecodeState {
+ int state;
+ int width, height;
+ int bit_depth;
+ int color_type;
+ int compression_type;
+ int interlace_type;
+ int filter_type;
+ int channels;
+ int bits_per_pixel;
+ int bpp;
+
+ uint8_t *image_buf;
+ int image_linesize;
+ uint32_t palette[256];
+ uint8_t *crow_buf;
+ uint8_t *last_row;
+ uint8_t *tmp_row;
+ int pass;
+ int crow_size; /* compressed row size (includes the filter type byte) */
+ int row_size; /* decompressed row size */
+ int pass_row_size; /* decompressed row size of the current pass */
+ int y;
+ z_stream zstream;
+} PNGDecodeState;
+
+static const uint8_t pngsig[8] = {137, 80, 78, 71, 13, 10, 26, 10};
+
+/* Mask to determine which y pixels are valid in a pass */
+static const uint8_t png_pass_ymask[NB_PASSES] = {
+ 0x80, 0x80, 0x08, 0x88, 0x22, 0xaa, 0x55,
+};
+
+/* Mask to determine which y pixels can be written in a pass */
+static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
+ 0xff, 0xff, 0x0f, 0xcc, 0x33, 0xff, 0x55,
+};
+
+/* minimum x value */
+static const uint8_t png_pass_xmin[NB_PASSES] = {
+ 0, 4, 0, 2, 0, 1, 0
+};
+
+/* x shift to get row width */
+static const uint8_t png_pass_xshift[NB_PASSES] = {
+ 3, 3, 2, 2, 1, 1, 0
+};
+
+/* Mask to determine which pixels are valid in a pass */
+static const uint8_t png_pass_mask[NB_PASSES] = {
+ 0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff
+};
+
+/* Mask to determine which pixels to overwrite while displaying */
+static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
+ 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
+};
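+
+/* For reference, the tables above encode the standard Adam7 interlacing
+   pattern, where each pixel of an 8x8 tile belongs to one of seven passes
+   (shown 1-based):
+
+       1 6 4 6 2 6 4 6
+       7 7 7 7 7 7 7 7
+       5 6 5 6 5 6 5 6
+       7 7 7 7 7 7 7 7
+       3 6 4 6 3 6 4 6
+       7 7 7 7 7 7 7 7
+       5 6 5 6 5 6 5 6
+       7 7 7 7 7 7 7 7
+*/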
+
+static int png_probe(AVProbeData *pd)
+{
+ if (pd->buf_size >= 8 &&
+ memcmp(pd->buf, pngsig, 8) == 0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static void *png_zalloc(void *opaque, unsigned int items, unsigned int size)
+{
+ return av_malloc(items * size);
+}
+
+static void png_zfree(void *opaque, void *ptr)
+{
+ av_free(ptr);
+}
+
+static int png_get_nb_channels(int color_type)
+{
+ int channels;
+ channels = 1;
+ if ((color_type & (PNG_COLOR_MASK_COLOR | PNG_COLOR_MASK_PALETTE)) ==
+ PNG_COLOR_MASK_COLOR)
+ channels = 3;
+ if (color_type & PNG_COLOR_MASK_ALPHA)
+ channels++;
+ return channels;
+}
+
+/* compute the row size of an interleaved pass */
+static int png_pass_row_size(int pass, int bits_per_pixel, int width)
+{
+ int shift, xmin, pass_width;
+
+ xmin = png_pass_xmin[pass];
+ if (width <= xmin)
+ return 0;
+ shift = png_pass_xshift[pass];
+ pass_width = (width - xmin + (1 << shift) - 1) >> shift;
+ return (pass_width * bits_per_pixel + 7) >> 3;
+}
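+
+/* Worked example: for a 24 bits-per-pixel image of width 5, pass 0 only
+   covers columns that are multiples of 8 (xmin 0, shift 3), so pass_width
+   is (5 - 0 + 7) >> 3 = 1 and the decompressed row size is
+   (1 * 24 + 7) >> 3 = 3 bytes; the compressed row adds one filter-type
+   byte on top of that (see crow_size). */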
+
+/* NOTE: we try to construct a good-looking image at each pass. width
+   is the original image width. We also do pixel format conversion at
+   this stage */
+static void png_put_interlaced_row(uint8_t *dst, int width,
+ int bits_per_pixel, int pass,
+ int color_type, const uint8_t *src)
+{
+ int x, mask, dsp_mask, j, src_x, b, bpp;
+ uint8_t *d;
+ const uint8_t *s;
+
+ mask = png_pass_mask[pass];
+ dsp_mask = png_pass_dsp_mask[pass];
+ switch(bits_per_pixel) {
+ case 1:
+ /* we must initialize the line to zero before writing to it */
+ if (pass == 0)
+ memset(dst, 0, (width + 7) >> 3);
+ src_x = 0;
+ for(x = 0; x < width; x++) {
+ j = (x & 7);
+ if ((dsp_mask << j) & 0x80) {
+ b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
+ dst[x >> 3] |= b << (7 - j);
+ }
+ if ((mask << j) & 0x80)
+ src_x++;
+ }
+ break;
+ default:
+ bpp = bits_per_pixel >> 3;
+ d = dst;
+ s = src;
+ if (color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ for(x = 0; x < width; x++) {
+ j = x & 7;
+ if ((dsp_mask << j) & 0x80) {
+ *(uint32_t *)d = (s[3] << 24) | (s[0] << 16) | (s[1] << 8) | s[2];
+ }
+ d += bpp;
+ if ((mask << j) & 0x80)
+ s += bpp;
+ }
+ } else {
+ for(x = 0; x < width; x++) {
+ j = x & 7;
+ if ((dsp_mask << j) & 0x80) {
+ memcpy(d, s, bpp);
+ }
+ d += bpp;
+ if ((mask << j) & 0x80)
+ s += bpp;
+ }
+ }
+ break;
+ }
+}
+
+static void png_get_interlaced_row(uint8_t *dst, int row_size,
+ int bits_per_pixel, int pass,
+ const uint8_t *src, int width)
+{
+ int x, mask, dst_x, j, b, bpp;
+ uint8_t *d;
+ const uint8_t *s;
+
+ mask = png_pass_mask[pass];
+ switch(bits_per_pixel) {
+ case 1:
+ memset(dst, 0, row_size);
+ dst_x = 0;
+ for(x = 0; x < width; x++) {
+ j = (x & 7);
+ if ((mask << j) & 0x80) {
+ b = (src[x >> 3] >> (7 - j)) & 1;
+ dst[dst_x >> 3] |= b << (7 - (dst_x & 7));
+ dst_x++;
+ }
+ }
+ break;
+ default:
+ bpp = bits_per_pixel >> 3;
+ d = dst;
+ s = src;
+ for(x = 0; x < width; x++) {
+ j = x & 7;
+ if ((mask << j) & 0x80) {
+ memcpy(d, s, bpp);
+ d += bpp;
+ }
+ s += bpp;
+ }
+ break;
+ }
+}
+
+/* XXX: optimize */
+/* NOTE: 'dst' can be equal to 'last' */
+static void png_filter_row(uint8_t *dst, int filter_type,
+ uint8_t *src, uint8_t *last, int size, int bpp)
+{
+ int i, p;
+
+ switch(filter_type) {
+ case PNG_FILTER_VALUE_NONE:
+ memcpy(dst, src, size);
+ break;
+ case PNG_FILTER_VALUE_SUB:
+ for(i = 0; i < bpp; i++) {
+ dst[i] = src[i];
+ }
+ for(i = bpp; i < size; i++) {
+ p = dst[i - bpp];
+ dst[i] = p + src[i];
+ }
+ break;
+ case PNG_FILTER_VALUE_UP:
+ for(i = 0; i < size; i++) {
+ p = last[i];
+ dst[i] = p + src[i];
+ }
+ break;
+ case PNG_FILTER_VALUE_AVG:
+ for(i = 0; i < bpp; i++) {
+ p = (last[i] >> 1);
+ dst[i] = p + src[i];
+ }
+ for(i = bpp; i < size; i++) {
+ p = ((dst[i - bpp] + last[i]) >> 1);
+ dst[i] = p + src[i];
+ }
+ break;
+ case PNG_FILTER_VALUE_PAETH:
+ for(i = 0; i < bpp; i++) {
+ p = last[i];
+ dst[i] = p + src[i];
+ }
+ for(i = bpp; i < size; i++) {
+ int a, b, c, pa, pb, pc;
+
+ a = dst[i - bpp];
+ b = last[i];
+ c = last[i - bpp];
+
+ p = b - c;
+ pc = a - c;
+
+ pa = abs(p);
+ pb = abs(pc);
+ pc = abs(p + pc);
+
+ if (pa <= pb && pa <= pc)
+ p = a;
+ else if (pb <= pc)
+ p = b;
+ else
+ p = c;
+ dst[i] = p + src[i];
+ }
+ break;
+ }
+}
+
+static void convert_from_rgba32(uint8_t *dst, const uint8_t *src, int width)
+{
+ uint8_t *d;
+ int j;
+ unsigned int v;
+
+ d = dst;
+ for(j = 0; j < width; j++) {
+ v = ((const uint32_t *)src)[j];
+ d[0] = v >> 16;
+ d[1] = v >> 8;
+ d[2] = v;
+ d[3] = v >> 24;
+ d += 4;
+ }
+}
+
+static void convert_to_rgba32(uint8_t *dst, const uint8_t *src, int width)
+{
+ int j;
+ unsigned int r, g, b, a;
+
+ for(j = 0;j < width; j++) {
+ r = src[0];
+ g = src[1];
+ b = src[2];
+ a = src[3];
+ *(uint32_t *)dst = (a << 24) | (r << 16) | (g << 8) | b;
+ dst += 4;
+ src += 4;
+ }
+}
+
+/* process exactly one decompressed row */
+static void png_handle_row(PNGDecodeState *s)
+{
+ uint8_t *ptr, *last_row;
+ int got_line;
+
+ if (!s->interlace_type) {
+ ptr = s->image_buf + s->image_linesize * s->y;
+ /* need to swap bytes correctly for RGB_ALPHA */
+ if (s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
+ s->last_row, s->row_size, s->bpp);
+ memcpy(s->last_row, s->tmp_row, s->row_size);
+ convert_to_rgba32(ptr, s->tmp_row, s->width);
+ } else {
+ /* in normal case, we avoid one copy */
+ if (s->y == 0)
+ last_row = s->last_row;
+ else
+ last_row = ptr - s->image_linesize;
+
+ png_filter_row(ptr, s->crow_buf[0], s->crow_buf + 1,
+ last_row, s->row_size, s->bpp);
+ }
+ s->y++;
+ if (s->y == s->height) {
+ s->state |= PNG_ALLIMAGE;
+ }
+ } else {
+ got_line = 0;
+ for(;;) {
+ ptr = s->image_buf + s->image_linesize * s->y;
+ if ((png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
+ /* if we have already read one row, it is time to stop and
+ wait for the next one */
+ if (got_line)
+ break;
+ png_filter_row(s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
+ s->last_row, s->pass_row_size, s->bpp);
+ memcpy(s->last_row, s->tmp_row, s->pass_row_size);
+ got_line = 1;
+ }
+ if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
+ /* NOTE: rgba32 is handled directly in png_put_interlaced_row */
+ png_put_interlaced_row(ptr, s->width, s->bits_per_pixel, s->pass,
+ s->color_type, s->last_row);
+ }
+ s->y++;
+ if (s->y == s->height) {
+ for(;;) {
+ if (s->pass == NB_PASSES - 1) {
+ s->state |= PNG_ALLIMAGE;
+ goto the_end;
+ } else {
+ s->pass++;
+ s->y = 0;
+ s->pass_row_size = png_pass_row_size(s->pass,
+ s->bits_per_pixel,
+ s->width);
+ s->crow_size = s->pass_row_size + 1;
+ if (s->pass_row_size != 0)
+ break;
+ /* skip pass if empty row */
+ }
+ }
+ }
+ }
+ the_end: ;
+ }
+}
+
+static int png_decode_idat(PNGDecodeState *s, ByteIOContext *f, int length)
+{
+ uint8_t buf[IOBUF_SIZE];
+ int buf_size;
+ int ret;
+ while (length > 0) {
+ /* read the buffer */
+ buf_size = IOBUF_SIZE;
+ if (buf_size > length)
+ buf_size = length;
+ ret = get_buffer(f, buf, buf_size);
+ if (ret != buf_size)
+ return -1;
+ s->zstream.avail_in = buf_size;
+ s->zstream.next_in = buf;
+ /* decode one line if possible */
+ while (s->zstream.avail_in > 0) {
+ ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
+ if (ret != Z_OK && ret != Z_STREAM_END) {
+ return -1;
+ }
+ if (s->zstream.avail_out == 0) {
+ if (!(s->state & PNG_ALLIMAGE)) {
+ png_handle_row(s);
+ }
+ s->zstream.avail_out = s->crow_size;
+ s->zstream.next_out = s->crow_buf;
+ }
+ }
+ length -= buf_size;
+ }
+ return 0;
+}
+
+static int png_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ AVImageInfo info1, *info = &info1;
+ PNGDecodeState s1, *s = &s1;
+ uint32_t tag, length;
+ int ret, crc;
+ uint8_t buf[8];
+
+ /* check signature */
+ ret = get_buffer(f, buf, 8);
+ if (ret != 8)
+ return -1;
+ if (memcmp(buf, pngsig, 8) != 0)
+ return -1;
+ memset(s, 0, sizeof(PNGDecodeState));
+ /* init the zlib */
+ s->zstream.zalloc = png_zalloc;
+ s->zstream.zfree = png_zfree;
+ s->zstream.opaque = NULL;
+ ret = inflateInit(&s->zstream);
+ if (ret != Z_OK)
+ return -1;
+ for(;;) {
+ if (url_feof(f))
+ goto fail;
+ length = get_be32(f);
+ if (length > 0x7fffffff)
+ goto fail;
+ tag = get_le32(f);
+#ifdef DEBUG
+ printf("png: tag=%c%c%c%c length=%u\n",
+ (tag & 0xff),
+ ((tag >> 8) & 0xff),
+ ((tag >> 16) & 0xff),
+ ((tag >> 24) & 0xff), length);
+#endif
+ switch(tag) {
+ case MKTAG('I', 'H', 'D', 'R'):
+ if (length != 13)
+ goto fail;
+ s->width = get_be32(f);
+ s->height = get_be32(f);
+ s->bit_depth = get_byte(f);
+ s->color_type = get_byte(f);
+ s->compression_type = get_byte(f);
+ s->filter_type = get_byte(f);
+ s->interlace_type = get_byte(f);
+ crc = get_be32(f);
+ s->state |= PNG_IHDR;
+#ifdef DEBUG
+ printf("width=%d height=%d depth=%d color_type=%d compression_type=%d filter_type=%d interlace_type=%d\n",
+ s->width, s->height, s->bit_depth, s->color_type,
+ s->compression_type, s->filter_type, s->interlace_type);
+#endif
+ break;
+ case MKTAG('I', 'D', 'A', 'T'):
+ if (!(s->state & PNG_IHDR))
+ goto fail;
+ if (!(s->state & PNG_IDAT)) {
+ /* init image info */
+ info->width = s->width;
+ info->height = s->height;
+ info->interleaved = (s->interlace_type != 0);
+
+ s->channels = png_get_nb_channels(s->color_type);
+ s->bits_per_pixel = s->bit_depth * s->channels;
+ s->bpp = (s->bits_per_pixel + 7) >> 3;
+ s->row_size = (info->width * s->bits_per_pixel + 7) >> 3;
+
+ if (s->bit_depth == 8 &&
+ s->color_type == PNG_COLOR_TYPE_RGB) {
+ info->pix_fmt = PIX_FMT_RGB24;
+ } else if (s->bit_depth == 8 &&
+ s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ info->pix_fmt = PIX_FMT_RGBA32;
+ } else if (s->bit_depth == 8 &&
+ s->color_type == PNG_COLOR_TYPE_GRAY) {
+ info->pix_fmt = PIX_FMT_GRAY8;
+ } else if (s->bit_depth == 1 &&
+ s->color_type == PNG_COLOR_TYPE_GRAY) {
+ info->pix_fmt = PIX_FMT_MONOBLACK;
+ } else if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
+ info->pix_fmt = PIX_FMT_PAL8;
+ } else {
+ goto fail;
+ }
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ goto the_end;
+
+ /* compute the compressed row size */
+ if (!s->interlace_type) {
+ s->crow_size = s->row_size + 1;
+ } else {
+ s->pass = 0;
+ s->pass_row_size = png_pass_row_size(s->pass,
+ s->bits_per_pixel,
+ s->width);
+ s->crow_size = s->pass_row_size + 1;
+ }
+#ifdef DEBUG
+ printf("row_size=%d crow_size =%d\n",
+ s->row_size, s->crow_size);
+#endif
+ s->image_buf = info->pict.data[0];
+ s->image_linesize = info->pict.linesize[0];
+ /* copy the palette if needed */
+ if (s->color_type == PNG_COLOR_TYPE_PALETTE)
+ memcpy(info->pict.data[1], s->palette, 256 * sizeof(uint32_t));
+ /* an all-zero row is used as the previous row when filtering the first row */
+ s->last_row = av_mallocz(s->row_size);
+ if (!s->last_row)
+ goto fail;
+ if (s->interlace_type ||
+ s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ s->tmp_row = av_malloc(s->row_size);
+ if (!s->tmp_row)
+ goto fail;
+ }
+ /* compressed row */
+ s->crow_buf = av_malloc(s->row_size + 1);
+ if (!s->crow_buf)
+ goto fail;
+ s->zstream.avail_out = s->crow_size;
+ s->zstream.next_out = s->crow_buf;
+ }
+ s->state |= PNG_IDAT;
+ if (png_decode_idat(s, f, length) < 0)
+ goto fail;
+ /* skip crc */
+ crc = get_be32(f);
+ break;
+ case MKTAG('P', 'L', 'T', 'E'):
+ {
+ int n, i, r, g, b;
+
+ if ((length % 3) != 0 || length > 256 * 3)
+ goto skip_tag;
+ /* read the palette */
+ n = length / 3;
+ for(i=0;i<n;i++) {
+ r = get_byte(f);
+ g = get_byte(f);
+ b = get_byte(f);
+ s->palette[i] = (0xff << 24) | (r << 16) | (g << 8) | b;
+ }
+ for(;i<256;i++) {
+ s->palette[i] = (0xff << 24);
+ }
+ s->state |= PNG_PLTE;
+ crc = get_be32(f);
+ }
+ break;
+ case MKTAG('t', 'R', 'N', 'S'):
+ {
+ int v, i;
+
+ /* read the transparency. XXX: Only palette mode supported */
+ if (s->color_type != PNG_COLOR_TYPE_PALETTE ||
+ length > 256 ||
+ !(s->state & PNG_PLTE))
+ goto skip_tag;
+ for(i=0;i<length;i++) {
+ v = get_byte(f);
+ s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
+ }
+ crc = get_be32(f);
+ }
+ break;
+ case MKTAG('I', 'E', 'N', 'D'):
+ if (!(s->state & PNG_ALLIMAGE))
+ goto fail;
+ crc = get_be32(f);
+ goto exit_loop;
+ default:
+ /* skip tag */
+ skip_tag:
+ url_fskip(f, length + 4);
+ break;
+ }
+ }
+ exit_loop:
+ ret = 0;
+ the_end:
+ inflateEnd(&s->zstream);
+ av_free(s->crow_buf);
+ av_free(s->last_row);
+ av_free(s->tmp_row);
+ return ret;
+ fail:
+ ret = -1;
+ goto the_end;
+}
+
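+/* A minimal sketch of the chunk layout written below (derived from this
+   code, not quoted from the PNG specification): a 4-byte big-endian length,
+   the 4-byte chunk type, <length> bytes of data, and a big-endian CRC-32
+   computed over the type and data (the length field is not covered). */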
+static void png_write_chunk(ByteIOContext *f, uint32_t tag,
+ const uint8_t *buf, int length)
+{
+ uint32_t crc;
+ uint8_t tagbuf[4];
+
+ put_be32(f, length);
+ crc = crc32(0, Z_NULL, 0);
+ tagbuf[0] = tag;
+ tagbuf[1] = tag >> 8;
+ tagbuf[2] = tag >> 16;
+ tagbuf[3] = tag >> 24;
+ crc = crc32(crc, tagbuf, 4);
+ put_le32(f, tag);
+ if (length > 0) {
+ crc = crc32(crc, buf, length);
+ put_buffer(f, buf, length);
+ }
+ put_be32(f, crc);
+}
+
+/* XXX: use avcodec generic function ? */
+static void to_be32(uint8_t *p, uint32_t v)
+{
+ p[0] = v >> 24;
+ p[1] = v >> 16;
+ p[2] = v >> 8;
+ p[3] = v;
+}
+
+typedef struct PNGEncodeState {
+ ByteIOContext *f;
+ z_stream zstream;
+ uint8_t buf[IOBUF_SIZE];
+} PNGEncodeState;
+
+
+/* XXX: do filtering */
+static int png_write_row(PNGEncodeState *s, const uint8_t *data, int size)
+{
+ int ret;
+
+ s->zstream.avail_in = size;
+ s->zstream.next_in = (uint8_t *)data;
+ while (s->zstream.avail_in > 0) {
+ ret = deflate(&s->zstream, Z_NO_FLUSH);
+ if (ret != Z_OK)
+ return -1;
+ if (s->zstream.avail_out == 0) {
+ png_write_chunk(s->f, MKTAG('I', 'D', 'A', 'T'), s->buf, IOBUF_SIZE);
+ s->zstream.avail_out = IOBUF_SIZE;
+ s->zstream.next_out = s->buf;
+ }
+ }
+ return 0;
+}
+
+static int png_write(ByteIOContext *f, AVImageInfo *info)
+{
+ PNGEncodeState s1, *s = &s1;
+ int bit_depth, color_type, y, len, row_size, ret, is_progressive;
+ int bits_per_pixel, pass_row_size;
+ uint8_t *ptr;
+ uint8_t *crow_buf = NULL;
+ uint8_t *tmp_buf = NULL;
+
+ s->f = f;
+ is_progressive = info->interleaved;
+ switch(info->pix_fmt) {
+ case PIX_FMT_RGBA32:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_RGB_ALPHA;
+ break;
+ case PIX_FMT_RGB24:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_RGB;
+ break;
+ case PIX_FMT_GRAY8:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case PIX_FMT_MONOBLACK:
+ bit_depth = 1;
+ color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case PIX_FMT_PAL8:
+ bit_depth = 8;
+ color_type = PNG_COLOR_TYPE_PALETTE;
+ break;
+ default:
+ return -1;
+ }
+ bits_per_pixel = png_get_nb_channels(color_type) * bit_depth;
+ row_size = (info->width * bits_per_pixel + 7) >> 3;
+
+ s->zstream.zalloc = png_zalloc;
+ s->zstream.zfree = png_zfree;
+ s->zstream.opaque = NULL;
+ ret = deflateInit2(&s->zstream, Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED, 15, 8, Z_DEFAULT_STRATEGY);
+ if (ret != Z_OK)
+ return -1;
+ crow_buf = av_malloc(row_size + 1);
+ if (!crow_buf)
+ goto fail;
+ if (is_progressive) {
+ tmp_buf = av_malloc(row_size + 1);
+ if (!tmp_buf)
+ goto fail;
+ }
+
+ /* write png header */
+ put_buffer(f, pngsig, 8);
+
+ to_be32(s->buf, info->width);
+ to_be32(s->buf + 4, info->height);
+ s->buf[8] = bit_depth;
+ s->buf[9] = color_type;
+ s->buf[10] = 0; /* compression type */
+ s->buf[11] = 0; /* filter type */
+ s->buf[12] = is_progressive; /* interlace type */
+
+ png_write_chunk(f, MKTAG('I', 'H', 'D', 'R'), s->buf, 13);
+
+ /* put the palette if needed */
+ if (color_type == PNG_COLOR_TYPE_PALETTE) {
+ int has_alpha, alpha, i;
+ unsigned int v;
+ uint32_t *palette;
+ uint8_t *alpha_ptr;
+
+ palette = (uint32_t *)info->pict.data[1];
+ ptr = s->buf;
+ alpha_ptr = s->buf + 256 * 3;
+ has_alpha = 0;
+ for(i = 0; i < 256; i++) {
+ v = palette[i];
+ alpha = v >> 24;
+ if (alpha != 0xff)
+ has_alpha = 1;
+ *alpha_ptr++ = alpha;
+ ptr[0] = v >> 16;
+ ptr[1] = v >> 8;
+ ptr[2] = v;
+ ptr += 3;
+ }
+ png_write_chunk(f, MKTAG('P', 'L', 'T', 'E'), s->buf, 256 * 3);
+ if (has_alpha) {
+ png_write_chunk(f, MKTAG('t', 'R', 'N', 'S'), s->buf + 256 * 3, 256);
+ }
+ }
+
+ /* now put each row */
+ s->zstream.avail_out = IOBUF_SIZE;
+ s->zstream.next_out = s->buf;
+ if (is_progressive) {
+ uint8_t *ptr1;
+ int pass;
+
+ for(pass = 0; pass < NB_PASSES; pass++) {
+ /* NOTE: a pass is completely omitted if no pixels would be
+ output */
+ pass_row_size = png_pass_row_size(pass, bits_per_pixel, info->width);
+ if (pass_row_size > 0) {
+ for(y = 0; y < info->height; y++) {
+ if ((png_pass_ymask[pass] << (y & 7)) & 0x80) {
+ ptr = info->pict.data[0] + y * info->pict.linesize[0];
+ if (color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
+ convert_from_rgba32(tmp_buf, ptr, info->width);
+ ptr1 = tmp_buf;
+ } else {
+ ptr1 = ptr;
+ }
+ png_get_interlaced_row(crow_buf + 1, pass_row_size,
+ bits_per_pixel, pass,
+ ptr1, info->width);
+ crow_buf[0] = PNG_FILTER_VALUE_NONE;
+ png_write_row(s, crow_buf, pass_row_size + 1);
+ }
+ }
+ }
+ }
+ } else {
+ for(y = 0; y < info->height; y++) {
+ ptr = info->pict.data[0] + y * info->pict.linesize[0];
+ if (color_type == PNG_COLOR_TYPE_RGB_ALPHA)
+ convert_from_rgba32(crow_buf + 1, ptr, info->width);
+ else
+ memcpy(crow_buf + 1, ptr, row_size);
+ crow_buf[0] = PNG_FILTER_VALUE_NONE;
+ png_write_row(s, crow_buf, row_size + 1);
+ }
+ }
+ /* compress last bytes */
+ for(;;) {
+ ret = deflate(&s->zstream, Z_FINISH);
+ if (ret == Z_OK || ret == Z_STREAM_END) {
+ len = IOBUF_SIZE - s->zstream.avail_out;
+ if (len > 0) {
+ png_write_chunk(f, MKTAG('I', 'D', 'A', 'T'), s->buf, len);
+ }
+ s->zstream.avail_out = IOBUF_SIZE;
+ s->zstream.next_out = s->buf;
+ if (ret == Z_STREAM_END)
+ break;
+ } else {
+ goto fail;
+ }
+ }
+ png_write_chunk(f, MKTAG('I', 'E', 'N', 'D'), NULL, 0);
+
+ put_flush_packet(f);
+ ret = 0;
+ the_end:
+ av_free(crow_buf);
+ av_free(tmp_buf);
+ deflateEnd(&s->zstream);
+ return ret;
+ fail:
+ ret = -1;
+ goto the_end;
+}
+
+AVImageFormat png_image_format = {
+ "png",
+ "png",
+ png_probe,
+ png_read,
+ (1 << PIX_FMT_RGBA32) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_GRAY8) |
+ (1 << PIX_FMT_MONOBLACK) | (1 << PIX_FMT_PAL8),
+ png_write,
+ AVIMAGE_INTERLEAVED,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/pnm.c b/contrib/ffmpeg/libavformat/pnm.c
new file mode 100644
index 000000000..ade5d7c5d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/pnm.c
@@ -0,0 +1,478 @@
+/*
+ * PNM image format
+ * Copyright (c) 2002, 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+static inline int pnm_space(int c)
+{
+ return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
+}
+
+static void pnm_get(ByteIOContext *f, char *str, int buf_size)
+{
+ char *s;
+ int c;
+
+ /* skip spaces and comments */
+ for(;;) {
+ c = url_fgetc(f);
+ if (c == '#') {
+ do {
+ c = url_fgetc(f);
+ } while (c != '\n' && c != URL_EOF);
+ } else if (!pnm_space(c)) {
+ break;
+ }
+ }
+
+ s = str;
+ while (c != URL_EOF && !pnm_space(c)) {
+ if ((s - str) < buf_size - 1)
+ *s++ = c;
+ c = url_fgetc(f);
+ }
+ *s = '\0';
+}
+
+static int pnm_read1(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque,
+ int allow_yuv)
+{
+ int i, n, linesize, h;
+ char buf1[32];
+ unsigned char *ptr;
+ AVImageInfo info1, *info = &info1;
+ int ret;
+
+ pnm_get(f, buf1, sizeof(buf1));
+ if (!strcmp(buf1, "P4")) {
+ info->pix_fmt = PIX_FMT_MONOWHITE;
+ } else if (!strcmp(buf1, "P5")) {
+ if (allow_yuv)
+ info->pix_fmt = PIX_FMT_YUV420P;
+ else
+ info->pix_fmt = PIX_FMT_GRAY8;
+ } else if (!strcmp(buf1, "P6")) {
+ info->pix_fmt = PIX_FMT_RGB24;
+ } else {
+ return AVERROR_INVALIDDATA;
+ }
+ pnm_get(f, buf1, sizeof(buf1));
+ info->width = atoi(buf1);
+ if (info->width <= 0)
+ return AVERROR_INVALIDDATA;
+ pnm_get(f, buf1, sizeof(buf1));
+ info->height = atoi(buf1);
+ if (info->height <= 0)
+ return AVERROR_INVALIDDATA;
+ if (info->pix_fmt != PIX_FMT_MONOWHITE) {
+ pnm_get(f, buf1, sizeof(buf1));
+ }
+
+ /* extra checks for YUV420: the width must be even and the stored height is 3/2 of the image height */
+ if (info->pix_fmt == PIX_FMT_YUV420P) {
+ if ((info->width & 1) != 0)
+ return AVERROR_INVALIDDATA;
+ h = (info->height * 2);
+ if ((h % 3) != 0)
+ return AVERROR_INVALIDDATA;
+ h /= 3;
+ info->height = h;
+ }
+
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ return ret;
+
+ switch(info->pix_fmt) {
+ default:
+ return AVERROR_INVALIDDATA;
+ case PIX_FMT_RGB24:
+ n = info->width * 3;
+ goto do_read;
+ case PIX_FMT_GRAY8:
+ n = info->width;
+ goto do_read;
+ case PIX_FMT_MONOWHITE:
+ n = (info->width + 7) >> 3;
+ do_read:
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i = 0; i < info->height; i++) {
+ get_buffer(f, ptr, n);
+ ptr += linesize;
+ }
+ break;
+ case PIX_FMT_YUV420P:
+ {
+ unsigned char *ptr1, *ptr2;
+
+ n = info->width;
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i = 0; i < info->height; i++) {
+ get_buffer(f, ptr, n);
+ ptr += linesize;
+ }
+ ptr1 = info->pict.data[1];
+ ptr2 = info->pict.data[2];
+ n >>= 1;
+ h = info->height >> 1;
+ for(i = 0; i < h; i++) {
+ get_buffer(f, ptr1, n);
+ get_buffer(f, ptr2, n);
+ ptr1 += info->pict.linesize[1];
+ ptr2 += info->pict.linesize[2];
+ }
+ }
+ break;
+ }
+ return 0;
+}
+
+static int pnm_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ return pnm_read1(f, alloc_cb, opaque, 0);
+}
+
+static int pgmyuv_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ return pnm_read1(f, alloc_cb, opaque, 1);
+}
+
+static int pnm_write(ByteIOContext *pb, AVImageInfo *info)
+{
+ int i, h, h1, c, n, linesize;
+ char buf[100];
+ uint8_t *ptr, *ptr1, *ptr2;
+
+ h = info->height;
+ h1 = h;
+ switch(info->pix_fmt) {
+ case PIX_FMT_MONOWHITE:
+ c = '4';
+ n = (info->width + 7) >> 3;
+ break;
+ case PIX_FMT_GRAY8:
+ c = '5';
+ n = info->width;
+ break;
+ case PIX_FMT_RGB24:
+ c = '6';
+ n = info->width * 3;
+ break;
+ case PIX_FMT_YUV420P:
+ c = '5';
+ n = info->width;
+ h1 = (h * 3) / 2;
+ break;
+ default:
+ return AVERROR_INVALIDDATA;
+ }
+ snprintf(buf, sizeof(buf),
+ "P%c\n%d %d\n",
+ c, info->width, h1);
+ put_buffer(pb, buf, strlen(buf));
+ if (info->pix_fmt != PIX_FMT_MONOWHITE) {
+ snprintf(buf, sizeof(buf),
+ "%d\n", 255);
+ put_buffer(pb, buf, strlen(buf));
+ }
+
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i=0;i<h;i++) {
+ put_buffer(pb, ptr, n);
+ ptr += linesize;
+ }
+
+ if (info->pix_fmt == PIX_FMT_YUV420P) {
+ h >>= 1;
+ n >>= 1;
+ ptr1 = info->pict.data[1];
+ ptr2 = info->pict.data[2];
+ for(i=0;i<h;i++) {
+ put_buffer(pb, ptr1, n);
+ put_buffer(pb, ptr2, n);
+ ptr1 += info->pict.linesize[1];
+ ptr2 += info->pict.linesize[2];
+ }
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int pam_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ int i, n, linesize, h, w, depth, maxval;
+ char buf1[32], tuple_type[32];
+ unsigned char *ptr;
+ AVImageInfo info1, *info = &info1;
+ int ret;
+
+ pnm_get(f, buf1, sizeof(buf1));
+ if (strcmp(buf1, "P7") != 0)
+ return AVERROR_INVALIDDATA;
+ w = -1;
+ h = -1;
+ maxval = -1;
+ depth = -1;
+ tuple_type[0] = '\0';
+ for(;;) {
+ pnm_get(f, buf1, sizeof(buf1));
+ if (!strcmp(buf1, "WIDTH")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ w = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "HEIGHT")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ h = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "DEPTH")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ depth = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "MAXVAL")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ maxval = strtol(buf1, NULL, 10);
+ } else if (!strcmp(buf1, "TUPLETYPE")) {
+ pnm_get(f, buf1, sizeof(buf1));
+ pstrcpy(tuple_type, sizeof(tuple_type), buf1);
+ } else if (!strcmp(buf1, "ENDHDR")) {
+ break;
+ } else {
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ /* check that all tags are present */
+ if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0')
+ return AVERROR_INVALIDDATA;
+ info->width = w;
+ info->height = h;
+ if (depth == 1) {
+ if (maxval == 1)
+ info->pix_fmt = PIX_FMT_MONOWHITE;
+ else
+ info->pix_fmt = PIX_FMT_GRAY8;
+ } else if (depth == 3) {
+ info->pix_fmt = PIX_FMT_RGB24;
+ } else if (depth == 4) {
+ info->pix_fmt = PIX_FMT_RGBA32;
+ } else {
+ return AVERROR_INVALIDDATA;
+ }
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ return ret;
+
+ switch(info->pix_fmt) {
+ default:
+ return AVERROR_INVALIDDATA;
+ case PIX_FMT_RGB24:
+ n = info->width * 3;
+ goto do_read;
+ case PIX_FMT_GRAY8:
+ n = info->width;
+ goto do_read;
+ case PIX_FMT_MONOWHITE:
+ n = (info->width + 7) >> 3;
+ do_read:
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i = 0; i < info->height; i++) {
+ get_buffer(f, ptr, n);
+ ptr += linesize;
+ }
+ break;
+ case PIX_FMT_RGBA32:
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+ for(i = 0; i < info->height; i++) {
+ int j, r, g, b, a;
+
+ for(j = 0;j < w; j++) {
+ r = get_byte(f);
+ g = get_byte(f);
+ b = get_byte(f);
+ a = get_byte(f);
+ ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
+ }
+ ptr += linesize;
+ }
+ break;
+ }
+ return 0;
+}
+
+static int pam_write(ByteIOContext *pb, AVImageInfo *info)
+{
+ int i, h, w, n, linesize, depth, maxval;
+ const char *tuple_type;
+ char buf[100];
+ uint8_t *ptr;
+
+ h = info->height;
+ w = info->width;
+ switch(info->pix_fmt) {
+ case PIX_FMT_MONOWHITE:
+ n = (info->width + 7) >> 3;
+ depth = 1;
+ maxval = 1;
+ tuple_type = "BLACKANDWHITE";
+ break;
+ case PIX_FMT_GRAY8:
+ n = info->width;
+ depth = 1;
+ maxval = 255;
+ tuple_type = "GRAYSCALE";
+ break;
+ case PIX_FMT_RGB24:
+ n = info->width * 3;
+ depth = 3;
+ maxval = 255;
+ tuple_type = "RGB";
+ break;
+ case PIX_FMT_RGBA32:
+ n = info->width * 4;
+ depth = 4;
+ maxval = 255;
+ tuple_type = "RGB_ALPHA";
+ break;
+ default:
+ return AVERROR_INVALIDDATA;
+ }
+ snprintf(buf, sizeof(buf),
+ "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
+ w, h, depth, maxval, tuple_type);
+ put_buffer(pb, buf, strlen(buf));
+
+ ptr = info->pict.data[0];
+ linesize = info->pict.linesize[0];
+
+ if (info->pix_fmt == PIX_FMT_RGBA32) {
+ int j;
+ unsigned int v;
+
+ for(i=0;i<h;i++) {
+ for(j=0;j<w;j++) {
+ v = ((uint32_t *)ptr)[j];
+ put_byte(pb, (v >> 16) & 0xff);
+ put_byte(pb, (v >> 8) & 0xff);
+ put_byte(pb, (v) & 0xff);
+ put_byte(pb, (v >> 24) & 0xff);
+ }
+ ptr += linesize;
+ }
+ } else {
+ for(i=0;i<h;i++) {
+ put_buffer(pb, ptr, n);
+ ptr += linesize;
+ }
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int pnm_probe(AVProbeData *pd)
+{
+ const char *p = pd->buf;
+ if (pd->buf_size >= 8 &&
+ p[0] == 'P' &&
+ p[1] >= '4' && p[1] <= '6' &&
+ pnm_space(p[2]) )
+ return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
+ else
+ return 0;
+}
+
+static int pgmyuv_probe(AVProbeData *pd)
+{
+ if (match_ext(pd->filename, "pgmyuv"))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int pam_probe(AVProbeData *pd)
+{
+ const char *p = pd->buf;
+ if (pd->buf_size >= 8 &&
+ p[0] == 'P' &&
+ p[1] == '7' &&
+ p[2] == '\n')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+AVImageFormat pnm_image_format = {
+ "pnm",
+ NULL,
+ pnm_probe,
+ pnm_read,
+ 0,
+ NULL,
+};
+
+AVImageFormat pbm_image_format = {
+ "pbm",
+ "pbm",
+ NULL,
+ NULL,
+ (1 << PIX_FMT_MONOWHITE),
+ pnm_write,
+};
+
+AVImageFormat pgm_image_format = {
+ "pgm",
+ "pgm",
+ NULL,
+ NULL,
+ (1 << PIX_FMT_GRAY8),
+ pnm_write,
+};
+
+AVImageFormat ppm_image_format = {
+ "ppm",
+ "ppm",
+ NULL,
+ NULL,
+ (1 << PIX_FMT_RGB24),
+ pnm_write,
+};
+
+AVImageFormat pam_image_format = {
+ "pam",
+ "pam",
+ pam_probe,
+ pam_read,
+ (1 << PIX_FMT_MONOWHITE) | (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) |
+ (1 << PIX_FMT_RGBA32),
+ pam_write,
+};
+
+AVImageFormat pgmyuv_image_format = {
+ "pgmyuv",
+ "pgmyuv",
+ pgmyuv_probe,
+ pgmyuv_read,
+ (1 << PIX_FMT_YUV420P),
+ pnm_write,
+};
diff --git a/contrib/ffmpeg/libavformat/psxstr.c b/contrib/ffmpeg/libavformat/psxstr.c
new file mode 100644
index 000000000..b03f65750
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/psxstr.c
@@ -0,0 +1,364 @@
+/*
+ * Sony Playstation (PSX) STR File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file psxstr.c
+ * PSX STR file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * This module handles streams that have been ripped from Sony Playstation
+ * CD games. This demuxer can handle either raw STR files (which are just
+ * concatenations of raw compact disc sectors) or STR files with 0x2C-byte
+ * RIFF headers, followed by CD sectors.
+ */
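+
+/* Sector layout assumed by this demuxer (a sketch derived from the offsets
+ * used below, not an authoritative CD-XA reference):
+ *   0x00..0x0B  sync header: 0x00, ten 0xFF bytes, 0x00
+ *   0x11        channel number (0..31)
+ *   0x12        submode; CDXA_TYPE_MASK selects data/video/audio
+ *   0x13        audio coding: bit 0 = stereo, bit 2 = 18900 Hz (else 37800),
+ *               bit 4 = 8 bits/sample (else 4)
+ *   0x18        32-bit STR_MAGIC on video sectors
+ *   0x1C/0x1E   current sector index / sector count for the current frame
+ *   0x24        total frame size in bytes
+ *   0x28/0x2A   video width / height (16-bit little-endian)
+ *   0x38        start of the 0x7E0-byte video payload
+ */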
+
+#include "avformat.h"
+
+//#define PRINTSTUFF
+
+#define RIFF_TAG MKTAG('R', 'I', 'F', 'F')
+#define CDXA_TAG MKTAG('C', 'D', 'X', 'A')
+
+#define RAW_CD_SECTOR_SIZE 2352
+#define RAW_CD_SECTOR_DATA_SIZE 2304
+#define VIDEO_DATA_CHUNK_SIZE 0x7E0
+#define VIDEO_DATA_HEADER_SIZE 0x38
+#define RIFF_HEADER_SIZE 0x2C
+
+#define CDXA_TYPE_MASK 0x0E
+#define CDXA_TYPE_DATA 0x08
+#define CDXA_TYPE_AUDIO 0x04
+#define CDXA_TYPE_VIDEO 0x02
+
+#define STR_MAGIC (0x80010160)
+
+typedef struct StrChannel {
+
+ int type;
+#define STR_AUDIO 0
+#define STR_VIDEO 1
+
+ /* video parameters */
+ int width;
+ int height;
+ int video_stream_index;
+
+ /* audio parameters */
+ int sample_rate;
+ int channels;
+ int bits;
+ int audio_stream_index;
+} StrChannel;
+
+typedef struct StrDemuxContext {
+
+ /* a STR file can contain up to 32 channels of data */
+ StrChannel channels[32];
+
+ /* only decode the first audio and video channels encountered */
+ int video_channel;
+ int audio_channel;
+
+ int64_t pts;
+
+ unsigned char *video_chunk;
+ AVPacket tmp_pkt;
+} StrDemuxContext;
+
+static const char sync_header[12] = {0x00,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x00};
+
+static int str_probe(AVProbeData *p)
+{
+ int start;
+
+ /* need at least 0x38 bytes to validate */
+ if (p->buf_size < 0x38)
+ return 0;
+
+ if ((LE_32(&p->buf[0]) == RIFF_TAG) &&
+ (LE_32(&p->buf[8]) == CDXA_TAG)) {
+
+ /* RIFF header seen; skip 0x2C bytes */
+ start = RIFF_HEADER_SIZE;
+ } else
+ start = 0;
+
+ /* look for CD sync header (00, 0xFF x 10, 00) */
+ if (memcmp(p->buf+start,sync_header,sizeof(sync_header)))
+ return 0;
+
+ /* MPEG files (like those ripped from VCDs) can also look like this;
+ * only return half certainty */
+ return 50;
+}
+
+#if 0
+static void dump(unsigned char *buf,size_t len)
+{
+ int i;
+ for(i=0;i<len;i++) {
+ if ((i&15)==0) av_log(NULL, AV_LOG_DEBUG, "%04x ",i);
+ av_log(NULL, AV_LOG_DEBUG, "%02x ",buf[i]);
+ if ((i&15)==15) av_log(NULL, AV_LOG_DEBUG, "\n");
+ }
+ av_log(NULL, AV_LOG_DEBUG, "\n");
+}
+#endif
+
+static int str_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ StrDemuxContext *str = (StrDemuxContext *)s->priv_data;
+ AVStream *st;
+ unsigned char sector[RAW_CD_SECTOR_SIZE];
+ int start;
+ int i;
+ int channel;
+
+ /* initialize context members */
+ str->pts = 0;
+ str->audio_channel = -1; /* assume no audio or video */
+ str->video_channel = -1;
+ str->video_chunk = NULL;
+
+
+ /* skip over any RIFF header */
+ if (get_buffer(pb, sector, RIFF_HEADER_SIZE) != RIFF_HEADER_SIZE)
+ return AVERROR_IO;
+ if (LE_32(&sector[0]) == RIFF_TAG)
+ start = RIFF_HEADER_SIZE;
+ else
+ start = 0;
+
+ url_fseek(pb, start, SEEK_SET);
+
+ /* check through the first 32 sectors for individual channels */
+ for (i = 0; i < 32; i++) {
+ if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
+ return AVERROR_IO;
+
+//printf("%02x %02x %02x %02x\n",sector[0x10],sector[0x11],sector[0x12],sector[0x13]);
+
+ channel = sector[0x11];
+ if (channel >= 32)
+ return AVERROR_INVALIDDATA;
+
+ switch (sector[0x12] & CDXA_TYPE_MASK) {
+
+ case CDXA_TYPE_DATA:
+ case CDXA_TYPE_VIDEO:
+ /* check if this channel gets to be the dominant video channel */
+ if (str->video_channel == -1) {
+ /* qualify the magic number */
+ if (LE_32(&sector[0x18]) != STR_MAGIC)
+ break;
+ str->video_channel = channel;
+ str->channels[channel].type = STR_VIDEO;
+ str->channels[channel].width = LE_16(&sector[0x28]);
+ str->channels[channel].height = LE_16(&sector[0x2A]);
+
+ /* allocate a new AVStream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 64, 1, 15);
+
+ str->channels[channel].video_stream_index = st->index;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_MDEC;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = str->channels[channel].width;
+ st->codec->height = str->channels[channel].height;
+ }
+ break;
+
+ case CDXA_TYPE_AUDIO:
+ /* check if this channel gets to be the dominant audio channel */
+ if (str->audio_channel == -1) {
+ int fmt;
+ str->audio_channel = channel;
+ str->channels[channel].type = STR_AUDIO;
+ str->channels[channel].channels =
+ (sector[0x13] & 0x01) ? 2 : 1;
+ str->channels[channel].sample_rate =
+ (sector[0x13] & 0x04) ? 18900 : 37800;
+ str->channels[channel].bits =
+ (sector[0x13] & 0x10) ? 8 : 4;
+
+ /* allocate a new AVStream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 64, 128, str->channels[channel].sample_rate);
+
+ str->channels[channel].audio_stream_index = st->index;
+
+ fmt = sector[0x13];
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_ADPCM_XA;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->channels = (fmt&1)?2:1;
+ st->codec->sample_rate = (fmt&4)?18900:37800;
+ // st->codec->bit_rate = 0; //FIXME;
+ st->codec->block_align = 128;
+ }
+ break;
+
+ default:
+ /* ignore */
+ break;
+ }
+ }
+
+if (str->video_channel != -1)
+ av_log (s, AV_LOG_DEBUG, " video channel = %d, %d x %d %d\n", str->video_channel,
+ str->channels[str->video_channel].width,
+ str->channels[str->video_channel].height,str->channels[str->video_channel].video_stream_index);
+if (str->audio_channel != -1)
+ av_log (s, AV_LOG_DEBUG, " audio channel = %d, %d Hz, %d channels, %d bits/sample %d\n",
+ str->audio_channel,
+ str->channels[str->audio_channel].sample_rate,
+ str->channels[str->audio_channel].channels,
+ str->channels[str->audio_channel].bits,str->channels[str->audio_channel].audio_stream_index);
+
+ /* back to the start */
+ url_fseek(pb, start, SEEK_SET);
+
+ return 0;
+}
+
+static int str_read_packet(AVFormatContext *s,
+ AVPacket *ret_pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ StrDemuxContext *str = (StrDemuxContext *)s->priv_data;
+ unsigned char sector[RAW_CD_SECTOR_SIZE];
+ int channel;
+ int packet_read = 0;
+ int ret = 0;
+ AVPacket *pkt;
+
+ while (!packet_read) {
+
+ if (get_buffer(pb, sector, RAW_CD_SECTOR_SIZE) != RAW_CD_SECTOR_SIZE)
+ return AVERROR_IO;
+
+ channel = sector[0x11];
+ if (channel >= 32)
+ return AVERROR_INVALIDDATA;
+
+ switch (sector[0x12] & CDXA_TYPE_MASK) {
+
+ case CDXA_TYPE_DATA:
+ case CDXA_TYPE_VIDEO:
+ /* check if this is the video channel we care about */
+ if (channel == str->video_channel) {
+
+ int current_sector = LE_16(&sector[0x1C]);
+ int sector_count = LE_16(&sector[0x1E]);
+ int frame_size = LE_32(&sector[0x24]);
+ int bytes_to_copy;
+// printf("%d %d %d\n",current_sector,sector_count,frame_size);
+ /* if this is the first sector of the frame, allocate a pkt */
+ pkt = &str->tmp_pkt;
+ if (current_sector == 0) {
+ if (av_new_packet(pkt, frame_size))
+ return AVERROR_IO;
+
+ pkt->pos= url_ftell(pb) - RAW_CD_SECTOR_SIZE;
+ pkt->stream_index =
+ str->channels[channel].video_stream_index;
+ // pkt->pts = str->pts;
+
+ /* if there is no audio, adjust the pts after every video
+ * frame; assume 15 fps */
+ if (str->audio_channel != -1)
+ str->pts += (90000 / 15);
+ }
+
+ /* load all the constituent chunks in the video packet */
+ bytes_to_copy = frame_size - current_sector*VIDEO_DATA_CHUNK_SIZE;
+ if (bytes_to_copy>0) {
+ if (bytes_to_copy>VIDEO_DATA_CHUNK_SIZE) bytes_to_copy=VIDEO_DATA_CHUNK_SIZE;
+ memcpy(pkt->data + current_sector*VIDEO_DATA_CHUNK_SIZE,
+ sector + VIDEO_DATA_HEADER_SIZE, bytes_to_copy);
+ }
+ if (current_sector == sector_count-1) {
+ *ret_pkt = *pkt;
+ return 0;
+ }
+
+ }
+ break;
+
+ case CDXA_TYPE_AUDIO:
+#ifdef PRINTSTUFF
+printf (" dropping audio sector\n");
+#endif
+#if 1
+ /* check if this is the audio channel we care about */
+ if (channel == str->audio_channel) {
+ pkt = ret_pkt;
+ if (av_new_packet(pkt, 2304))
+ return AVERROR_IO;
+ memcpy(pkt->data,sector+24,2304);
+
+ pkt->stream_index =
+ str->channels[channel].audio_stream_index;
+ //pkt->pts = str->pts;
+ return 0;
+ }
+#endif
+ break;
+ default:
+ /* drop the sector and move on */
+#ifdef PRINTSTUFF
+printf (" dropping other sector\n");
+#endif
+ break;
+ }
+
+ if (url_feof(pb))
+ return AVERROR_IO;
+ }
+
+ return ret;
+}
+
+static int str_read_close(AVFormatContext *s)
+{
+ StrDemuxContext *str = (StrDemuxContext *)s->priv_data;
+
+ av_free(str->video_chunk);
+
+ return 0;
+}
+
+AVInputFormat str_demuxer = {
+ "psxstr",
+ "Sony Playstation STR format",
+ sizeof(StrDemuxContext),
+ str_probe,
+ str_read_header,
+ str_read_packet,
+ str_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/qtpalette.h b/contrib/ffmpeg/libavformat/qtpalette.h
new file mode 100644
index 000000000..ef4ccfa91
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/qtpalette.h
@@ -0,0 +1,295 @@
+/*
+ * Default Palettes for Quicktime Files
+ * Automatically generated from a utility derived from XAnim:
+ * http://xanim.va.pubnix.com/home.html
+ */
+
+#ifndef QTPALETTE_H
+#define QTPALETTE_H
+
+unsigned char ff_qt_default_palette_4[4 * 4] = {
+ 0x93, 0x65, 0x5E, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00,
+ 0xDF, 0xD0, 0xAB, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+unsigned char ff_qt_default_palette_16[16 * 4] = {
+ 0xFF, 0xFB, 0xFF, 0x00,
+ 0xEF, 0xD9, 0xBB, 0x00,
+ 0xE8, 0xC9, 0xB1, 0x00,
+ 0x93, 0x65, 0x5E, 0x00,
+ 0xFC, 0xDE, 0xE8, 0x00,
+ 0x9D, 0x88, 0x91, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00,
+ 0xFF, 0xFF, 0xFF, 0x00,
+ 0x47, 0x48, 0x37, 0x00,
+ 0x7A, 0x5E, 0x55, 0x00,
+ 0xDF, 0xD0, 0xAB, 0x00,
+ 0xFF, 0xFB, 0xF9, 0x00,
+ 0xE8, 0xCA, 0xC5, 0x00,
+ 0x8A, 0x7C, 0x77, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+unsigned char ff_qt_default_palette_256[256 * 4] = {
+ /* 0, 0x00 */ 0xFF, 0xFF, 0xFF, 0x00,
+ /* 1, 0x01 */ 0xFF, 0xFF, 0xCC, 0x00,
+ /* 2, 0x02 */ 0xFF, 0xFF, 0x99, 0x00,
+ /* 3, 0x03 */ 0xFF, 0xFF, 0x66, 0x00,
+ /* 4, 0x04 */ 0xFF, 0xFF, 0x33, 0x00,
+ /* 5, 0x05 */ 0xFF, 0xFF, 0x00, 0x00,
+ /* 6, 0x06 */ 0xFF, 0xCC, 0xFF, 0x00,
+ /* 7, 0x07 */ 0xFF, 0xCC, 0xCC, 0x00,
+ /* 8, 0x08 */ 0xFF, 0xCC, 0x99, 0x00,
+ /* 9, 0x09 */ 0xFF, 0xCC, 0x66, 0x00,
+ /* 10, 0x0A */ 0xFF, 0xCC, 0x33, 0x00,
+ /* 11, 0x0B */ 0xFF, 0xCC, 0x00, 0x00,
+ /* 12, 0x0C */ 0xFF, 0x99, 0xFF, 0x00,
+ /* 13, 0x0D */ 0xFF, 0x99, 0xCC, 0x00,
+ /* 14, 0x0E */ 0xFF, 0x99, 0x99, 0x00,
+ /* 15, 0x0F */ 0xFF, 0x99, 0x66, 0x00,
+ /* 16, 0x10 */ 0xFF, 0x99, 0x33, 0x00,
+ /* 17, 0x11 */ 0xFF, 0x99, 0x00, 0x00,
+ /* 18, 0x12 */ 0xFF, 0x66, 0xFF, 0x00,
+ /* 19, 0x13 */ 0xFF, 0x66, 0xCC, 0x00,
+ /* 20, 0x14 */ 0xFF, 0x66, 0x99, 0x00,
+ /* 21, 0x15 */ 0xFF, 0x66, 0x66, 0x00,
+ /* 22, 0x16 */ 0xFF, 0x66, 0x33, 0x00,
+ /* 23, 0x17 */ 0xFF, 0x66, 0x00, 0x00,
+ /* 24, 0x18 */ 0xFF, 0x33, 0xFF, 0x00,
+ /* 25, 0x19 */ 0xFF, 0x33, 0xCC, 0x00,
+ /* 26, 0x1A */ 0xFF, 0x33, 0x99, 0x00,
+ /* 27, 0x1B */ 0xFF, 0x33, 0x66, 0x00,
+ /* 28, 0x1C */ 0xFF, 0x33, 0x33, 0x00,
+ /* 29, 0x1D */ 0xFF, 0x33, 0x00, 0x00,
+ /* 30, 0x1E */ 0xFF, 0x00, 0xFF, 0x00,
+ /* 31, 0x1F */ 0xFF, 0x00, 0xCC, 0x00,
+ /* 32, 0x20 */ 0xFF, 0x00, 0x99, 0x00,
+ /* 33, 0x21 */ 0xFF, 0x00, 0x66, 0x00,
+ /* 34, 0x22 */ 0xFF, 0x00, 0x33, 0x00,
+ /* 35, 0x23 */ 0xFF, 0x00, 0x00, 0x00,
+ /* 36, 0x24 */ 0xCC, 0xFF, 0xFF, 0x00,
+ /* 37, 0x25 */ 0xCC, 0xFF, 0xCC, 0x00,
+ /* 38, 0x26 */ 0xCC, 0xFF, 0x99, 0x00,
+ /* 39, 0x27 */ 0xCC, 0xFF, 0x66, 0x00,
+ /* 40, 0x28 */ 0xCC, 0xFF, 0x33, 0x00,
+ /* 41, 0x29 */ 0xCC, 0xFF, 0x00, 0x00,
+ /* 42, 0x2A */ 0xCC, 0xCC, 0xFF, 0x00,
+ /* 43, 0x2B */ 0xCC, 0xCC, 0xCC, 0x00,
+ /* 44, 0x2C */ 0xCC, 0xCC, 0x99, 0x00,
+ /* 45, 0x2D */ 0xCC, 0xCC, 0x66, 0x00,
+ /* 46, 0x2E */ 0xCC, 0xCC, 0x33, 0x00,
+ /* 47, 0x2F */ 0xCC, 0xCC, 0x00, 0x00,
+ /* 48, 0x30 */ 0xCC, 0x99, 0xFF, 0x00,
+ /* 49, 0x31 */ 0xCC, 0x99, 0xCC, 0x00,
+ /* 50, 0x32 */ 0xCC, 0x99, 0x99, 0x00,
+ /* 51, 0x33 */ 0xCC, 0x99, 0x66, 0x00,
+ /* 52, 0x34 */ 0xCC, 0x99, 0x33, 0x00,
+ /* 53, 0x35 */ 0xCC, 0x99, 0x00, 0x00,
+ /* 54, 0x36 */ 0xCC, 0x66, 0xFF, 0x00,
+ /* 55, 0x37 */ 0xCC, 0x66, 0xCC, 0x00,
+ /* 56, 0x38 */ 0xCC, 0x66, 0x99, 0x00,
+ /* 57, 0x39 */ 0xCC, 0x66, 0x66, 0x00,
+ /* 58, 0x3A */ 0xCC, 0x66, 0x33, 0x00,
+ /* 59, 0x3B */ 0xCC, 0x66, 0x00, 0x00,
+ /* 60, 0x3C */ 0xCC, 0x33, 0xFF, 0x00,
+ /* 61, 0x3D */ 0xCC, 0x33, 0xCC, 0x00,
+ /* 62, 0x3E */ 0xCC, 0x33, 0x99, 0x00,
+ /* 63, 0x3F */ 0xCC, 0x33, 0x66, 0x00,
+ /* 64, 0x40 */ 0xCC, 0x33, 0x33, 0x00,
+ /* 65, 0x41 */ 0xCC, 0x33, 0x00, 0x00,
+ /* 66, 0x42 */ 0xCC, 0x00, 0xFF, 0x00,
+ /* 67, 0x43 */ 0xCC, 0x00, 0xCC, 0x00,
+ /* 68, 0x44 */ 0xCC, 0x00, 0x99, 0x00,
+ /* 69, 0x45 */ 0xCC, 0x00, 0x66, 0x00,
+ /* 70, 0x46 */ 0xCC, 0x00, 0x33, 0x00,
+ /* 71, 0x47 */ 0xCC, 0x00, 0x00, 0x00,
+ /* 72, 0x48 */ 0x99, 0xFF, 0xFF, 0x00,
+ /* 73, 0x49 */ 0x99, 0xFF, 0xCC, 0x00,
+ /* 74, 0x4A */ 0x99, 0xFF, 0x99, 0x00,
+ /* 75, 0x4B */ 0x99, 0xFF, 0x66, 0x00,
+ /* 76, 0x4C */ 0x99, 0xFF, 0x33, 0x00,
+ /* 77, 0x4D */ 0x99, 0xFF, 0x00, 0x00,
+ /* 78, 0x4E */ 0x99, 0xCC, 0xFF, 0x00,
+ /* 79, 0x4F */ 0x99, 0xCC, 0xCC, 0x00,
+ /* 80, 0x50 */ 0x99, 0xCC, 0x99, 0x00,
+ /* 81, 0x51 */ 0x99, 0xCC, 0x66, 0x00,
+ /* 82, 0x52 */ 0x99, 0xCC, 0x33, 0x00,
+ /* 83, 0x53 */ 0x99, 0xCC, 0x00, 0x00,
+ /* 84, 0x54 */ 0x99, 0x99, 0xFF, 0x00,
+ /* 85, 0x55 */ 0x99, 0x99, 0xCC, 0x00,
+ /* 86, 0x56 */ 0x99, 0x99, 0x99, 0x00,
+ /* 87, 0x57 */ 0x99, 0x99, 0x66, 0x00,
+ /* 88, 0x58 */ 0x99, 0x99, 0x33, 0x00,
+ /* 89, 0x59 */ 0x99, 0x99, 0x00, 0x00,
+ /* 90, 0x5A */ 0x99, 0x66, 0xFF, 0x00,
+ /* 91, 0x5B */ 0x99, 0x66, 0xCC, 0x00,
+ /* 92, 0x5C */ 0x99, 0x66, 0x99, 0x00,
+ /* 93, 0x5D */ 0x99, 0x66, 0x66, 0x00,
+ /* 94, 0x5E */ 0x99, 0x66, 0x33, 0x00,
+ /* 95, 0x5F */ 0x99, 0x66, 0x00, 0x00,
+ /* 96, 0x60 */ 0x99, 0x33, 0xFF, 0x00,
+ /* 97, 0x61 */ 0x99, 0x33, 0xCC, 0x00,
+ /* 98, 0x62 */ 0x99, 0x33, 0x99, 0x00,
+ /* 99, 0x63 */ 0x99, 0x33, 0x66, 0x00,
+ /* 100, 0x64 */ 0x99, 0x33, 0x33, 0x00,
+ /* 101, 0x65 */ 0x99, 0x33, 0x00, 0x00,
+ /* 102, 0x66 */ 0x99, 0x00, 0xFF, 0x00,
+ /* 103, 0x67 */ 0x99, 0x00, 0xCC, 0x00,
+ /* 104, 0x68 */ 0x99, 0x00, 0x99, 0x00,
+ /* 105, 0x69 */ 0x99, 0x00, 0x66, 0x00,
+ /* 106, 0x6A */ 0x99, 0x00, 0x33, 0x00,
+ /* 107, 0x6B */ 0x99, 0x00, 0x00, 0x00,
+ /* 108, 0x6C */ 0x66, 0xFF, 0xFF, 0x00,
+ /* 109, 0x6D */ 0x66, 0xFF, 0xCC, 0x00,
+ /* 110, 0x6E */ 0x66, 0xFF, 0x99, 0x00,
+ /* 111, 0x6F */ 0x66, 0xFF, 0x66, 0x00,
+ /* 112, 0x70 */ 0x66, 0xFF, 0x33, 0x00,
+ /* 113, 0x71 */ 0x66, 0xFF, 0x00, 0x00,
+ /* 114, 0x72 */ 0x66, 0xCC, 0xFF, 0x00,
+ /* 115, 0x73 */ 0x66, 0xCC, 0xCC, 0x00,
+ /* 116, 0x74 */ 0x66, 0xCC, 0x99, 0x00,
+ /* 117, 0x75 */ 0x66, 0xCC, 0x66, 0x00,
+ /* 118, 0x76 */ 0x66, 0xCC, 0x33, 0x00,
+ /* 119, 0x77 */ 0x66, 0xCC, 0x00, 0x00,
+ /* 120, 0x78 */ 0x66, 0x99, 0xFF, 0x00,
+ /* 121, 0x79 */ 0x66, 0x99, 0xCC, 0x00,
+ /* 122, 0x7A */ 0x66, 0x99, 0x99, 0x00,
+ /* 123, 0x7B */ 0x66, 0x99, 0x66, 0x00,
+ /* 124, 0x7C */ 0x66, 0x99, 0x33, 0x00,
+ /* 125, 0x7D */ 0x66, 0x99, 0x00, 0x00,
+ /* 126, 0x7E */ 0x66, 0x66, 0xFF, 0x00,
+ /* 127, 0x7F */ 0x66, 0x66, 0xCC, 0x00,
+ /* 128, 0x80 */ 0x66, 0x66, 0x99, 0x00,
+ /* 129, 0x81 */ 0x66, 0x66, 0x66, 0x00,
+ /* 130, 0x82 */ 0x66, 0x66, 0x33, 0x00,
+ /* 131, 0x83 */ 0x66, 0x66, 0x00, 0x00,
+ /* 132, 0x84 */ 0x66, 0x33, 0xFF, 0x00,
+ /* 133, 0x85 */ 0x66, 0x33, 0xCC, 0x00,
+ /* 134, 0x86 */ 0x66, 0x33, 0x99, 0x00,
+ /* 135, 0x87 */ 0x66, 0x33, 0x66, 0x00,
+ /* 136, 0x88 */ 0x66, 0x33, 0x33, 0x00,
+ /* 137, 0x89 */ 0x66, 0x33, 0x00, 0x00,
+ /* 138, 0x8A */ 0x66, 0x00, 0xFF, 0x00,
+ /* 139, 0x8B */ 0x66, 0x00, 0xCC, 0x00,
+ /* 140, 0x8C */ 0x66, 0x00, 0x99, 0x00,
+ /* 141, 0x8D */ 0x66, 0x00, 0x66, 0x00,
+ /* 142, 0x8E */ 0x66, 0x00, 0x33, 0x00,
+ /* 143, 0x8F */ 0x66, 0x00, 0x00, 0x00,
+ /* 144, 0x90 */ 0x33, 0xFF, 0xFF, 0x00,
+ /* 145, 0x91 */ 0x33, 0xFF, 0xCC, 0x00,
+ /* 146, 0x92 */ 0x33, 0xFF, 0x99, 0x00,
+ /* 147, 0x93 */ 0x33, 0xFF, 0x66, 0x00,
+ /* 148, 0x94 */ 0x33, 0xFF, 0x33, 0x00,
+ /* 149, 0x95 */ 0x33, 0xFF, 0x00, 0x00,
+ /* 150, 0x96 */ 0x33, 0xCC, 0xFF, 0x00,
+ /* 151, 0x97 */ 0x33, 0xCC, 0xCC, 0x00,
+ /* 152, 0x98 */ 0x33, 0xCC, 0x99, 0x00,
+ /* 153, 0x99 */ 0x33, 0xCC, 0x66, 0x00,
+ /* 154, 0x9A */ 0x33, 0xCC, 0x33, 0x00,
+ /* 155, 0x9B */ 0x33, 0xCC, 0x00, 0x00,
+ /* 156, 0x9C */ 0x33, 0x99, 0xFF, 0x00,
+ /* 157, 0x9D */ 0x33, 0x99, 0xCC, 0x00,
+ /* 158, 0x9E */ 0x33, 0x99, 0x99, 0x00,
+ /* 159, 0x9F */ 0x33, 0x99, 0x66, 0x00,
+ /* 160, 0xA0 */ 0x33, 0x99, 0x33, 0x00,
+ /* 161, 0xA1 */ 0x33, 0x99, 0x00, 0x00,
+ /* 162, 0xA2 */ 0x33, 0x66, 0xFF, 0x00,
+ /* 163, 0xA3 */ 0x33, 0x66, 0xCC, 0x00,
+ /* 164, 0xA4 */ 0x33, 0x66, 0x99, 0x00,
+ /* 165, 0xA5 */ 0x33, 0x66, 0x66, 0x00,
+ /* 166, 0xA6 */ 0x33, 0x66, 0x33, 0x00,
+ /* 167, 0xA7 */ 0x33, 0x66, 0x00, 0x00,
+ /* 168, 0xA8 */ 0x33, 0x33, 0xFF, 0x00,
+ /* 169, 0xA9 */ 0x33, 0x33, 0xCC, 0x00,
+ /* 170, 0xAA */ 0x33, 0x33, 0x99, 0x00,
+ /* 171, 0xAB */ 0x33, 0x33, 0x66, 0x00,
+ /* 172, 0xAC */ 0x33, 0x33, 0x33, 0x00,
+ /* 173, 0xAD */ 0x33, 0x33, 0x00, 0x00,
+ /* 174, 0xAE */ 0x33, 0x00, 0xFF, 0x00,
+ /* 175, 0xAF */ 0x33, 0x00, 0xCC, 0x00,
+ /* 176, 0xB0 */ 0x33, 0x00, 0x99, 0x00,
+ /* 177, 0xB1 */ 0x33, 0x00, 0x66, 0x00,
+ /* 178, 0xB2 */ 0x33, 0x00, 0x33, 0x00,
+ /* 179, 0xB3 */ 0x33, 0x00, 0x00, 0x00,
+ /* 180, 0xB4 */ 0x00, 0xFF, 0xFF, 0x00,
+ /* 181, 0xB5 */ 0x00, 0xFF, 0xCC, 0x00,
+ /* 182, 0xB6 */ 0x00, 0xFF, 0x99, 0x00,
+ /* 183, 0xB7 */ 0x00, 0xFF, 0x66, 0x00,
+ /* 184, 0xB8 */ 0x00, 0xFF, 0x33, 0x00,
+ /* 185, 0xB9 */ 0x00, 0xFF, 0x00, 0x00,
+ /* 186, 0xBA */ 0x00, 0xCC, 0xFF, 0x00,
+ /* 187, 0xBB */ 0x00, 0xCC, 0xCC, 0x00,
+ /* 188, 0xBC */ 0x00, 0xCC, 0x99, 0x00,
+ /* 189, 0xBD */ 0x00, 0xCC, 0x66, 0x00,
+ /* 190, 0xBE */ 0x00, 0xCC, 0x33, 0x00,
+ /* 191, 0xBF */ 0x00, 0xCC, 0x00, 0x00,
+ /* 192, 0xC0 */ 0x00, 0x99, 0xFF, 0x00,
+ /* 193, 0xC1 */ 0x00, 0x99, 0xCC, 0x00,
+ /* 194, 0xC2 */ 0x00, 0x99, 0x99, 0x00,
+ /* 195, 0xC3 */ 0x00, 0x99, 0x66, 0x00,
+ /* 196, 0xC4 */ 0x00, 0x99, 0x33, 0x00,
+ /* 197, 0xC5 */ 0x00, 0x99, 0x00, 0x00,
+ /* 198, 0xC6 */ 0x00, 0x66, 0xFF, 0x00,
+ /* 199, 0xC7 */ 0x00, 0x66, 0xCC, 0x00,
+ /* 200, 0xC8 */ 0x00, 0x66, 0x99, 0x00,
+ /* 201, 0xC9 */ 0x00, 0x66, 0x66, 0x00,
+ /* 202, 0xCA */ 0x00, 0x66, 0x33, 0x00,
+ /* 203, 0xCB */ 0x00, 0x66, 0x00, 0x00,
+ /* 204, 0xCC */ 0x00, 0x33, 0xFF, 0x00,
+ /* 205, 0xCD */ 0x00, 0x33, 0xCC, 0x00,
+ /* 206, 0xCE */ 0x00, 0x33, 0x99, 0x00,
+ /* 207, 0xCF */ 0x00, 0x33, 0x66, 0x00,
+ /* 208, 0xD0 */ 0x00, 0x33, 0x33, 0x00,
+ /* 209, 0xD1 */ 0x00, 0x33, 0x00, 0x00,
+ /* 210, 0xD2 */ 0x00, 0x00, 0xFF, 0x00,
+ /* 211, 0xD3 */ 0x00, 0x00, 0xCC, 0x00,
+ /* 212, 0xD4 */ 0x00, 0x00, 0x99, 0x00,
+ /* 213, 0xD5 */ 0x00, 0x00, 0x66, 0x00,
+ /* 214, 0xD6 */ 0x00, 0x00, 0x33, 0x00,
+ /* 215, 0xD7 */ 0xEE, 0x00, 0x00, 0x00,
+ /* 216, 0xD8 */ 0xDD, 0x00, 0x00, 0x00,
+ /* 217, 0xD9 */ 0xBB, 0x00, 0x00, 0x00,
+ /* 218, 0xDA */ 0xAA, 0x00, 0x00, 0x00,
+ /* 219, 0xDB */ 0x88, 0x00, 0x00, 0x00,
+ /* 220, 0xDC */ 0x77, 0x00, 0x00, 0x00,
+ /* 221, 0xDD */ 0x55, 0x00, 0x00, 0x00,
+ /* 222, 0xDE */ 0x44, 0x00, 0x00, 0x00,
+ /* 223, 0xDF */ 0x22, 0x00, 0x00, 0x00,
+ /* 224, 0xE0 */ 0x11, 0x00, 0x00, 0x00,
+ /* 225, 0xE1 */ 0x00, 0xEE, 0x00, 0x00,
+ /* 226, 0xE2 */ 0x00, 0xDD, 0x00, 0x00,
+ /* 227, 0xE3 */ 0x00, 0xBB, 0x00, 0x00,
+ /* 228, 0xE4 */ 0x00, 0xAA, 0x00, 0x00,
+ /* 229, 0xE5 */ 0x00, 0x88, 0x00, 0x00,
+ /* 230, 0xE6 */ 0x00, 0x77, 0x00, 0x00,
+ /* 231, 0xE7 */ 0x00, 0x55, 0x00, 0x00,
+ /* 232, 0xE8 */ 0x00, 0x44, 0x00, 0x00,
+ /* 233, 0xE9 */ 0x00, 0x22, 0x00, 0x00,
+ /* 234, 0xEA */ 0x00, 0x11, 0x00, 0x00,
+ /* 235, 0xEB */ 0x00, 0x00, 0xEE, 0x00,
+ /* 236, 0xEC */ 0x00, 0x00, 0xDD, 0x00,
+ /* 237, 0xED */ 0x00, 0x00, 0xBB, 0x00,
+ /* 238, 0xEE */ 0x00, 0x00, 0xAA, 0x00,
+ /* 239, 0xEF */ 0x00, 0x00, 0x88, 0x00,
+ /* 240, 0xF0 */ 0x00, 0x00, 0x77, 0x00,
+ /* 241, 0xF1 */ 0x00, 0x00, 0x55, 0x00,
+ /* 242, 0xF2 */ 0x00, 0x00, 0x44, 0x00,
+ /* 243, 0xF3 */ 0x00, 0x00, 0x22, 0x00,
+ /* 244, 0xF4 */ 0x00, 0x00, 0x11, 0x00,
+ /* 245, 0xF5 */ 0xEE, 0xEE, 0xEE, 0x00,
+ /* 246, 0xF6 */ 0xDD, 0xDD, 0xDD, 0x00,
+ /* 247, 0xF7 */ 0xBB, 0xBB, 0xBB, 0x00,
+ /* 248, 0xF8 */ 0xAA, 0xAA, 0xAA, 0x00,
+ /* 249, 0xF9 */ 0x88, 0x88, 0x88, 0x00,
+ /* 250, 0xFA */ 0x77, 0x77, 0x77, 0x00,
+ /* 251, 0xFB */ 0x55, 0x55, 0x55, 0x00,
+ /* 252, 0xFC */ 0x44, 0x44, 0x44, 0x00,
+ /* 253, 0xFD */ 0x22, 0x22, 0x22, 0x00,
+ /* 254, 0xFE */ 0x11, 0x11, 0x11, 0x00,
+ /* 255, 0xFF */ 0x00, 0x00, 0x00, 0x00
+};
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/raw.c b/contrib/ffmpeg/libavformat/raw.c
new file mode 100644
index 000000000..e1ccbcd6d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/raw.c
@@ -0,0 +1,843 @@
+/*
+ * RAW muxer and demuxer
+ * Copyright (c) 2001 Fabrice Bellard.
+ * Copyright (c) 2005 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#ifdef CONFIG_MUXERS
+/* simple formats */
+static int raw_write_header(struct AVFormatContext *s)
+{
+ return 0;
+}
+
+static int flac_write_header(struct AVFormatContext *s)
+{
+ static const uint8_t header[8] = {
+ 0x66, 0x4C, 0x61, 0x43, 0x80, 0x00, 0x00, 0x22
+ };
+ uint8_t *streaminfo = s->streams[0]->codec->extradata;
+ int len = s->streams[0]->codec->extradata_size;
+ if(streaminfo != NULL && len > 0) {
+ put_buffer(&s->pb, header, 8);
+ put_buffer(&s->pb, streaminfo, len);
+ }
+ return 0;
+}
+
+static int raw_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ put_buffer(&s->pb, pkt->data, pkt->size);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int raw_write_trailer(struct AVFormatContext *s)
+{
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/* raw input */
+static int raw_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ AVStream *st;
+ int id;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ id = s->iformat->value;
+ if (id == CODEC_ID_RAWVIDEO) {
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ } else {
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ }
+ st->codec->codec_id = id;
+
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ st->codec->sample_rate = ap->sample_rate;
+ st->codec->channels = ap->channels;
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+ break;
+ case CODEC_TYPE_VIDEO:
+ av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den);
+ st->codec->width = ap->width;
+ st->codec->height = ap->height;
+ st->codec->pix_fmt = ap->pix_fmt;
+ if(st->codec->pix_fmt == PIX_FMT_NONE)
+ st->codec->pix_fmt= PIX_FMT_YUV420P;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+#define RAW_PACKET_SIZE 1024
+
+static int raw_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, size;
+ // AVStream *st = s->streams[0];
+
+ size= RAW_PACKET_SIZE;
+
+ ret= av_get_packet(&s->pb, pkt, size);
+
+ pkt->stream_index = 0;
+ if (ret <= 0) {
+ return AVERROR_IO;
+ }
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return ret;
+}
+
+static int raw_read_partial_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, size;
+
+ size = RAW_PACKET_SIZE;
+
+ if (av_new_packet(pkt, size) < 0)
+ return AVERROR_IO;
+
+ pkt->pos= url_ftell(&s->pb);
+ pkt->stream_index = 0;
+ ret = get_partial_buffer(&s->pb, pkt->data, size);
+ if (ret <= 0) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ pkt->size = ret;
+ return ret;
+}
+
+// http://www.artificis.hu/files/texts/ingenient.txt
+static int ingenient_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret, size, w, h, unk1, unk2;
+
+ if (get_le32(&s->pb) != MKTAG('M', 'J', 'P', 'G'))
+ return AVERROR_IO; // FIXME
+
+ size = get_le32(&s->pb);
+
+ w = get_le16(&s->pb);
+ h = get_le16(&s->pb);
+
+ url_fskip(&s->pb, 8); // zero + size (padded?)
+ url_fskip(&s->pb, 2);
+ unk1 = get_le16(&s->pb);
+ unk2 = get_le16(&s->pb);
+ url_fskip(&s->pb, 22); // ascii timestamp
+
+ av_log(NULL, AV_LOG_DEBUG, "Ingenient packet: size=%d, width=%d, height=%d, unk1=%d unk2=%d\n",
+ size, w, h, unk1, unk2);
+
+ if (av_new_packet(pkt, size) < 0)
+ return AVERROR_IO;
+
+ pkt->pos = url_ftell(&s->pb);
+ pkt->stream_index = 0;
+ ret = get_buffer(&s->pb, pkt->data, size);
+ if (ret <= 0) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ pkt->size = ret;
+ return ret;
+}
+
+static int raw_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+int pcm_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ AVStream *st;
+ int block_align, byte_rate;
+ int64_t pos;
+
+ st = s->streams[0];
+
+ block_align = st->codec->block_align ? st->codec->block_align :
+ (av_get_bits_per_sample(st->codec->codec_id) * st->codec->channels) >> 3;
+ byte_rate = st->codec->bit_rate ? st->codec->bit_rate >> 3 :
+ block_align * st->codec->sample_rate;
+
+ if (block_align <= 0 || byte_rate <= 0)
+ return -1;
+
+ /* convert the timestamp to a byte position, rounded to a whole number of
+ block_align-sized frames (down for backward seeks, up otherwise) */
+ pos = av_rescale_rnd(timestamp * byte_rate,
+ st->time_base.num,
+ st->time_base.den * (int64_t)block_align,
+ (flags & AVSEEK_FLAG_BACKWARD) ? AV_ROUND_DOWN : AV_ROUND_UP);
+ pos *= block_align;
+
+ /* recompute the exact timestamp for the aligned byte position */
+ st->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);
+ url_fseek(&s->pb, pos + s->data_offset, SEEK_SET);
+ return 0;
+}
+
+/* ac3 read */
+static int ac3_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_AC3;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+static int shorten_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_SHORTEN;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+/* flac read */
+static int flac_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_FLAC;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+/* dts read */
+static int dts_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_DTS;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+/* aac read */
+static int aac_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_AAC;
+ st->need_parsing = 1;
+ /* the parameters will be extracted from the compressed bitstream */
+ return 0;
+}
+
+/* mpeg1/h263 input */
+static int video_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ AVStream *st;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = s->iformat->value;
+ st->need_parsing = 1;
+
+ /* for mjpeg, specify the frame rate */
+ /* for mpeg4, specify it too (most mpeg4 streams don't have the fixed_vop_rate flag set) */
+ if (ap->time_base.num) {
+ av_set_pts_info(st, 64, ap->time_base.num, ap->time_base.den);
+ } else if ( st->codec->codec_id == CODEC_ID_MJPEG ||
+ st->codec->codec_id == CODEC_ID_MPEG4 ||
+ st->codec->codec_id == CODEC_ID_H264) {
+ av_set_pts_info(st, 64, 1, 25);
+ }
+
+ return 0;
+}
+
+#define SEQ_START_CODE 0x000001b3
+#define GOP_START_CODE 0x000001b8
+#define PICTURE_START_CODE 0x00000100
+#define SLICE_START_CODE 0x00000101
+#define PACK_START_CODE 0x000001ba
+#define VIDEO_ID 0x000001e0
+#define AUDIO_ID 0x000001c0
+
+static int mpegvideo_probe(AVProbeData *p)
+{
+ uint32_t code= -1;
+ int pic=0, seq=0, slice=0, pspack=0, pes=0;
+ int i;
+
+ for(i=0; i<p->buf_size; i++){
+ code = (code<<8) + p->buf[i];
+ if ((code & 0xffffff00) == 0x100) {
+ switch(code){
+ case SEQ_START_CODE: seq++; break;
+ case PICTURE_START_CODE: pic++; break;
+ case SLICE_START_CODE: slice++; break;
+ case PACK_START_CODE: pspack++; break;
+ case VIDEO_ID:
+ case AUDIO_ID: pes++; break;
+ }
+ }
+ }
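+ /* Heuristic (a reading of the condition below, not a spec rule): accept
+ when there is at least one sequence header, roughly one picture per
+ sequence header and one slice per picture (within 10%), and no pack or
+ PES start codes, which would indicate a program stream instead of a
+ raw video elementary stream. */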
+ if(seq && seq*9<=pic*10 && pic*9<=slice*10 && !pspack && !pes)
+ return AVPROBE_SCORE_MAX/2+1; // +1 for .mpg
+ return 0;
+}
+
+#define VIDEO_OBJECT_START_CODE 0x00000100
+#define VIDEO_OBJECT_LAYER_START_CODE 0x00000120
+#define VISUAL_OBJECT_START_CODE 0x000001b5
+#define VOP_START_CODE 0x000001b6
+
+static int mpeg4video_probe(AVProbeData *probe_packet)
+{
+ uint32_t temp_buffer= -1;
+ int VO=0, VOL=0, VOP = 0, VISO = 0;
+ int i;
+
+ for(i=0; i<probe_packet->buf_size; i++){
+ temp_buffer = (temp_buffer<<8) + probe_packet->buf[i];
+ if ((temp_buffer & 0xffffff00) == 0x100) {
+ switch(temp_buffer){
+ case VOP_START_CODE: VOP++; break;
+ case VISUAL_OBJECT_START_CODE: VISO++; break;
+ }
+ switch(temp_buffer & 0xfffffff0){
+ case VIDEO_OBJECT_START_CODE: VO++; break;
+ case VIDEO_OBJECT_LAYER_START_CODE: VOL++; break;
+ }
+ }
+ }
+
+ if ( VOP >= VISO && VOP >= VOL && VO >= VOL && VOL > 0)
+ return AVPROBE_SCORE_MAX/2;
+ return 0;
+}
+
+static int h263_probe(AVProbeData *p)
+{
+ int code;
+ const uint8_t *d;
+
+ if (p->buf_size < 6)
+ return 0;
+ d = p->buf;
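+ /* Pack the first 22 bits of the buffer; the H.263 picture start code is
+ 0000 0000 0000 0000 1000 00, i.e. 0x20 in this packing. */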
+ code = (d[0] << 14) | (d[1] << 6) | (d[2] >> 2);
+ if (code == 0x20) {
+ return 50;
+ }
+ return 0;
+}
+
+static int h261_probe(AVProbeData *p)
+{
+ int code;
+ const uint8_t *d;
+
+ if (p->buf_size < 6)
+ return 0;
+ d = p->buf;
+ code = (d[0] << 12) | (d[1] << 4) | (d[2] >> 4);
+ if (code == 0x10) {
+ return 50;
+ }
+ return 0;
+}
+
+AVInputFormat shorten_demuxer = {
+ "shn",
+ "raw shorten",
+ 0,
+ NULL,
+ shorten_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "shn",
+};
+
+AVInputFormat flac_demuxer = {
+ "flac",
+ "raw flac",
+ 0,
+ NULL,
+ flac_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "flac",
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat flac_muxer = {
+ "flac",
+ "raw flac",
+ "audio/x-flac",
+ "flac",
+ 0,
+ CODEC_ID_FLAC,
+ 0,
+ flac_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat ac3_demuxer = {
+ "ac3",
+ "raw ac3",
+ 0,
+ NULL,
+ ac3_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "ac3",
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat ac3_muxer = {
+ "ac3",
+ "raw ac3",
+ "audio/x-ac3",
+ "ac3",
+ 0,
+ CODEC_ID_AC3,
+ 0,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat dts_demuxer = {
+ "dts",
+ "raw dts",
+ 0,
+ NULL,
+ dts_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "dts",
+};
+
+AVInputFormat aac_demuxer = {
+ "aac",
+ "ADTS AAC",
+ 0,
+ NULL,
+ aac_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "aac",
+};
+
+AVInputFormat h261_demuxer = {
+ "h261",
+ "raw h261",
+ 0,
+ h261_probe,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "h261",
+ .value = CODEC_ID_H261,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat h261_muxer = {
+ "h261",
+ "raw h261",
+ "video/x-h261",
+ "h261",
+ 0,
+ 0,
+ CODEC_ID_H261,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat h263_demuxer = {
+ "h263",
+ "raw h263",
+ 0,
+ h263_probe,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+// .extensions = "h263", //FIXME remove after writing mpeg4_probe
+ .value = CODEC_ID_H263,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat h263_muxer = {
+ "h263",
+ "raw h263",
+ "video/x-h263",
+ "h263",
+ 0,
+ 0,
+ CODEC_ID_H263,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat m4v_demuxer = {
+ "m4v",
+ "raw MPEG4 video format",
+ 0,
+ mpeg4video_probe, /** probing for mpeg4 data */
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "m4v", //FIXME remove after writing mpeg4_probe
+ .value = CODEC_ID_MPEG4,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat m4v_muxer = {
+ "m4v",
+ "raw MPEG4 video format",
+ NULL,
+ "m4v",
+ 0,
+ CODEC_ID_NONE,
+ CODEC_ID_MPEG4,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat h264_demuxer = {
+ "h264",
+ "raw H264 video format",
+ 0,
+ NULL /*mpegvideo_probe*/,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "h26l,h264,264", //FIXME remove after writing mpeg4_probe
+ .value = CODEC_ID_H264,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat h264_muxer = {
+ "h264",
+ "raw H264 video format",
+ NULL,
+ "h264",
+ 0,
+ CODEC_ID_NONE,
+ CODEC_ID_H264,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat mpegvideo_demuxer = {
+ "mpegvideo",
+ "MPEG video",
+ 0,
+ mpegvideo_probe,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .value = CODEC_ID_MPEG1VIDEO,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat mpeg1video_muxer = {
+ "mpeg1video",
+ "MPEG video",
+ "video/x-mpeg",
+ "mpg,mpeg,m1v",
+ 0,
+ 0,
+ CODEC_ID_MPEG1VIDEO,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat mpeg2video_muxer = {
+ "mpeg2video",
+ "MPEG2 video",
+ NULL,
+ "m2v",
+ 0,
+ 0,
+ CODEC_ID_MPEG2VIDEO,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+AVInputFormat mjpeg_demuxer = {
+ "mjpeg",
+ "MJPEG video",
+ 0,
+ NULL,
+ video_read_header,
+ raw_read_partial_packet,
+ raw_read_close,
+ .extensions = "mjpg,mjpeg",
+ .value = CODEC_ID_MJPEG,
+};
+
+AVInputFormat ingenient_demuxer = {
+ "ingenient",
+ "Ingenient MJPEG",
+ 0,
+ NULL,
+ video_read_header,
+ ingenient_read_packet,
+ raw_read_close,
+ .extensions = "cgi", // FIXME
+ .value = CODEC_ID_MJPEG,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat mjpeg_muxer = {
+ "mjpeg",
+ "MJPEG video",
+ "video/x-mjpeg",
+ "mjpg,mjpeg",
+ 0,
+ 0,
+ CODEC_ID_MJPEG,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+/* pcm formats */
+
+#define PCMINPUTDEF(name, long_name, ext, codec) \
+AVInputFormat pcm_ ## name ## _demuxer = {\
+ #name,\
+ long_name,\
+ 0,\
+ NULL,\
+ raw_read_header,\
+ raw_read_packet,\
+ raw_read_close,\
+ pcm_read_seek,\
+ .extensions = ext,\
+ .value = codec,\
+};
+
+#define PCMOUTPUTDEF(name, long_name, ext, codec) \
+AVOutputFormat pcm_ ## name ## _muxer = {\
+ #name,\
+ long_name,\
+ NULL,\
+ ext,\
+ 0,\
+ codec,\
+ 0,\
+ raw_write_header,\
+ raw_write_packet,\
+ raw_write_trailer,\
+ .flags= AVFMT_NOTIMESTAMPS,\
+};
+
+
+#if !defined(CONFIG_MUXERS) && defined(CONFIG_DEMUXERS)
+#define PCMDEF(name, long_name, ext, codec) \
+ PCMINPUTDEF(name, long_name, ext, codec)
+#elif defined(CONFIG_MUXERS) && !defined(CONFIG_DEMUXERS)
+#define PCMDEF(name, long_name, ext, codec) \
+ PCMOUTPUTDEF(name, long_name, ext, codec)
+#elif defined(CONFIG_MUXERS) && defined(CONFIG_DEMUXERS)
+#define PCMDEF(name, long_name, ext, codec) \
+ PCMINPUTDEF(name, long_name, ext, codec)\
+ PCMOUTPUTDEF(name, long_name, ext, codec)
+#else
+#define PCMDEF(name, long_name, ext, codec)
+#endif
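+/* PCMDEF expands to a demuxer definition, a muxer definition, both or
+ * nothing, depending on which of CONFIG_DEMUXERS and CONFIG_MUXERS is set. */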
+
+#ifdef WORDS_BIGENDIAN
+#define BE_DEF(s) s
+#define LE_DEF(s) NULL
+#else
+#define BE_DEF(s) NULL
+#define LE_DEF(s) s
+#endif
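+/* Only the host-native byte order keeps its file name extension, so
+ * extension based probing picks the native-endian PCM variant. */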
+
+
+PCMDEF(s16le, "pcm signed 16 bit little endian format",
+ LE_DEF("sw"), CODEC_ID_PCM_S16LE)
+
+PCMDEF(s16be, "pcm signed 16 bit big endian format",
+ BE_DEF("sw"), CODEC_ID_PCM_S16BE)
+
+PCMDEF(u16le, "pcm unsigned 16 bit little endian format",
+ LE_DEF("uw"), CODEC_ID_PCM_U16LE)
+
+PCMDEF(u16be, "pcm unsigned 16 bit big endian format",
+ BE_DEF("uw"), CODEC_ID_PCM_U16BE)
+
+PCMDEF(s8, "pcm signed 8 bit format",
+ "sb", CODEC_ID_PCM_S8)
+
+PCMDEF(u8, "pcm unsigned 8 bit format",
+ "ub", CODEC_ID_PCM_U8)
+
+PCMDEF(mulaw, "pcm mu law format",
+ "ul", CODEC_ID_PCM_MULAW)
+
+PCMDEF(alaw, "pcm A law format",
+ "al", CODEC_ID_PCM_ALAW)
+
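+/* One raw frame per packet: the packet size is derived from the stream's
+ * pixel format and picture dimensions. */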
+static int rawvideo_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int packet_size, ret, width, height;
+ AVStream *st = s->streams[0];
+
+ width = st->codec->width;
+ height = st->codec->height;
+
+ packet_size = avpicture_get_size(st->codec->pix_fmt, width, height);
+ if (packet_size < 0)
+ return -1;
+
+ ret= av_get_packet(&s->pb, pkt, packet_size);
+
+ pkt->stream_index = 0;
+ if (ret != packet_size) {
+ return AVERROR_IO;
+ } else {
+ return 0;
+ }
+}
+
+AVInputFormat rawvideo_demuxer = {
+ "rawvideo",
+ "raw video format",
+ 0,
+ NULL,
+ raw_read_header,
+ rawvideo_read_packet,
+ raw_read_close,
+ .extensions = "yuv,cif,qcif",
+ .value = CODEC_ID_RAWVIDEO,
+};
+
+#ifdef CONFIG_MUXERS
+AVOutputFormat rawvideo_muxer = {
+ "rawvideo",
+ "raw video format",
+ NULL,
+ "yuv",
+ 0,
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ raw_write_header,
+ raw_write_packet,
+ raw_write_trailer,
+ .flags= AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
+
+#ifdef CONFIG_MUXERS
+static int null_write_packet(struct AVFormatContext *s, AVPacket *pkt)
+{
+ return 0;
+}
+
+AVOutputFormat null_muxer = {
+ "null",
+ "null video format",
+ NULL,
+ NULL,
+ 0,
+#ifdef WORDS_BIGENDIAN
+ CODEC_ID_PCM_S16BE,
+#else
+ CODEC_ID_PCM_S16LE,
+#endif
+ CODEC_ID_RAWVIDEO,
+ raw_write_header,
+ null_write_packet,
+ raw_write_trailer,
+ .flags = AVFMT_NOFILE | AVFMT_RAWPICTURE | AVFMT_NOTIMESTAMPS,
+};
+#endif //CONFIG_MUXERS
diff --git a/contrib/ffmpeg/libavformat/riff.c b/contrib/ffmpeg/libavformat/riff.c
new file mode 100644
index 000000000..d315c66af
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/riff.c
@@ -0,0 +1,468 @@
+/*
+ * RIFF codec tags
+ * Copyright (c) 2000 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "avcodec.h"
+#include "riff.h"
+
+/* Note: when encoding, the first matching tag is used, so order is
+   important if multiple tags are possible for a given codec. */
+const CodecTag codec_bmp_tags[] = {
+ { CODEC_ID_H264, MKTAG('H', '2', '6', '4') },
+ { CODEC_ID_H264, MKTAG('h', '2', '6', '4') },
+ { CODEC_ID_H264, MKTAG('X', '2', '6', '4') },
+ { CODEC_ID_H264, MKTAG('x', '2', '6', '4') },
+ { CODEC_ID_H264, MKTAG('a', 'v', 'c', '1') },
+ { CODEC_ID_H264, MKTAG('V', 'S', 'S', 'H') },
+
+ { CODEC_ID_H263, MKTAG('H', '2', '6', '3') },
+ { CODEC_ID_H263P, MKTAG('H', '2', '6', '3') },
+ { CODEC_ID_H263I, MKTAG('I', '2', '6', '3') }, /* intel h263 */
+ { CODEC_ID_H261, MKTAG('H', '2', '6', '1') },
+
+ /* added based on MPlayer */
+ { CODEC_ID_H263P, MKTAG('U', '2', '6', '3') },
+ { CODEC_ID_H263P, MKTAG('v', 'i', 'v', '1') },
+
+ { CODEC_ID_MPEG4, MKTAG('F', 'M', 'P', '4')},
+ { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', 'X'), .invalid_asf = 1 },
+ { CODEC_ID_MPEG4, MKTAG('D', 'X', '5', '0'), .invalid_asf = 1 },
+ { CODEC_ID_MPEG4, MKTAG('X', 'V', 'I', 'D'), .invalid_asf = 1 },
+ { CODEC_ID_MPEG4, MKTAG('M', 'P', '4', 'S') },
+ { CODEC_ID_MPEG4, MKTAG('M', '4', 'S', '2') },
+ { CODEC_ID_MPEG4, MKTAG(0x04, 0, 0, 0) }, /* some broken avi use this */
+
+ /* added based on MPlayer */
+ { CODEC_ID_MPEG4, MKTAG('D', 'I', 'V', '1') },
+ { CODEC_ID_MPEG4, MKTAG('B', 'L', 'Z', '0') },
+ { CODEC_ID_MPEG4, MKTAG('m', 'p', '4', 'v') },
+ { CODEC_ID_MPEG4, MKTAG('U', 'M', 'P', '4') },
+ { CODEC_ID_MPEG4, MKTAG('W', 'V', '1', 'F') },
+ { CODEC_ID_MPEG4, MKTAG('S', 'E', 'D', 'G') },
+
+ { CODEC_ID_MPEG4, MKTAG('R', 'M', 'P', '4') },
+
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '3'), .invalid_asf = 1 }, /* default signature when using MSMPEG4 */
+ { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', '4', '3') },
+
+ /* added based on MPlayer */
+ { CODEC_ID_MSMPEG4V3, MKTAG('M', 'P', 'G', '3') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '5') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '6') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('D', 'I', 'V', '4') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('A', 'P', '4', '1') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '1') },
+ { CODEC_ID_MSMPEG4V3, MKTAG('C', 'O', 'L', '0') },
+
+ { CODEC_ID_MSMPEG4V2, MKTAG('M', 'P', '4', '2') },
+
+ /* added based on MPlayer */
+ { CODEC_ID_MSMPEG4V2, MKTAG('D', 'I', 'V', '2') },
+
+ { CODEC_ID_MSMPEG4V1, MKTAG('M', 'P', 'G', '4') },
+
+ { CODEC_ID_WMV1, MKTAG('W', 'M', 'V', '1') },
+
+ /* added based on MPlayer */
+ { CODEC_ID_WMV2, MKTAG('W', 'M', 'V', '2') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'd') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 'h', 'd') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', 's', 'l') },
+ { CODEC_ID_DVVIDEO, MKTAG('d', 'v', '2', '5') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '1') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('m', 'p', 'g', '2') },
+ { CODEC_ID_MPEG2VIDEO, MKTAG('m', 'p', 'g', '2') },
+ { CODEC_ID_MPEG2VIDEO, MKTAG('M', 'P', 'E', 'G') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('P', 'I', 'M', '1') },
+ { CODEC_ID_MPEG1VIDEO, MKTAG('V', 'C', 'R', '2') },
+ { CODEC_ID_MPEG1VIDEO, 0x10000001 },
+ { CODEC_ID_MPEG2VIDEO, 0x10000002 },
+ { CODEC_ID_MPEG2VIDEO, MKTAG('D', 'V', 'R', ' ') },
+ { CODEC_ID_MJPEG, MKTAG('M', 'J', 'P', 'G') },
+ { CODEC_ID_MJPEG, MKTAG('L', 'J', 'P', 'G') },
+ { CODEC_ID_LJPEG, MKTAG('L', 'J', 'P', 'G') },
+ { CODEC_ID_MJPEG, MKTAG('J', 'P', 'G', 'L') }, /* Pegasus lossless JPEG */
+ { CODEC_ID_MJPEG, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - decoder */
+ { CODEC_ID_JPEGLS, MKTAG('M', 'J', 'L', 'S') }, /* JPEG-LS custom FOURCC for avi - encoder */
+ { CODEC_ID_HUFFYUV, MKTAG('H', 'F', 'Y', 'U') },
+ { CODEC_ID_FFVHUFF, MKTAG('F', 'F', 'V', 'H') },
+ { CODEC_ID_CYUV, MKTAG('C', 'Y', 'U', 'V') },
+ { CODEC_ID_RAWVIDEO, 0 },
+ { CODEC_ID_RAWVIDEO, MKTAG('I', '4', '2', '0') },
+ { CODEC_ID_RAWVIDEO, MKTAG('Y', 'U', 'Y', '2') },
+ { CODEC_ID_RAWVIDEO, MKTAG('Y', '4', '2', '2') },
+ { CODEC_ID_RAWVIDEO, MKTAG('Y', 'V', '1', '2') },
+ { CODEC_ID_RAWVIDEO, MKTAG('U', 'Y', 'V', 'Y') },
+ { CODEC_ID_RAWVIDEO, MKTAG('I', 'Y', 'U', 'V') },
+ { CODEC_ID_RAWVIDEO, MKTAG('Y', '8', '0', '0') },
+ { CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '1') },
+ { CODEC_ID_INDEO3, MKTAG('I', 'V', '3', '2') },
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '1') },
+ { CODEC_ID_VP3, MKTAG('V', 'P', '3', '0') },
+ { CODEC_ID_VP5, MKTAG('V', 'P', '5', '0') },
+ { CODEC_ID_VP6, MKTAG('V', 'P', '6', '2') },
+ { CODEC_ID_ASV1, MKTAG('A', 'S', 'V', '1') },
+ { CODEC_ID_ASV2, MKTAG('A', 'S', 'V', '2') },
+ { CODEC_ID_VCR1, MKTAG('V', 'C', 'R', '1') },
+ { CODEC_ID_FFV1, MKTAG('F', 'F', 'V', '1') },
+ { CODEC_ID_XAN_WC4, MKTAG('X', 'x', 'a', 'n') },
+ { CODEC_ID_MSRLE, MKTAG('m', 'r', 'l', 'e') },
+ { CODEC_ID_MSRLE, MKTAG(0x1, 0x0, 0x0, 0x0) },
+ { CODEC_ID_MSVIDEO1, MKTAG('M', 'S', 'V', 'C') },
+ { CODEC_ID_MSVIDEO1, MKTAG('m', 's', 'v', 'c') },
+ { CODEC_ID_MSVIDEO1, MKTAG('C', 'R', 'A', 'M') },
+ { CODEC_ID_MSVIDEO1, MKTAG('c', 'r', 'a', 'm') },
+ { CODEC_ID_MSVIDEO1, MKTAG('W', 'H', 'A', 'M') },
+ { CODEC_ID_MSVIDEO1, MKTAG('w', 'h', 'a', 'm') },
+ { CODEC_ID_CINEPAK, MKTAG('c', 'v', 'i', 'd') },
+ { CODEC_ID_TRUEMOTION1, MKTAG('D', 'U', 'C', 'K') },
+ { CODEC_ID_MSZH, MKTAG('M', 'S', 'Z', 'H') },
+ { CODEC_ID_ZLIB, MKTAG('Z', 'L', 'I', 'B') },
+ { CODEC_ID_SNOW, MKTAG('S', 'N', 'O', 'W') },
+ { CODEC_ID_4XM, MKTAG('4', 'X', 'M', 'V') },
+ { CODEC_ID_FLV1, MKTAG('F', 'L', 'V', '1') },
+ { CODEC_ID_FLASHSV, MKTAG('F', 'S', 'V', '1') },
+ { CODEC_ID_VP6F, MKTAG('V', 'P', '6', 'F') },
+ { CODEC_ID_SVQ1, MKTAG('s', 'v', 'q', '1') },
+ { CODEC_ID_TSCC, MKTAG('t', 's', 'c', 'c') },
+ { CODEC_ID_ULTI, MKTAG('U', 'L', 'T', 'I') },
+ { CODEC_ID_VIXL, MKTAG('V', 'I', 'X', 'L') },
+ { CODEC_ID_QPEG, MKTAG('Q', 'P', 'E', 'G') },
+ { CODEC_ID_QPEG, MKTAG('Q', '1', '.', '0') },
+ { CODEC_ID_QPEG, MKTAG('Q', '1', '.', '1') },
+ { CODEC_ID_WMV3, MKTAG('W', 'M', 'V', '3') },
+ { CODEC_ID_VC1, MKTAG('W', 'V', 'C', '1') },
+ { CODEC_ID_LOCO, MKTAG('L', 'O', 'C', 'O') },
+ { CODEC_ID_WNV1, MKTAG('W', 'N', 'V', '1') },
+ { CODEC_ID_AASC, MKTAG('A', 'A', 'S', 'C') },
+ { CODEC_ID_INDEO2, MKTAG('R', 'T', '2', '1') },
+ { CODEC_ID_FRAPS, MKTAG('F', 'P', 'S', '1') },
+ { CODEC_ID_THEORA, MKTAG('t', 'h', 'e', 'o') },
+ { CODEC_ID_TRUEMOTION2, MKTAG('T', 'M', '2', '0') },
+ { CODEC_ID_CSCD, MKTAG('C', 'S', 'C', 'D') },
+ { CODEC_ID_ZMBV, MKTAG('Z', 'M', 'B', 'V') },
+ { CODEC_ID_KMVC, MKTAG('K', 'M', 'V', 'C') },
+ { CODEC_ID_CAVS, MKTAG('C', 'A', 'V', 'S') },
+ { CODEC_ID_JPEG2000, MKTAG('M', 'J', '2', 'C') },
+ { CODEC_ID_VMNC, MKTAG('V', 'M', 'n', 'c') },
+ { CODEC_ID_NONE, 0 },
+};
+
+const CodecTag codec_wav_tags[] = {
+ { CODEC_ID_MP2, 0x50 },
+ { CODEC_ID_MP3, 0x55 },
+ { CODEC_ID_AC3, 0x2000 },
+ { CODEC_ID_DTS, 0x2001 },
+ { CODEC_ID_PCM_S16LE, 0x01 },
+ { CODEC_ID_PCM_U8, 0x01 }, /* must come after s16le in this list */
+ { CODEC_ID_PCM_S24LE, 0x01 },
+ { CODEC_ID_PCM_S32LE, 0x01 },
+ { CODEC_ID_PCM_ALAW, 0x06 },
+ { CODEC_ID_PCM_MULAW, 0x07 },
+ { CODEC_ID_ADPCM_MS, 0x02 },
+ { CODEC_ID_ADPCM_IMA_WAV, 0x11 },
+ { CODEC_ID_ADPCM_YAMAHA, 0x20 },
+ { CODEC_ID_ADPCM_G726, 0x45 },
+ { CODEC_ID_ADPCM_IMA_DK4, 0x61 }, /* rogue format number */
+ { CODEC_ID_ADPCM_IMA_DK3, 0x62 }, /* rogue format number */
+ { CODEC_ID_WMAV1, 0x160 },
+ { CODEC_ID_WMAV2, 0x161 },
+ { CODEC_ID_AAC, 0x706d },
+ { CODEC_ID_AAC, 0xff },
+ { CODEC_ID_VORBIS, ('V'<<8)+'o' }, //HACK/FIXME, does vorbis in WAV/AVI have an (in)official id?
+ { CODEC_ID_SONIC, 0x2048 },
+ { CODEC_ID_SONIC_LS, 0x2048 },
+ { CODEC_ID_ADPCM_CT, 0x200 },
+ { CODEC_ID_ADPCM_SWF, ('S'<<8)+'F' },
+ { CODEC_ID_TRUESPEECH, 0x22 },
+ { CODEC_ID_FLAC, 0xF1AC },
+ { CODEC_ID_IMC, 0x401 },
+
+ /* FIXME: All of the IDs below are not 16 bit and thus illegal. */
+ // for NuppelVideo (nuv.c)
+ { CODEC_ID_PCM_S16LE, MKTAG('R', 'A', 'W', 'A') },
+ { CODEC_ID_MP3, MKTAG('L', 'A', 'M', 'E') },
+ { CODEC_ID_MP3, MKTAG('M', 'P', '3', ' ') },
+ { 0, 0 },
+};
+
+unsigned int codec_get_tag(const CodecTag *tags, int id)
+{
+ while (tags->id != CODEC_ID_NONE) {
+ if (tags->id == id)
+ return tags->tag;
+ tags++;
+ }
+ return 0;
+}
+
+unsigned int codec_get_asf_tag(const CodecTag *tags, unsigned int id)
+{
+ while (tags->id != CODEC_ID_NONE) {
+ if (!tags->invalid_asf && tags->id == id)
+ return tags->tag;
+ tags++;
+ }
+ return 0;
+}
+
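+/* Case-insensitive fourcc lookup: each byte of the tag is compared through
+ * toupper(), so e.g. 'divx' and 'DIVX' resolve to the same codec id. */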
+enum CodecID codec_get_id(const CodecTag *tags, unsigned int tag)
+{
+ while (tags->id != CODEC_ID_NONE) {
+ if( toupper((tag >> 0)&0xFF) == toupper((tags->tag >> 0)&0xFF)
+ && toupper((tag >> 8)&0xFF) == toupper((tags->tag >> 8)&0xFF)
+ && toupper((tag >>16)&0xFF) == toupper((tags->tag >>16)&0xFF)
+ && toupper((tag >>24)&0xFF) == toupper((tags->tag >>24)&0xFF))
+ return tags->id;
+ tags++;
+ }
+ return CODEC_ID_NONE;
+}
+
+unsigned int codec_get_bmp_tag(int id)
+{
+ return codec_get_tag(codec_bmp_tags, id);
+}
+
+unsigned int codec_get_wav_tag(int id)
+{
+ return codec_get_tag(codec_wav_tags, id);
+}
+
+enum CodecID codec_get_bmp_id(unsigned int tag)
+{
+ return codec_get_id(codec_bmp_tags, tag);
+}
+
+enum CodecID codec_get_wav_id(unsigned int tag)
+{
+ return codec_get_id(codec_wav_tags, tag);
+}
+
+#ifdef CONFIG_MUXERS
+offset_t start_tag(ByteIOContext *pb, const char *tag)
+{
+ put_tag(pb, tag);
+ put_le32(pb, 0);
+ return url_ftell(pb);
+}
+
+void end_tag(ByteIOContext *pb, offset_t start)
+{
+ offset_t pos;
+
+ pos = url_ftell(pb);
+ url_fseek(pb, start - 4, SEEK_SET);
+ put_le32(pb, (uint32_t)(pos - start));
+ url_fseek(pb, pos, SEEK_SET);
+}
+
+/* WAVEFORMATEX header */
+/* returns the size or -1 on error */
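+/* The returned size starts from the 18 byte WAVEFORMATEX base and is
+ * adjusted as codec specific extra fields are written below. */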
+int put_wav_header(ByteIOContext *pb, AVCodecContext *enc)
+{
+ int bps, blkalign, bytespersec;
+ int hdrsize = 18;
+
+ if(!enc->codec_tag || enc->codec_tag > 0xffff)
+ enc->codec_tag = codec_get_tag(codec_wav_tags, enc->codec_id);
+ if(!enc->codec_tag || enc->codec_tag > 0xffff)
+ return -1;
+
+ put_le16(pb, enc->codec_tag);
+ put_le16(pb, enc->channels);
+ put_le32(pb, enc->sample_rate);
+ if (enc->codec_id == CODEC_ID_PCM_U8 ||
+ enc->codec_id == CODEC_ID_PCM_ALAW ||
+ enc->codec_id == CODEC_ID_PCM_MULAW) {
+ bps = 8;
+ } else if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
+ bps = 0;
+ } else if (enc->codec_id == CODEC_ID_ADPCM_IMA_WAV || enc->codec_id == CODEC_ID_ADPCM_MS || enc->codec_id == CODEC_ID_ADPCM_G726 || enc->codec_id == CODEC_ID_ADPCM_YAMAHA) { //
+ bps = 4;
+ } else if (enc->codec_id == CODEC_ID_PCM_S24LE) {
+ bps = 24;
+ } else if (enc->codec_id == CODEC_ID_PCM_S32LE) {
+ bps = 32;
+ } else {
+ bps = 16;
+ }
+
+ if (enc->codec_id == CODEC_ID_MP2 || enc->codec_id == CODEC_ID_MP3) {
+ blkalign = enc->frame_size; //this is wrong, but it seems many demuxers don't work if this is set correctly
+ //blkalign = 144 * enc->bit_rate/enc->sample_rate;
+ } else if (enc->codec_id == CODEC_ID_ADPCM_G726) { //
+ blkalign = 1;
+ } else if (enc->block_align != 0) { /* specified by the codec */
+ blkalign = enc->block_align;
+ } else
+ blkalign = enc->channels*bps >> 3;
+ if (enc->codec_id == CODEC_ID_PCM_U8 ||
+ enc->codec_id == CODEC_ID_PCM_S24LE ||
+ enc->codec_id == CODEC_ID_PCM_S32LE ||
+ enc->codec_id == CODEC_ID_PCM_S16LE) {
+ bytespersec = enc->sample_rate * blkalign;
+ } else {
+ bytespersec = enc->bit_rate / 8;
+ }
+ put_le32(pb, bytespersec); /* bytes per second */
+ put_le16(pb, blkalign); /* block align */
+ put_le16(pb, bps); /* bits per sample */
+ if (enc->codec_id == CODEC_ID_MP3) {
+ put_le16(pb, 12); /* wav_extra_size */
+ hdrsize += 12;
+ put_le16(pb, 1); /* wID */
+ put_le32(pb, 2); /* fdwFlags */
+ put_le16(pb, 1152); /* nBlockSize */
+ put_le16(pb, 1); /* nFramesPerBlock */
+ put_le16(pb, 1393); /* nCodecDelay */
+ } else if (enc->codec_id == CODEC_ID_MP2) {
+ put_le16(pb, 22); /* wav_extra_size */
+ hdrsize += 22;
+ put_le16(pb, 2); /* fwHeadLayer */
+ put_le32(pb, enc->bit_rate); /* dwHeadBitrate */
+ put_le16(pb, enc->channels == 2 ? 1 : 8); /* fwHeadMode */
+ put_le16(pb, 0); /* fwHeadModeExt */
+ put_le16(pb, 1); /* wHeadEmphasis */
+ put_le16(pb, 16); /* fwHeadFlags */
+ put_le32(pb, 0); /* dwPTSLow */
+ put_le32(pb, 0); /* dwPTSHigh */
+ } else if (enc->codec_id == CODEC_ID_ADPCM_IMA_WAV) {
+ put_le16(pb, 2); /* wav_extra_size */
+ hdrsize += 2;
+ put_le16(pb, ((enc->block_align - 4 * enc->channels) / (4 * enc->channels)) * 8 + 1); /* wSamplesPerBlock */
+ } else if(enc->extradata_size){
+ put_le16(pb, enc->extradata_size);
+ put_buffer(pb, enc->extradata, enc->extradata_size);
+ hdrsize += enc->extradata_size;
+ if(hdrsize&1){
+ hdrsize++;
+ put_byte(pb, 0);
+ }
+ } else {
+ hdrsize -= 2;
+ }
+
+ return hdrsize;
+}
+
+/* BITMAPINFOHEADER header */
+void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags, int for_asf)
+{
+ put_le32(pb, 40 + enc->extradata_size); /* size */
+ put_le32(pb, enc->width);
+ put_le32(pb, enc->height);
+ put_le16(pb, 1); /* planes */
+
+ put_le16(pb, enc->bits_per_sample ? enc->bits_per_sample : 24); /* depth */
+ /* compression type */
+ put_le32(pb, for_asf ? (enc->codec_tag ? enc->codec_tag : codec_get_asf_tag(tags, enc->codec_id)) : enc->codec_tag); //
+ put_le32(pb, enc->width * enc->height * 3);
+ put_le32(pb, 0);
+ put_le32(pb, 0);
+ put_le32(pb, 0);
+ put_le32(pb, 0);
+
+ put_buffer(pb, enc->extradata, enc->extradata_size);
+
+ if (enc->extradata_size & 1)
+ put_byte(pb, 0);
+}
+#endif //CONFIG_MUXERS
+
+#ifdef CONFIG_DEMUXERS
+/* We could be given one of the three possible structures here:
+ * WAVEFORMAT, PCMWAVEFORMAT or WAVEFORMATEX. Each structure
+ * is an expansion of the previous one with the fields added
+ * at the bottom. PCMWAVEFORMAT adds 'WORD wBitsPerSample' and
+ * WAVEFORMATEX adds 'WORD cbSize' and basically makes itself
+ * an openended structure.
+ */
+void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size)
+{
+ int id;
+
+ id = get_le16(pb);
+ codec->codec_type = CODEC_TYPE_AUDIO;
+ codec->codec_tag = id;
+ codec->channels = get_le16(pb);
+ codec->sample_rate = get_le32(pb);
+ codec->bit_rate = get_le32(pb) * 8;
+ codec->block_align = get_le16(pb);
+ if (size == 14) { /* We're dealing with plain vanilla WAVEFORMAT */
+ codec->bits_per_sample = 8;
+ }else
+ codec->bits_per_sample = get_le16(pb);
+ codec->codec_id = wav_codec_get_id(id, codec->bits_per_sample);
+
+ if (size > 16) { /* We're obviously dealing with WAVEFORMATEX */
+ codec->extradata_size = get_le16(pb);
+ if (codec->extradata_size > 0) {
+ if (codec->extradata_size > size - 18)
+ codec->extradata_size = size - 18;
+ codec->extradata = av_mallocz(codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(pb, codec->extradata, codec->extradata_size);
+ } else
+ codec->extradata_size = 0;
+
+ /* It is possible for the chunk to contain garbage at the end */
+ if (size - codec->extradata_size - 18 > 0)
+ url_fskip(pb, size - codec->extradata_size - 18);
+ }
+}
+
+
+int wav_codec_get_id(unsigned int tag, int bps)
+{
+ int id;
+ id = codec_get_id(codec_wav_tags, tag);
+ if (id <= 0)
+ return id;
+ /* handle specific u8 codec */
+ if (id == CODEC_ID_PCM_S16LE && bps == 8)
+ id = CODEC_ID_PCM_U8;
+ if (id == CODEC_ID_PCM_S16LE && bps == 24)
+ id = CODEC_ID_PCM_S24LE;
+ if (id == CODEC_ID_PCM_S16LE && bps == 32)
+ id = CODEC_ID_PCM_S32LE;
+ return id;
+}
+#endif // CONFIG_DEMUXERS
+
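+/* Compute the (au_rate, au_scale, au_ssize) triple used by RIFF style
+ * muxers: audio streams use frame_size/sample_rate, video streams use the
+ * time base, and the rate/scale pair is reduced by its gcd. */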
+void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale)
+{
+ int gcd;
+
+ *au_ssize= stream->block_align;
+ if(stream->frame_size && stream->sample_rate){
+ *au_scale=stream->frame_size;
+ *au_rate= stream->sample_rate;
+ }else if(stream->codec_type == CODEC_TYPE_VIDEO){
+ *au_scale= stream->time_base.num;
+ *au_rate = stream->time_base.den;
+ }else{
+ *au_scale= stream->block_align ? stream->block_align*8 : 8;
+ *au_rate = stream->bit_rate;
+ }
+ gcd= ff_gcd(*au_scale, *au_rate);
+ *au_scale /= gcd;
+ *au_rate /= gcd;
+}
diff --git a/contrib/ffmpeg/libavformat/riff.h b/contrib/ffmpeg/libavformat/riff.h
new file mode 100644
index 000000000..240855a8b
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/riff.h
@@ -0,0 +1,51 @@
+/*
+ * RIFF codec tags
+ * copyright (c) 2000 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef FF_RIFF_H
+#define FF_RIFF_H
+
+offset_t start_tag(ByteIOContext *pb, const char *tag);
+void end_tag(ByteIOContext *pb, offset_t start);
+
+typedef struct CodecTag {
+ int id;
+ unsigned int tag;
+ unsigned int invalid_asf : 1;
+} CodecTag;
+
+void put_bmp_header(ByteIOContext *pb, AVCodecContext *enc, const CodecTag *tags, int for_asf);
+int put_wav_header(ByteIOContext *pb, AVCodecContext *enc);
+int wav_codec_get_id(unsigned int tag, int bps);
+void get_wav_header(ByteIOContext *pb, AVCodecContext *codec, int size);
+
+extern const CodecTag codec_bmp_tags[];
+extern const CodecTag codec_wav_tags[];
+
+unsigned int codec_get_tag(const CodecTag *tags, int id);
+enum CodecID codec_get_id(const CodecTag *tags, unsigned int tag);
+unsigned int codec_get_bmp_tag(int id);
+unsigned int codec_get_wav_tag(int id);
+enum CodecID codec_get_bmp_id(unsigned int tag);
+enum CodecID codec_get_wav_id(unsigned int tag);
+unsigned int codec_get_asf_tag(const CodecTag *tags, unsigned int id);
+void ff_parse_specific_params(AVCodecContext *stream, int *au_rate, int *au_ssize, int *au_scale);
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/rm.c b/contrib/ffmpeg/libavformat/rm.c
new file mode 100644
index 000000000..b4ddf1b02
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rm.c
@@ -0,0 +1,1146 @@
+/*
+ * "Real" compatible muxer and demuxer.
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+/* in ms */
+#define BUFFER_DURATION 0
+
+typedef struct {
+ int nb_packets;
+ int packet_total_size;
+ int packet_max_size;
+ /* codec related output */
+ int bit_rate;
+ float frame_rate;
+ int nb_frames; /* current frame number */
+ int total_frames; /* total number of frames */
+ int num;
+ AVCodecContext *enc;
+} StreamInfo;
+
+typedef struct {
+ StreamInfo streams[2];
+ StreamInfo *audio_stream, *video_stream;
+ int data_pos; /* position of the data after the header */
+ int nb_packets;
+ int old_format;
+ int current_stream;
+ int remaining_len;
+ /// Audio descrambling matrix parameters
+ uint8_t *audiobuf; ///< place to store reordered audio data
+ int64_t audiotimestamp; ///< Audio packet timestamp
+ int sub_packet_cnt; // Subpacket counter, used while reading
+ int sub_packet_size, sub_packet_h, coded_framesize; ///< Descrambling parameters from container
+ int audio_stream_num; ///< Stream number for audio packets
+ int audio_pkt_cnt; ///< Output packet counter
+ int audio_framesize; ///< Audio frame size from container
+ int sub_packet_lengths[16]; ///< Length of each aac subpacket
+} RMContext;
+
+#ifdef CONFIG_MUXERS
+static void put_str(ByteIOContext *s, const char *tag)
+{
+ put_be16(s,strlen(tag));
+ while (*tag) {
+ put_byte(s, *tag++);
+ }
+}
+
+static void put_str8(ByteIOContext *s, const char *tag)
+{
+ put_byte(s, strlen(tag));
+ while (*tag) {
+ put_byte(s, *tag++);
+ }
+}
+
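+/* Write the .RMF header: a PROP chunk with global statistics, a CONT chunk
+ * with the metadata strings, one MDPR chunk per stream and the start of the
+ * DATA chunk. The data offset field is patched once its position is known. */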
+static void rv10_write_header(AVFormatContext *ctx,
+ int data_size, int index_pos)
+{
+ RMContext *rm = ctx->priv_data;
+ ByteIOContext *s = &ctx->pb;
+ StreamInfo *stream;
+ unsigned char *data_offset_ptr, *start_ptr;
+ const char *desc, *mimetype;
+ int nb_packets, packet_total_size, packet_max_size, size, packet_avg_size, i;
+ int bit_rate, v, duration, flags, data_pos;
+
+ start_ptr = s->buf_ptr;
+
+ put_tag(s, ".RMF");
+ put_be32(s,18); /* header size */
+ put_be16(s,0);
+ put_be32(s,0);
+ put_be32(s,4 + ctx->nb_streams); /* num headers */
+
+ put_tag(s,"PROP");
+ put_be32(s, 50);
+ put_be16(s, 0);
+ packet_max_size = 0;
+ packet_total_size = 0;
+ nb_packets = 0;
+ bit_rate = 0;
+ duration = 0;
+ for(i=0;i<ctx->nb_streams;i++) {
+ StreamInfo *stream = &rm->streams[i];
+ bit_rate += stream->bit_rate;
+ if (stream->packet_max_size > packet_max_size)
+ packet_max_size = stream->packet_max_size;
+ nb_packets += stream->nb_packets;
+ packet_total_size += stream->packet_total_size;
+ /* select maximum duration */
+ v = (int) (1000.0 * (float)stream->total_frames / stream->frame_rate);
+ if (v > duration)
+ duration = v;
+ }
+ put_be32(s, bit_rate); /* max bit rate */
+ put_be32(s, bit_rate); /* avg bit rate */
+ put_be32(s, packet_max_size); /* max packet size */
+ if (nb_packets > 0)
+ packet_avg_size = packet_total_size / nb_packets;
+ else
+ packet_avg_size = 0;
+ put_be32(s, packet_avg_size); /* avg packet size */
+ put_be32(s, nb_packets); /* num packets */
+ put_be32(s, duration); /* duration */
+ put_be32(s, BUFFER_DURATION); /* preroll */
+ put_be32(s, index_pos); /* index offset */
+ /* computation of the data offset */
+ data_offset_ptr = s->buf_ptr;
+ put_be32(s, 0); /* data offset : will be patched after */
+ put_be16(s, ctx->nb_streams); /* num streams */
+ flags = 1 | 2; /* save allowed & perfect play */
+ if (url_is_streamed(s))
+ flags |= 4; /* live broadcast */
+ put_be16(s, flags);
+
+ /* comments */
+
+ put_tag(s,"CONT");
+ size = strlen(ctx->title) + strlen(ctx->author) + strlen(ctx->copyright) +
+ strlen(ctx->comment) + 4 * 2 + 10;
+ put_be32(s,size);
+ put_be16(s,0);
+ put_str(s, ctx->title);
+ put_str(s, ctx->author);
+ put_str(s, ctx->copyright);
+ put_str(s, ctx->comment);
+
+ for(i=0;i<ctx->nb_streams;i++) {
+ int codec_data_size;
+
+ stream = &rm->streams[i];
+
+ if (stream->enc->codec_type == CODEC_TYPE_AUDIO) {
+ desc = "The Audio Stream";
+ mimetype = "audio/x-pn-realaudio";
+ codec_data_size = 73;
+ } else {
+ desc = "The Video Stream";
+ mimetype = "video/x-pn-realvideo";
+ codec_data_size = 34;
+ }
+
+ put_tag(s,"MDPR");
+ size = 10 + 9 * 4 + strlen(desc) + strlen(mimetype) + codec_data_size;
+ put_be32(s, size);
+ put_be16(s, 0);
+
+ put_be16(s, i); /* stream number */
+ put_be32(s, stream->bit_rate); /* max bit rate */
+ put_be32(s, stream->bit_rate); /* avg bit rate */
+ put_be32(s, stream->packet_max_size); /* max packet size */
+ if (stream->nb_packets > 0)
+ packet_avg_size = stream->packet_total_size /
+ stream->nb_packets;
+ else
+ packet_avg_size = 0;
+ put_be32(s, packet_avg_size); /* avg packet size */
+ put_be32(s, 0); /* start time */
+ put_be32(s, BUFFER_DURATION); /* preroll */
+ /* duration */
+ if (url_is_streamed(s) || !stream->total_frames)
+ put_be32(s, (int)(3600 * 1000));
+ else
+ put_be32(s, (int)(stream->total_frames * 1000 / stream->frame_rate));
+ put_str8(s, desc);
+ put_str8(s, mimetype);
+ put_be32(s, codec_data_size);
+
+ if (stream->enc->codec_type == CODEC_TYPE_AUDIO) {
+ int coded_frame_size, fscode, sample_rate;
+ sample_rate = stream->enc->sample_rate;
+ coded_frame_size = (stream->enc->bit_rate *
+ stream->enc->frame_size) / (8 * sample_rate);
+ /* audio codec info */
+ put_tag(s, ".ra");
+ put_byte(s, 0xfd);
+ put_be32(s, 0x00040000); /* version */
+ put_tag(s, ".ra4");
+ put_be32(s, 0x01b53530); /* stream length */
+ put_be16(s, 4); /* unknown */
+ put_be32(s, 0x39); /* header size */
+
+ switch(sample_rate) {
+ case 48000:
+ case 24000:
+ case 12000:
+ fscode = 1;
+ break;
+ default:
+ case 44100:
+ case 22050:
+ case 11025:
+ fscode = 2;
+ break;
+ case 32000:
+ case 16000:
+ case 8000:
+ fscode = 3;
+ }
+ put_be16(s, fscode); /* codec additional info, for AC3, seems
+ to be a frequency code */
+ /* special hack to compensate rounding errors... */
+ if (coded_frame_size == 557)
+ coded_frame_size--;
+ put_be32(s, coded_frame_size); /* frame length */
+ put_be32(s, 0x51540); /* unknown */
+ put_be32(s, 0x249f0); /* unknown */
+ put_be32(s, 0x249f0); /* unknown */
+ put_be16(s, 0x01);
+ /* frame length : seems to be very important */
+ put_be16(s, coded_frame_size);
+ put_be32(s, 0); /* unknown */
+ put_be16(s, stream->enc->sample_rate); /* sample rate */
+ put_be32(s, 0x10); /* unknown */
+ put_be16(s, stream->enc->channels);
+ put_str8(s, "Int0"); /* codec name */
+ put_str8(s, "dnet"); /* codec name */
+ put_be16(s, 0); /* title length */
+ put_be16(s, 0); /* author length */
+ put_be16(s, 0); /* copyright length */
+ put_byte(s, 0); /* end of header */
+ } else {
+ /* video codec info */
+ put_be32(s,34); /* size */
+ if(stream->enc->codec_id == CODEC_ID_RV10)
+ put_tag(s,"VIDORV10");
+ else
+ put_tag(s,"VIDORV20");
+ put_be16(s, stream->enc->width);
+ put_be16(s, stream->enc->height);
+ put_be16(s, (int) stream->frame_rate); /* frames per second? */
+ put_be32(s,0); /* unknown meaning */
+ put_be16(s, (int) stream->frame_rate); /* unknown meaning */
+ put_be32(s,0); /* unknown meaning */
+ put_be16(s, 8); /* unknown meaning */
+ /* Seems to be the codec version: only use basic H263. The next
+ versions seem to add a differential DC coding as in
+ MPEG... nothing new under the sun */
+ if(stream->enc->codec_id == CODEC_ID_RV10)
+ put_be32(s,0x10000000);
+ else
+ put_be32(s,0x20103001);
+ //put_be32(s,0x10003000);
+ }
+ }
+
+ /* patch data offset field */
+ data_pos = s->buf_ptr - start_ptr;
+ rm->data_pos = data_pos;
+ data_offset_ptr[0] = data_pos >> 24;
+ data_offset_ptr[1] = data_pos >> 16;
+ data_offset_ptr[2] = data_pos >> 8;
+ data_offset_ptr[3] = data_pos;
+
+ /* data stream */
+ put_tag(s,"DATA");
+ put_be32(s,data_size + 10 + 8);
+ put_be16(s,0);
+
+ put_be32(s, nb_packets); /* number of packets */
+ put_be32(s,0); /* next data header */
+}
+
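+/* Write the 12 byte media packet header and update the per stream
+ * statistics later used by rv10_write_header(). */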
+static void write_packet_header(AVFormatContext *ctx, StreamInfo *stream,
+ int length, int key_frame)
+{
+ int timestamp;
+ ByteIOContext *s = &ctx->pb;
+
+ stream->nb_packets++;
+ stream->packet_total_size += length;
+ if (length > stream->packet_max_size)
+ stream->packet_max_size = length;
+
+ put_be16(s,0); /* version */
+ put_be16(s,length + 12);
+ put_be16(s, stream->num); /* stream number */
+ timestamp = (1000 * (float)stream->nb_frames) / stream->frame_rate;
+ put_be32(s, timestamp); /* timestamp */
+ put_byte(s, 0); /* reserved */
+ put_byte(s, key_frame ? 2 : 0); /* flags */
+}
+
+static int rm_write_header(AVFormatContext *s)
+{
+ RMContext *rm = s->priv_data;
+ StreamInfo *stream;
+ int n;
+ AVCodecContext *codec;
+
+ for(n=0;n<s->nb_streams;n++) {
+ s->streams[n]->id = n;
+ codec = s->streams[n]->codec;
+ stream = &rm->streams[n];
+ memset(stream, 0, sizeof(StreamInfo));
+ stream->num = n;
+ stream->bit_rate = codec->bit_rate;
+ stream->enc = codec;
+
+ switch(codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ rm->audio_stream = stream;
+ stream->frame_rate = (float)codec->sample_rate / (float)codec->frame_size;
+ /* XXX: dummy values */
+ stream->packet_max_size = 1024;
+ stream->nb_packets = 0;
+ stream->total_frames = stream->nb_packets;
+ break;
+ case CODEC_TYPE_VIDEO:
+ rm->video_stream = stream;
+ stream->frame_rate = (float)codec->time_base.den / (float)codec->time_base.num;
+ /* XXX: dummy values */
+ stream->packet_max_size = 4096;
+ stream->nb_packets = 0;
+ stream->total_frames = stream->nb_packets;
+ break;
+ default:
+ return -1;
+ }
+ }
+
+ rv10_write_header(s, 0, 0);
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int rm_write_audio(AVFormatContext *s, const uint8_t *buf, int size, int flags)
+{
+ uint8_t *buf1;
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ StreamInfo *stream = rm->audio_stream;
+ int i;
+
+ /* XXX: suppress this malloc */
+ buf1= (uint8_t*) av_malloc( size * sizeof(uint8_t) );
+
+ write_packet_header(s, stream, size, !!(flags & PKT_FLAG_KEY));
+
+ /* for AC3, the words seem to be reversed */
+ for(i=0;i<size;i+=2) {
+ buf1[i] = buf[i+1];
+ buf1[i+1] = buf[i];
+ }
+ put_buffer(pb, buf1, size);
+ put_flush_packet(pb);
+ stream->nb_frames++;
+ av_free(buf1);
+ return 0;
+}
+
+static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size, int flags)
+{
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ StreamInfo *stream = rm->video_stream;
+ int key_frame = !!(flags & PKT_FLAG_KEY);
+
+ /* XXX: this is incorrect: should be a parameter */
+
+ /* Well, I spent some time finding the meaning of these bits. I am
+ not sure I understood everything, but it works!! */
+#if 1
+ write_packet_header(s, stream, size + 7, key_frame);
+ /* bit 7: '1' if final packet of a frame converted in several packets */
+ put_byte(pb, 0x81);
+ /* bit 7: '1' if I frame. bits 6..0 : sequence number in current
+ frame starting from 1 */
+ if (key_frame) {
+ put_byte(pb, 0x81);
+ } else {
+ put_byte(pb, 0x01);
+ }
+ put_be16(pb, 0x4000 + (size)); /* total frame size */
+ put_be16(pb, 0x4000 + (size)); /* offset from the start or the end */
+#else
+ /* full frame */
+ write_packet_header(s, size + 6);
+ put_byte(pb, 0xc0);
+ put_be16(pb, 0x4000 + size); /* total frame size */
+ put_be16(pb, 0x4000 + packet_number * 126); /* position in stream */
+#endif
+ put_byte(pb, stream->nb_frames & 0xff);
+
+ put_buffer(pb, buf, size);
+ put_flush_packet(pb);
+
+ stream->nb_frames++;
+ return 0;
+}
+
+static int rm_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ if (s->streams[pkt->stream_index]->codec->codec_type ==
+ CODEC_TYPE_AUDIO)
+ return rm_write_audio(s, pkt->data, pkt->size, pkt->flags);
+ else
+ return rm_write_video(s, pkt->data, pkt->size, pkt->flags);
+}
+
+static int rm_write_trailer(AVFormatContext *s)
+{
+ RMContext *rm = s->priv_data;
+ int data_size, index_pos, i;
+ ByteIOContext *pb = &s->pb;
+
+ if (!url_is_streamed(&s->pb)) {
+ /* end of file: finish to write header */
+ index_pos = url_fseek(pb, 0, SEEK_CUR);
+ data_size = index_pos - rm->data_pos;
+
+ /* index */
+ put_tag(pb, "INDX");
+ put_be32(pb, 10 + 10 * s->nb_streams);
+ put_be16(pb, 0);
+
+ for(i=0;i<s->nb_streams;i++) {
+ put_be32(pb, 0); /* zero indices */
+ put_be16(pb, i); /* stream number */
+ put_be32(pb, 0); /* next index */
+ }
+ /* undocumented end header */
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+
+ url_fseek(pb, 0, SEEK_SET);
+ for(i=0;i<s->nb_streams;i++)
+ rm->streams[i].total_frames = rm->streams[i].nb_frames;
+ rv10_write_header(s, data_size, index_pos);
+ } else {
+ /* undocumented end header */
+ put_be32(pb, 0);
+ put_be32(pb, 0);
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/***************************************************/
+
+static void get_str(ByteIOContext *pb, char *buf, int buf_size)
+{
+ int len, i;
+ char *q;
+
+ len = get_be16(pb);
+ q = buf;
+ for(i=0;i<len;i++) {
+ if (i < buf_size - 1)
+ *q++ = get_byte(pb);
+ }
+ *q = '\0';
+}
+
+static void get_str8(ByteIOContext *pb, char *buf, int buf_size)
+{
+ int len, i;
+ char *q;
+
+ len = get_byte(pb);
+ q = buf;
+ for(i=0;i<len;i++) {
+ if (i < buf_size - 1)
+ *q++ = get_byte(pb);
+ }
+ *q = '\0';
+}
+
+static int rm_read_audio_stream_info(AVFormatContext *s, AVStream *st,
+ int read_all)
+{
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ char buf[256];
+ uint32_t version;
+ int i;
+
+ /* ra type header */
+ version = get_be32(pb); /* version */
+ if (((version >> 16) & 0xff) == 3) {
+ int64_t startpos = url_ftell(pb);
+ /* very old version */
+ for(i = 0; i < 14; i++)
+ get_byte(pb);
+ get_str8(pb, s->title, sizeof(s->title));
+ get_str8(pb, s->author, sizeof(s->author));
+ get_str8(pb, s->copyright, sizeof(s->copyright));
+ get_str8(pb, s->comment, sizeof(s->comment));
+ if ((startpos + (version & 0xffff)) >= url_ftell(pb) + 2) {
+ // fourcc (should always be "lpcJ")
+ get_byte(pb);
+ get_str8(pb, buf, sizeof(buf));
+ }
+ // Skip extra header crap (this should never happen)
+ if ((startpos + (version & 0xffff)) > url_ftell(pb))
+ url_fskip(pb, (version & 0xffff) + startpos - url_ftell(pb));
+ st->codec->sample_rate = 8000;
+ st->codec->channels = 1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_RA_144;
+ } else {
+ int flavor, sub_packet_h, coded_framesize, sub_packet_size;
+ /* old version (4) */
+ get_be32(pb); /* .ra4 */
+ get_be32(pb); /* data size */
+ get_be16(pb); /* version2 */
+ get_be32(pb); /* header size */
+ flavor= get_be16(pb); /* add codec info / flavor */
+ rm->coded_framesize = coded_framesize = get_be32(pb); /* coded frame size */
+ get_be32(pb); /* ??? */
+ get_be32(pb); /* ??? */
+ get_be32(pb); /* ??? */
+ rm->sub_packet_h = sub_packet_h = get_be16(pb); /* 1 */
+ st->codec->block_align= get_be16(pb); /* frame size */
+ rm->sub_packet_size = sub_packet_size = get_be16(pb); /* sub packet size */
+ get_be16(pb); /* ??? */
+ if (((version >> 16) & 0xff) == 5) {
+ get_be16(pb); get_be16(pb); get_be16(pb); }
+ st->codec->sample_rate = get_be16(pb);
+ get_be32(pb);
+ st->codec->channels = get_be16(pb);
+ if (((version >> 16) & 0xff) == 5) {
+ get_be32(pb);
+ buf[0] = get_byte(pb);
+ buf[1] = get_byte(pb);
+ buf[2] = get_byte(pb);
+ buf[3] = get_byte(pb);
+ buf[4] = 0;
+ } else {
+ get_str8(pb, buf, sizeof(buf)); /* desc */
+ get_str8(pb, buf, sizeof(buf)); /* desc */
+ }
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ if (!strcmp(buf, "dnet")) {
+ st->codec->codec_id = CODEC_ID_AC3;
+ } else if (!strcmp(buf, "28_8")) {
+ st->codec->codec_id = CODEC_ID_RA_288;
+ st->codec->extradata_size= 0;
+ rm->audio_framesize = st->codec->block_align;
+ st->codec->block_align = coded_framesize;
+
+ if(rm->audio_framesize >= UINT_MAX / sub_packet_h){
+ av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n");
+ return -1;
+ }
+
+ rm->audiobuf = av_malloc(rm->audio_framesize * sub_packet_h);
+ } else if (!strcmp(buf, "cook")) {
+ int codecdata_length, i;
+ get_be16(pb); get_byte(pb);
+ if (((version >> 16) & 0xff) == 5)
+ get_byte(pb);
+ codecdata_length = get_be32(pb);
+ if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
+ av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
+ return -1;
+ }
+
+ st->codec->codec_id = CODEC_ID_COOK;
+ st->codec->extradata_size= codecdata_length;
+ st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ for(i = 0; i < codecdata_length; i++)
+ ((uint8_t*)st->codec->extradata)[i] = get_byte(pb);
+ rm->audio_framesize = st->codec->block_align;
+ st->codec->block_align = rm->sub_packet_size;
+
+ if(rm->audio_framesize >= UINT_MAX / sub_packet_h){
+ av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n");
+ return -1;
+ }
+
+ rm->audiobuf = av_malloc(rm->audio_framesize * sub_packet_h);
+ } else if (!strcmp(buf, "raac") || !strcmp(buf, "racp")) {
+ int codecdata_length, i;
+ get_be16(pb); get_byte(pb);
+ if (((version >> 16) & 0xff) == 5)
+ get_byte(pb);
+ st->codec->codec_id = CODEC_ID_AAC;
+ codecdata_length = get_be32(pb);
+ if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
+ av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
+ return -1;
+ }
+ if (codecdata_length >= 1) {
+ st->codec->extradata_size = codecdata_length - 1;
+ st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_byte(pb);
+ for(i = 0; i < st->codec->extradata_size; i++)
+ ((uint8_t*)st->codec->extradata)[i] = get_byte(pb);
+ }
+ } else {
+ st->codec->codec_id = CODEC_ID_NONE;
+ pstrcpy(st->codec->codec_name, sizeof(st->codec->codec_name),
+ buf);
+ }
+ if (read_all) {
+ get_byte(pb);
+ get_byte(pb);
+ get_byte(pb);
+
+ get_str8(pb, s->title, sizeof(s->title));
+ get_str8(pb, s->author, sizeof(s->author));
+ get_str8(pb, s->copyright, sizeof(s->copyright));
+ get_str8(pb, s->comment, sizeof(s->comment));
+ }
+ }
+ return 0;
+}
+
+static int rm_read_header_old(AVFormatContext *s, AVFormatParameters *ap)
+{
+ RMContext *rm = s->priv_data;
+ AVStream *st;
+
+ rm->old_format = 1;
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ return rm_read_audio_stream_info(s, st, 1);
+}
+
+static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ RMContext *rm = s->priv_data;
+ AVStream *st;
+ ByteIOContext *pb = &s->pb;
+ unsigned int tag, v;
+ int tag_size, size, codec_data_size, i;
+ int64_t codec_pos;
+ unsigned int start_time, duration;
+ char buf[128];
+ int flags = 0;
+
+ tag = get_le32(pb);
+ if (tag == MKTAG('.', 'r', 'a', 0xfd)) {
+ /* very old .ra format */
+ return rm_read_header_old(s, ap);
+ } else if (tag != MKTAG('.', 'R', 'M', 'F')) {
+ return AVERROR_IO;
+ }
+
+ get_be32(pb); /* header size */
+ get_be16(pb);
+ get_be32(pb);
+ get_be32(pb); /* number of headers */
+
+ for(;;) {
+ if (url_feof(pb))
+ goto fail;
+ tag = get_le32(pb);
+ tag_size = get_be32(pb);
+ get_be16(pb);
+#if 0
+ printf("tag=%c%c%c%c (%08x) size=%d\n",
+ (tag) & 0xff,
+ (tag >> 8) & 0xff,
+ (tag >> 16) & 0xff,
+ (tag >> 24) & 0xff,
+ tag,
+ tag_size);
+#endif
+ if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A'))
+ goto fail;
+ switch(tag) {
+ case MKTAG('P', 'R', 'O', 'P'):
+ /* file header */
+ get_be32(pb); /* max bit rate */
+ get_be32(pb); /* avg bit rate */
+ get_be32(pb); /* max packet size */
+ get_be32(pb); /* avg packet size */
+ get_be32(pb); /* nb packets */
+ get_be32(pb); /* duration */
+ get_be32(pb); /* preroll */
+ get_be32(pb); /* index offset */
+ get_be32(pb); /* data offset */
+ get_be16(pb); /* nb streams */
+ flags = get_be16(pb); /* flags */
+ break;
+ case MKTAG('C', 'O', 'N', 'T'):
+ get_str(pb, s->title, sizeof(s->title));
+ get_str(pb, s->author, sizeof(s->author));
+ get_str(pb, s->copyright, sizeof(s->copyright));
+ get_str(pb, s->comment, sizeof(s->comment));
+ break;
+ case MKTAG('M', 'D', 'P', 'R'):
+ st = av_new_stream(s, 0);
+ if (!st)
+ goto fail;
+ st->id = get_be16(pb);
+ get_be32(pb); /* max bit rate */
+ st->codec->bit_rate = get_be32(pb); /* bit rate */
+ get_be32(pb); /* max packet size */
+ get_be32(pb); /* avg packet size */
+ start_time = get_be32(pb); /* start time */
+ get_be32(pb); /* preroll */
+ duration = get_be32(pb); /* duration */
+ st->start_time = start_time;
+ st->duration = duration;
+ get_str8(pb, buf, sizeof(buf)); /* desc */
+ get_str8(pb, buf, sizeof(buf)); /* mimetype */
+ codec_data_size = get_be32(pb);
+ codec_pos = url_ftell(pb);
+ st->codec->codec_type = CODEC_TYPE_DATA;
+ av_set_pts_info(st, 64, 1, 1000);
+
+ v = get_be32(pb);
+ if (v == MKTAG(0xfd, 'a', 'r', '.')) {
+ /* ra type header */
+ if (rm_read_audio_stream_info(s, st, 0))
+ return -1;
+ } else {
+ int fps, fps2;
+ if (get_le32(pb) != MKTAG('V', 'I', 'D', 'O')) {
+ fail1:
+ av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
+ goto skip;
+ }
+ st->codec->codec_tag = get_le32(pb);
+// av_log(NULL, AV_LOG_DEBUG, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
+ if ( st->codec->codec_tag != MKTAG('R', 'V', '1', '0')
+ && st->codec->codec_tag != MKTAG('R', 'V', '2', '0')
+ && st->codec->codec_tag != MKTAG('R', 'V', '3', '0')
+ && st->codec->codec_tag != MKTAG('R', 'V', '4', '0'))
+ goto fail1;
+ st->codec->width = get_be16(pb);
+ st->codec->height = get_be16(pb);
+ st->codec->time_base.num= 1;
+ fps= get_be16(pb);
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ get_be32(pb);
+ fps2= get_be16(pb);
+ get_be16(pb);
+
+ st->codec->extradata_size= codec_data_size - (url_ftell(pb) - codec_pos);
+
+ if(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
+ //check is redundant as get_buffer() will catch this
+ av_log(s, AV_LOG_ERROR, "st->codec->extradata_size too large\n");
+ return -1;
+ }
+ st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
+
+// av_log(NULL, AV_LOG_DEBUG, "fps= %d fps2= %d\n", fps, fps2);
+ st->codec->time_base.den = fps * st->codec->time_base.num;
+ switch(((uint8_t*)st->codec->extradata)[4]>>4){
+ case 1: st->codec->codec_id = CODEC_ID_RV10; break;
+ case 2: st->codec->codec_id = CODEC_ID_RV20; break;
+ case 3: st->codec->codec_id = CODEC_ID_RV30; break;
+ case 4: st->codec->codec_id = CODEC_ID_RV40; break;
+ default: goto fail1;
+ }
+ }
+skip:
+ /* skip codec info */
+ size = url_ftell(pb) - codec_pos;
+ url_fskip(pb, codec_data_size - size);
+ break;
+ case MKTAG('D', 'A', 'T', 'A'):
+ goto header_end;
+ default:
+ /* unknown tag: skip it */
+ url_fskip(pb, tag_size - 10);
+ break;
+ }
+ }
+ header_end:
+ rm->nb_packets = get_be32(pb); /* number of packets */
+ if (!rm->nb_packets && (flags & 4))
+ rm->nb_packets = 3600 * 25;
+ get_be32(pb); /* next data header */
+ return 0;
+
+ fail:
+ for(i=0;i<s->nb_streams;i++) {
+ av_free(s->streams[i]);
+ }
+ return AVERROR_IO;
+}
+
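+/* Read a variable length number: a 16 bit word >= 0x4000 encodes the value
+ * minus 0x4000, otherwise a second word follows and both words form a
+ * 32 bit value. *len is decremented by the number of bytes consumed. */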
+static int get_num(ByteIOContext *pb, int *len)
+{
+ int n, n1;
+
+ n = get_be16(pb);
+ (*len)-=2;
+ if (n >= 0x4000) {
+ return n - 0x4000;
+ } else {
+ n1 = get_be16(pb);
+ (*len)-=2;
+ return (n << 16) | n1;
+ }
+}
+
+/* multiple of 20 bytes for ra144 (ugly) */
+#define RAW_PACKET_SIZE 1000
+
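+/* Scan forward to the next packet header belonging to a known stream.
+ * Returns the payload length and fills in timestamp, flags, stream index
+ * and file position, or -1 at end of file. */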
+static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_index, int64_t *pos){
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int len, num, res, i;
+ AVStream *st;
+ uint32_t state=0xFFFFFFFF;
+
+ while(!url_feof(pb)){
+ *pos= url_ftell(pb);
+ if(rm->remaining_len > 0){
+ num= rm->current_stream;
+ len= rm->remaining_len;
+ *timestamp = AV_NOPTS_VALUE;
+ *flags= 0;
+ }else{
+ state= (state<<8) + get_byte(pb);
+
+ if(state == MKBETAG('I', 'N', 'D', 'X')){
+ len = get_be16(pb) - 6;
+ if(len<0)
+ continue;
+ goto skip;
+ }
+
+ if(state > (unsigned)0xFFFF || state < 12)
+ continue;
+ len=state;
+ state= 0xFFFFFFFF;
+
+ num = get_be16(pb);
+ *timestamp = get_be32(pb);
+ res= get_byte(pb); /* reserved */
+ *flags = get_byte(pb); /* flags */
+
+
+ len -= 12;
+ }
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ if (num == st->id)
+ break;
+ }
+ if (i == s->nb_streams) {
+skip:
+ /* skip packet if the stream number is unknown */
+ url_fskip(pb, len);
+ rm->remaining_len -= len;
+ continue;
+ }
+ *stream_index= i;
+
+ return len;
+ }
+ return -1;
+}
+
+static int rm_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ RMContext *rm = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ int i, len, tmp, j;
+ int64_t timestamp, pos;
+ uint8_t *ptr;
+ int flags;
+
+ if (rm->audio_pkt_cnt) {
+ // If there are queued audio packets, return them first
+ st = s->streams[rm->audio_stream_num];
+ if (st->codec->codec_id == CODEC_ID_AAC)
+ av_get_packet(pb, pkt, rm->sub_packet_lengths[rm->sub_packet_cnt - rm->audio_pkt_cnt]);
+ else {
+ av_new_packet(pkt, st->codec->block_align);
+ memcpy(pkt->data, rm->audiobuf + st->codec->block_align *
+ (rm->sub_packet_h * rm->audio_framesize / st->codec->block_align - rm->audio_pkt_cnt),
+ st->codec->block_align);
+ }
+ rm->audio_pkt_cnt--;
+ pkt->flags = 0;
+ pkt->stream_index = rm->audio_stream_num;
+ } else if (rm->old_format) {
+ st = s->streams[0];
+ if (st->codec->codec_id == CODEC_ID_RA_288) {
+ int x, y;
+
+ for (y = 0; y < rm->sub_packet_h; y++)
+ for (x = 0; x < rm->sub_packet_h/2; x++)
+ if (get_buffer(pb, rm->audiobuf+x*2*rm->audio_framesize+y*rm->coded_framesize, rm->coded_framesize) <= 0)
+ return AVERROR_IO;
+ rm->audio_stream_num = 0;
+ rm->audio_pkt_cnt = rm->sub_packet_h * rm->audio_framesize / st->codec->block_align - 1;
+ // Release first audio packet
+ av_new_packet(pkt, st->codec->block_align);
+ memcpy(pkt->data, rm->audiobuf, st->codec->block_align);
+ pkt->flags |= PKT_FLAG_KEY; // Mark first packet as keyframe
+ pkt->stream_index = 0;
+ } else {
+ /* just read raw bytes */
+ len = RAW_PACKET_SIZE;
+ len= av_get_packet(pb, pkt, len);
+ pkt->stream_index = 0;
+ if (len <= 0) {
+ return AVERROR_IO;
+ }
+ pkt->size = len;
+ }
+ } else {
+ int seq=1;
+resync:
+ len=sync(s, &timestamp, &flags, &i, &pos);
+ if(len<0)
+ return AVERROR_IO;
+ st = s->streams[i];
+
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ int h, pic_num, len2, pos;
+
+ h= get_byte(pb); len--;
+ if(!(h & 0x40)){
+ seq = get_byte(pb); len--;
+ }
+
+ if((h & 0xc0) == 0x40){
+ len2= pos= 0;
+ }else{
+ len2 = get_num(pb, &len);
+ pos = get_num(pb, &len);
+ }
+ /* picture number */
+ pic_num= get_byte(pb); len--;
+ rm->remaining_len= len;
+ rm->current_stream= st->id;
+
+// av_log(NULL, AV_LOG_DEBUG, "%X len:%d pos:%d len2:%d pic_num:%d\n",h, len, pos, len2, pic_num);
+ if(len2 && len2<len)
+ len=len2;
+ rm->remaining_len-= len;
+ av_get_packet(pb, pkt, len);
+ }
+
+ if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
+ if ((st->codec->codec_id == CODEC_ID_RA_288) ||
+ (st->codec->codec_id == CODEC_ID_COOK)) {
+ int x;
+ int sps = rm->sub_packet_size;
+ int cfs = rm->coded_framesize;
+ int h = rm->sub_packet_h;
+ int y = rm->sub_packet_cnt;
+ int w = rm->audio_framesize;
+
+ if (flags & 2)
+ y = rm->sub_packet_cnt = 0;
+ if (!y)
+ rm->audiotimestamp = timestamp;
+
+ switch(st->codec->codec_id) {
+ case CODEC_ID_RA_288:
+ for (x = 0; x < h/2; x++)
+ get_buffer(pb, rm->audiobuf+x*2*w+y*cfs, cfs);
+ break;
+ case CODEC_ID_COOK:
+ for (x = 0; x < w/sps; x++)
+ get_buffer(pb, rm->audiobuf+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps);
+ break;
+ }
+
+ if (++(rm->sub_packet_cnt) < h)
+ goto resync;
+ else {
+ rm->sub_packet_cnt = 0;
+ rm->audio_stream_num = i;
+ rm->audio_pkt_cnt = h * w / st->codec->block_align - 1;
+ // Release first audio packet
+ av_new_packet(pkt, st->codec->block_align);
+ memcpy(pkt->data, rm->audiobuf, st->codec->block_align);
+ timestamp = rm->audiotimestamp;
+ flags = 2; // Mark first packet as keyframe
+ }
+ } else if (st->codec->codec_id == CODEC_ID_AAC) {
+ int x;
+ rm->audio_stream_num = i;
+ rm->sub_packet_cnt = (get_be16(pb) & 0xf0) >> 4;
+ if (rm->sub_packet_cnt) {
+ for (x = 0; x < rm->sub_packet_cnt; x++)
+ rm->sub_packet_lengths[x] = get_be16(pb);
+ // Release first audio packet
+ rm->audio_pkt_cnt = rm->sub_packet_cnt - 1;
+ av_get_packet(pb, pkt, rm->sub_packet_lengths[0]);
+ flags = 2; // Mark first packet as keyframe
+ }
+ } else
+ av_get_packet(pb, pkt, len);
+ }
+
+ if( (st->discard >= AVDISCARD_NONKEY && !(flags&2))
+ || st->discard >= AVDISCARD_ALL){
+ av_free_packet(pkt);
+ goto resync;
+ }
+
+ pkt->stream_index = i;
+
+#if 0
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ if(st->codec->codec_id == CODEC_ID_RV20){
+ int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1);
+ av_log(NULL, AV_LOG_DEBUG, "%d %"PRId64" %d\n", timestamp, timestamp*512LL/25, seq);
+
+ seq |= (timestamp&~0x3FFF);
+ if(seq - timestamp > 0x2000) seq -= 0x4000;
+ if(seq - timestamp < -0x2000) seq += 0x4000;
+ }
+ }
+#endif
+ pkt->pts= timestamp;
+ if(flags&2){
+ pkt->flags |= PKT_FLAG_KEY;
+ if((seq&0x7F) == 1)
+ av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME);
+ }
+ }
+
+ /* for AC3, the bytes need to be swapped */
+ if (st->codec->codec_id == CODEC_ID_AC3) {
+ ptr = pkt->data;
+ for(j=0;j<len;j+=2) {
+ tmp = ptr[0];
+ ptr[0] = ptr[1];
+ ptr[1] = tmp;
+ ptr += 2;
+ }
+ }
+ return 0;
+}
+
+static int rm_read_close(AVFormatContext *s)
+{
+ RMContext *rm = s->priv_data;
+
+ av_free(rm->audiobuf);
+ return 0;
+}
+
+static int rm_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if ((p->buf[0] == '.' && p->buf[1] == 'R' &&
+ p->buf[2] == 'M' && p->buf[3] == 'F' &&
+ p->buf[4] == 0 && p->buf[5] == 0) ||
+ (p->buf[0] == '.' && p->buf[1] == 'r' &&
+ p->buf[2] == 'a' && p->buf[3] == 0xfd))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
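+/* Seek helper: starting at *ppos, scan for key frames, add them to the
+ * index, and return the timestamp of the first key frame belonging to the
+ * requested stream. */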
+static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
+ int64_t *ppos, int64_t pos_limit)
+{
+ RMContext *rm = s->priv_data;
+ int64_t pos, dts;
+ int stream_index2, flags, len, h;
+
+ pos = *ppos;
+
+ if(rm->old_format)
+ return AV_NOPTS_VALUE;
+
+ url_fseek(&s->pb, pos, SEEK_SET);
+ rm->remaining_len=0;
+ for(;;){
+ int seq=1;
+ AVStream *st;
+
+ len=sync(s, &dts, &flags, &stream_index2, &pos);
+ if(len<0)
+ return AV_NOPTS_VALUE;
+
+ st = s->streams[stream_index2];
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ h= get_byte(&s->pb); len--;
+ if(!(h & 0x40)){
+ seq = get_byte(&s->pb); len--;
+ }
+ }
+
+ if((flags&2) && (seq&0x7F) == 1){
+// av_log(s, AV_LOG_DEBUG, "%d %d-%d %"PRId64" %d\n", flags, stream_index2, stream_index, dts, seq);
+ av_add_index_entry(st, pos, dts, 0, 0, AVINDEX_KEYFRAME);
+ if(stream_index2 == stream_index)
+ break;
+ }
+
+ url_fskip(&s->pb, len);
+ }
+ *ppos = pos;
+ return dts;
+}
+
+#ifdef CONFIG_RM_DEMUXER
+AVInputFormat rm_demuxer = {
+ "rm",
+ "rm format",
+ sizeof(RMContext),
+ rm_probe,
+ rm_read_header,
+ rm_read_packet,
+ rm_read_close,
+ NULL,
+ rm_read_dts,
+};
+#endif
+#ifdef CONFIG_RM_MUXER
+AVOutputFormat rm_muxer = {
+ "rm",
+ "rm format",
+ "application/vnd.rn-realmedia",
+ "rm,ra",
+ sizeof(RMContext),
+ CODEC_ID_AC3,
+ CODEC_ID_RV10,
+ rm_write_header,
+ rm_write_packet,
+ rm_write_trailer,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/rtp.c b/contrib/ffmpeg/libavformat/rtp.c
new file mode 100644
index 000000000..37a286289
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp.c
@@ -0,0 +1,1099 @@
+/*
+ * RTP input/output format
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "mpegts.h"
+#include "bitstream.h"
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+
+#include "rtp_internal.h"
+#include "rtp_h264.h"
+
+//#define DEBUG
+
+
+/* TODO: - add RTCP statistics reporting (should be optional).
+
+ - add support for h263/mpeg4 packetized output: IDEA: send a
+ buffer to 'rtp_write_packet' that contains all the packets for ONE
+ frame. Each packet should have a four byte header containing
+ the length in big endian format (same trick as
+ 'url_open_dyn_packet_buf')
+*/
+
+/* from http://www.iana.org/assignments/rtp-parameters last updated 05 January 2005 */
+AVRtpPayloadType_t AVRtpPayloadTypes[]=
+{
+ {0, "PCMU", CODEC_TYPE_AUDIO, CODEC_ID_PCM_MULAW, 8000, 1},
+ {1, "Reserved", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {2, "Reserved", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {3, "GSM", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {4, "G723", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {5, "DVI4", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {6, "DVI4", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 16000, 1},
+ {7, "LPC", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {8, "PCMA", CODEC_TYPE_AUDIO, CODEC_ID_PCM_ALAW, 8000, 1},
+ {9, "G722", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {10, "L16", CODEC_TYPE_AUDIO, CODEC_ID_PCM_S16BE, 44100, 2},
+ {11, "L16", CODEC_TYPE_AUDIO, CODEC_ID_PCM_S16BE, 44100, 1},
+ {12, "QCELP", CODEC_TYPE_AUDIO, CODEC_ID_QCELP, 8000, 1},
+ {13, "CN", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {14, "MPA", CODEC_TYPE_AUDIO, CODEC_ID_MP2, 90000, -1},
+ {15, "G728", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {16, "DVI4", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 11025, 1},
+ {17, "DVI4", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 22050, 1},
+ {18, "G729", CODEC_TYPE_AUDIO, CODEC_ID_NONE, 8000, 1},
+ {19, "reserved", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {20, "unassigned", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {21, "unassigned", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {22, "unassigned", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {23, "unassigned", CODEC_TYPE_AUDIO, CODEC_ID_NONE, -1, -1},
+ {24, "unassigned", CODEC_TYPE_VIDEO, CODEC_ID_NONE, -1, -1},
+ {25, "CelB", CODEC_TYPE_VIDEO, CODEC_ID_NONE, 90000, -1},
+ {26, "JPEG", CODEC_TYPE_VIDEO, CODEC_ID_MJPEG, 90000, -1},
+ {27, "unassigned", CODEC_TYPE_VIDEO, CODEC_ID_NONE, -1, -1},
+ {28, "nv", CODEC_TYPE_VIDEO, CODEC_ID_NONE, 90000, -1},
+ {29, "unassigned", CODEC_TYPE_VIDEO, CODEC_ID_NONE, -1, -1},
+ {30, "unassigned", CODEC_TYPE_VIDEO, CODEC_ID_NONE, -1, -1},
+ {31, "H261", CODEC_TYPE_VIDEO, CODEC_ID_H261, 90000, -1},
+ {32, "MPV", CODEC_TYPE_VIDEO, CODEC_ID_MPEG1VIDEO, 90000, -1},
+ {33, "MP2T", CODEC_TYPE_DATA, CODEC_ID_MPEG2TS, 90000, -1},
+ {34, "H263", CODEC_TYPE_VIDEO, CODEC_ID_H263, 90000, -1},
+ {35, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {36, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {37, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {38, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {39, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {40, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {41, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {42, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {43, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {44, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {45, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {46, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {47, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {48, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {49, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {50, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {51, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {52, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {53, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {54, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {55, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {56, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {57, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {58, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {59, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {60, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {61, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {62, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {63, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {64, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {65, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {66, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {67, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {68, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {69, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {70, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {71, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {72, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {73, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {74, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {75, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {76, "reserved for RTCP conflict avoidance", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {77, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {78, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {79, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {80, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {81, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {82, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {83, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {84, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {85, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {86, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {87, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {88, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {89, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {90, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {91, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {92, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {93, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {94, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {95, "unassigned", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {96, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {97, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {98, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {99, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {100, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {101, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {102, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {103, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {104, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {105, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {106, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {107, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {108, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {109, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {110, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {111, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {112, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {113, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {114, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {115, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {116, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {117, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {118, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {119, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {120, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {121, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {122, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {123, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {124, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {125, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {126, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {127, "dynamic", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1},
+ {-1, "", CODEC_TYPE_UNKNOWN, CODEC_ID_NONE, -1, -1}
+};
+
+/* statistics functions */
+RTPDynamicProtocolHandler *RTPFirstDynamicPayloadHandler= NULL;
+
+static RTPDynamicProtocolHandler mp4v_es_handler= {"MP4V-ES", CODEC_TYPE_VIDEO, CODEC_ID_MPEG4};
+static RTPDynamicProtocolHandler mpeg4_generic_handler= {"mpeg4-generic", CODEC_TYPE_AUDIO, CODEC_ID_AAC};
+
+static void register_dynamic_payload_handler(RTPDynamicProtocolHandler *handler)
+{
+ handler->next= RTPFirstDynamicPayloadHandler;
+ RTPFirstDynamicPayloadHandler= handler;
+}
+
+void av_register_rtp_dynamic_payload_handlers()
+{
+ register_dynamic_payload_handler(&mp4v_es_handler);
+ register_dynamic_payload_handler(&mpeg4_generic_handler);
+ register_dynamic_payload_handler(&ff_h264_dynamic_handler);
+}
+
+int rtp_get_codec_info(AVCodecContext *codec, int payload_type)
+{
+ if (AVRtpPayloadTypes[payload_type].codec_id != CODEC_ID_NONE) {
+ codec->codec_type = AVRtpPayloadTypes[payload_type].codec_type;
+ codec->codec_id = AVRtpPayloadTypes[payload_type].codec_id;
+ if (AVRtpPayloadTypes[payload_type].audio_channels > 0)
+ codec->channels = AVRtpPayloadTypes[payload_type].audio_channels;
+ if (AVRtpPayloadTypes[payload_type].clock_rate > 0)
+ codec->sample_rate = AVRtpPayloadTypes[payload_type].clock_rate;
+ return 0;
+ }
+ return -1;
+}
+
+/* return < 0 if unknown payload type */
+int rtp_get_payload_type(AVCodecContext *codec)
+{
+ int i, payload_type;
+
+ /* compute the payload type */
+ for (payload_type = -1, i = 0; AVRtpPayloadTypes[i].pt >= 0; ++i)
+ if (AVRtpPayloadTypes[i].codec_id == codec->codec_id) {
+ if (codec->codec_id == CODEC_ID_PCM_S16BE)
+ if (codec->channels != AVRtpPayloadTypes[i].audio_channels)
+ continue;
+ payload_type = AVRtpPayloadTypes[i].pt;
+ }
+ return payload_type;
+}
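+
+/* A minimal sketch (hypothetical helper, illustrative only) of how the two
+ * lookups above pair up for a statically assigned payload type: PCMU maps
+ * to payload type 0, and CODEC_ID_PCM_S16BE picks 10 or 11 depending on the
+ * channel count, which is why rtp_get_payload_type() re-checks
+ * codec->channels for that codec. Codecs without a static entry return -1
+ * and the muxer then falls back to RTP_PT_PRIVATE (96). */
+#if 0
+static void example_static_payload_roundtrip(AVCodecContext *codec)
+{
+    int pt;
+
+    codec->codec_id = CODEC_ID_PCM_MULAW;
+    codec->channels = 1;
+    pt = rtp_get_payload_type(codec);   /* -> 0 (PCMU) */
+    rtp_get_codec_info(codec, pt);      /* fills in 8000 Hz, 1 channel */
+}
+#endif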
+
+static inline uint32_t decode_be32(const uint8_t *p)
+{
+ return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+}
+
+static inline uint64_t decode_be64(const uint8_t *p)
+{
+ return ((uint64_t)decode_be32(p) << 32) | decode_be32(p + 4);
+}
+
+static int rtcp_parse_packet(RTPDemuxContext *s, const unsigned char *buf, int len)
+{
+ if (buf[1] != 200)
+ return -1;
+ s->last_rtcp_ntp_time = decode_be64(buf + 8);
+ if (s->first_rtcp_ntp_time == AV_NOPTS_VALUE)
+ s->first_rtcp_ntp_time = s->last_rtcp_ntp_time;
+ s->last_rtcp_timestamp = decode_be32(buf + 16);
+ return 0;
+}
+
+#define RTP_SEQ_MOD (1<<16)
+
+/**
+* called when an RTP parse context is opened
+*/
+static void rtp_init_statistics(RTPStatistics *s, uint16_t base_sequence)
+{
+ memset(s, 0, sizeof(RTPStatistics));
+ s->max_seq= base_sequence;
+ s->probation= 1;
+}
+
+/**
+* called whenever there is a large jump in sequence numbers, or when a source comes out of probation...
+*/
+static void rtp_init_sequence(RTPStatistics *s, uint16_t seq)
+{
+ s->max_seq= seq;
+ s->cycles= 0;
+ s->base_seq= seq -1;
+ s->bad_seq= RTP_SEQ_MOD + 1;
+ s->received= 0;
+ s->expected_prior= 0;
+ s->received_prior= 0;
+ s->jitter= 0;
+ s->transit= 0;
+}
+
+/**
+* returns 1 if we should handle this packet.
+*/
+static int rtp_valid_packet_in_sequence(RTPStatistics *s, uint16_t seq)
+{
+ uint16_t udelta= seq - s->max_seq;
+ const int MAX_DROPOUT= 3000;
+ const int MAX_MISORDER = 100;
+ const int MIN_SEQUENTIAL = 2;
+
+    /* source not valid until MIN_SEQUENTIAL packets with sequential sequence numbers have been received */
+ if(s->probation)
+ {
+ if(seq==s->max_seq + 1) {
+ s->probation--;
+ s->max_seq= seq;
+ if(s->probation==0) {
+ rtp_init_sequence(s, seq);
+ s->received++;
+ return 1;
+ }
+ } else {
+ s->probation= MIN_SEQUENTIAL - 1;
+ s->max_seq = seq;
+ }
+ } else if (udelta < MAX_DROPOUT) {
+ // in order, with permissible gap
+ if(seq < s->max_seq) {
+            //sequence number wrapped; count another 64k cycles
+ s->cycles += RTP_SEQ_MOD;
+ }
+ s->max_seq= seq;
+ } else if (udelta <= RTP_SEQ_MOD - MAX_MISORDER) {
+ // sequence made a large jump...
+ if(seq==s->bad_seq) {
+ // two sequential packets-- assume that the other side restarted without telling us; just resync.
+ rtp_init_sequence(s, seq);
+ } else {
+ s->bad_seq= (seq + 1) & (RTP_SEQ_MOD-1);
+ return 0;
+ }
+ } else {
+ // duplicate or reordered packet...
+ }
+ s->received++;
+ return 1;
+}
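+
+/* Illustrative trace of the validation above as it is called from
+ * rtp_parse_open() (which passes base_sequence 0, so probation == 1):
+ *  - first packet, seq S: unless S happens to be 1, the probation branch
+ *    just re-arms probation and records max_seq = S; the packet still falls
+ *    through to received++/return 1, so it is not dropped;
+ *  - second packet, seq S+1: probation reaches 0, rtp_init_sequence()
+ *    re-bases base_seq and the source is considered valid;
+ *  - a jump larger than MAX_DROPOUT is rejected at first (bad_seq is set to
+ *    seq + 1); only if the next packet continues from that new position
+ *    (seq == bad_seq) does rtp_init_sequence() re-base the stream.
+ */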
+
+#if 0
+/**
+* This function is currently unused; without a valid local ntp time, I don't see how we could calculate the
+* difference between the arrival and sent timestamp. As a result, the jitter and transit statistics values
+* never change. I left this in in case someone else can see a way. (rdm)
+*/
+static void rtcp_update_jitter(RTPStatistics *s, uint32_t sent_timestamp, uint32_t arrival_timestamp)
+{
+ uint32_t transit= arrival_timestamp - sent_timestamp;
+ int d;
+ s->transit= transit;
+ d= FFABS(transit - s->transit);
+ s->jitter += d - ((s->jitter + 8)>>4);
+}
+#endif
+
+/**
+ * some rtp servers assume the client is dead if they don't hear from it...
+ * so we send a Receiver Report to the provided ByteIO context
+ * (we don't have access to the rtcp handle from here)
+ */
+int rtp_check_and_send_back_rr(RTPDemuxContext *s, int count)
+{
+ ByteIOContext pb;
+ uint8_t *buf;
+ int len;
+ int rtcp_bytes;
+ RTPStatistics *stats= &s->statistics;
+ uint32_t lost;
+ uint32_t extended_max;
+ uint32_t expected_interval;
+ uint32_t received_interval;
+ uint32_t lost_interval;
+ uint32_t expected;
+ uint32_t fraction;
+ uint64_t ntp_time= s->last_rtcp_ntp_time; // TODO: Get local ntp time?
+
+ if (!s->rtp_ctx || (count < 1))
+ return -1;
+
+    /* TODO: I think this is way too often; RFC 1889 has an algorithm for this */
+    /* XXX: mpeg pts hardcoded. RTCP sent every 0.5 seconds */
+ s->octet_count += count;
+ rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
+ RTCP_TX_RATIO_DEN;
+ rtcp_bytes /= 50; // mmu_man: that's enough for me... VLC sends much less btw !?
+ if (rtcp_bytes < 28)
+ return -1;
+ s->last_octet_count = s->octet_count;
+
+ if (url_open_dyn_buf(&pb) < 0)
+ return -1;
+
+ // Receiver Report
+ put_byte(&pb, (RTP_VERSION << 6) + 1); /* 1 report block */
+ put_byte(&pb, 201);
+ put_be16(&pb, 7); /* length in words - 1 */
+ put_be32(&pb, s->ssrc); // our own SSRC
+ put_be32(&pb, s->ssrc); // XXX: should be the server's here!
+ // some placeholders we should really fill...
+ // RFC 1889/p64
+ extended_max= stats->cycles + stats->max_seq;
+ expected= extended_max - stats->base_seq + 1;
+ lost= expected - stats->received;
+ lost= FFMIN(lost, 0xffffff); // clamp it since it's only 24 bits...
+ expected_interval= expected - stats->expected_prior;
+ stats->expected_prior= expected;
+ received_interval= stats->received - stats->received_prior;
+ stats->received_prior= stats->received;
+ lost_interval= expected_interval - received_interval;
+ if (expected_interval==0 || lost_interval<=0) fraction= 0;
+ else fraction = (lost_interval<<8)/expected_interval;
+
+ fraction= (fraction<<24) | lost;
+
+ put_be32(&pb, fraction); /* 8 bits of fraction, 24 bits of total packets lost */
+ put_be32(&pb, extended_max); /* max sequence received */
+ put_be32(&pb, stats->jitter>>4); /* jitter */
+
+ if(s->last_rtcp_ntp_time==AV_NOPTS_VALUE)
+ {
+ put_be32(&pb, 0); /* last SR timestamp */
+ put_be32(&pb, 0); /* delay since last SR */
+ } else {
+        uint32_t middle_32_bits= s->last_rtcp_ntp_time>>16; // this is valid, right? do we need to handle 64 bit values specially?
+ uint32_t delay_since_last= ntp_time - s->last_rtcp_ntp_time;
+
+ put_be32(&pb, middle_32_bits); /* last SR timestamp */
+ put_be32(&pb, delay_since_last); /* delay since last SR */
+ }
+
+ // CNAME
+ put_byte(&pb, (RTP_VERSION << 6) + 1); /* 1 report block */
+ put_byte(&pb, 202);
+ len = strlen(s->hostname);
+ put_be16(&pb, (6 + len + 3) / 4); /* length in words - 1 */
+ put_be32(&pb, s->ssrc);
+ put_byte(&pb, 0x01);
+ put_byte(&pb, len);
+ put_buffer(&pb, s->hostname, len);
+ // padding
+ for (len = (6 + len) % 4; len % 4; len++) {
+ put_byte(&pb, 0);
+ }
+
+ put_flush_packet(&pb);
+ len = url_close_dyn_buf(&pb, &buf);
+ if ((len > 0) && buf) {
+ int result;
+#if defined(DEBUG)
+ printf("sending %d bytes of RR\n", len);
+#endif
+ result= url_write(s->rtp_ctx, buf, len);
+#if defined(DEBUG)
+ printf("result from url_write: %d\n", result);
+#endif
+ av_free(buf);
+ }
+ return 0;
+}
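+
+/* Worked example of the RR arithmetic above (numbers are illustrative):
+ * with RTCP_TX_RATIO 5/1000 and the extra /50, the report is emitted once
+ * (octet_count - last_octet_count) reaches 28 * 50 * 1000 / 5 = 280000
+ * payload bytes.  For the loss fraction, if expected_interval == 100 and
+ * received_interval == 95, then lost_interval == 5 and
+ * fraction == (5 << 8) / 100 == 12, i.e. roughly 5% loss encoded as the
+ * 8-bit fixed-point fraction required by RFC 3550.
+ */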
+
+/**
+ * open a new RTP parse context for stream 'st'. 'st' can be NULL for
+ * MPEG2TS streams to indicate that they should be demuxed inside the
+ * rtp demux (otherwise CODEC_ID_MPEG2TS packets are returned)
+ * TODO: change this to not take rtp_payload data, and use the new dynamic payload system.
+ */
+RTPDemuxContext *rtp_parse_open(AVFormatContext *s1, AVStream *st, URLContext *rtpc, int payload_type, rtp_payload_data_t *rtp_payload_data)
+{
+ RTPDemuxContext *s;
+
+ s = av_mallocz(sizeof(RTPDemuxContext));
+ if (!s)
+ return NULL;
+ s->payload_type = payload_type;
+ s->last_rtcp_ntp_time = AV_NOPTS_VALUE;
+ s->first_rtcp_ntp_time = AV_NOPTS_VALUE;
+ s->ic = s1;
+ s->st = st;
+ s->rtp_payload_data = rtp_payload_data;
+ rtp_init_statistics(&s->statistics, 0); // do we know the initial sequence from sdp?
+ if (!strcmp(AVRtpPayloadTypes[payload_type].enc_name, "MP2T")) {
+ s->ts = mpegts_parse_open(s->ic);
+ if (s->ts == NULL) {
+ av_free(s);
+ return NULL;
+ }
+ } else {
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MPEG1VIDEO:
+ case CODEC_ID_MPEG2VIDEO:
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ case CODEC_ID_MPEG4:
+ case CODEC_ID_H264:
+ st->need_parsing = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ // needed to send back RTCP RR in RTSP sessions
+ s->rtp_ctx = rtpc;
+ gethostname(s->hostname, sizeof(s->hostname));
+ return s;
+}
+
+static int rtp_parse_mp4_au(RTPDemuxContext *s, const uint8_t *buf)
+{
+ int au_headers_length, au_header_size, i;
+ GetBitContext getbitcontext;
+ rtp_payload_data_t *infos;
+
+ infos = s->rtp_payload_data;
+
+ if (infos == NULL)
+ return -1;
+
+ /* decode the first 2 bytes where are stored the AUHeader sections
+ length in bits */
+ au_headers_length = BE_16(buf);
+
+ if (au_headers_length > RTP_MAX_PACKET_LENGTH)
+ return -1;
+
+ infos->au_headers_length_bytes = (au_headers_length + 7) / 8;
+
+ /* skip AU headers length section (2 bytes) */
+ buf += 2;
+
+ init_get_bits(&getbitcontext, buf, infos->au_headers_length_bytes * 8);
+
+    /* XXX: Wrong if optional additional sections are present (cts, dts etc...) */
+ au_header_size = infos->sizelength + infos->indexlength;
+ if (au_header_size <= 0 || (au_headers_length % au_header_size != 0))
+ return -1;
+
+ infos->nb_au_headers = au_headers_length / au_header_size;
+ infos->au_headers = av_malloc(sizeof(struct AUHeaders) * infos->nb_au_headers);
+
+    /* XXX: We handle multiple AU Sections as only one (need to fix this for interleaving)
+       In my test, the faad decoder doesn't behave correctly when sending each AU one by one
+ but does when sending the whole as one big packet... */
+ infos->au_headers[0].size = 0;
+ infos->au_headers[0].index = 0;
+ for (i = 0; i < infos->nb_au_headers; ++i) {
+ infos->au_headers[0].size += get_bits_long(&getbitcontext, infos->sizelength);
+ infos->au_headers[0].index = get_bits_long(&getbitcontext, infos->indexlength);
+ }
+
+ infos->nb_au_headers = 1;
+
+ return 0;
+}
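+
+/* Illustrative example of the AU-header parsing above, assuming the usual
+ * MPEG-4 audio fmtp values sizelength=13 and indexlength=3:
+ * au_header_size is 16 bits, so a 2-byte AU-headers-length field of 16
+ * describes exactly one AU header, whose first 13 bits are the AU size and
+ * whose remaining 3 bits are the AU index.  Several AU headers in one
+ * packet are currently collapsed into a single header whose size is the
+ * sum of all AU sizes (see the XXX note above).
+ */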
+
+/**
+ * This was the second switch in rtp_parse_packet. Normalizes time, if required, sets stream_index, etc.
+ */
+static void finalize_packet(RTPDemuxContext *s, AVPacket *pkt, uint32_t timestamp)
+{
+ switch(s->st->codec->codec_id) {
+ case CODEC_ID_MP2:
+ case CODEC_ID_MPEG1VIDEO:
+ if (s->last_rtcp_ntp_time != AV_NOPTS_VALUE) {
+ int64_t addend;
+
+ int delta_timestamp;
+ /* XXX: is it really necessary to unify the timestamp base ? */
+ /* compute pts from timestamp with received ntp_time */
+ delta_timestamp = timestamp - s->last_rtcp_timestamp;
+ /* convert to 90 kHz without overflow */
+ addend = (s->last_rtcp_ntp_time - s->first_rtcp_ntp_time) >> 14;
+ addend = (addend * 5625) >> 14;
+ pkt->pts = addend + delta_timestamp;
+ }
+ break;
+ case CODEC_ID_AAC:
+ case CODEC_ID_H264:
+ case CODEC_ID_MPEG4:
+ pkt->pts = timestamp;
+ break;
+ default:
+ /* no timestamp info yet */
+ break;
+ }
+ pkt->stream_index = s->st->index;
+}
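+
+/* The MP2/MPEG1VIDEO branch above converts an NTP 32.32 fixed-point
+ * difference to the 90 kHz clock:  ticks = ntp_diff * 90000 / 2^32
+ *                                        = ntp_diff * 5625 / 2^28,
+ * and the two ">> 14" steps simply split the 2^28 division so the
+ * intermediate multiplication by 5625 cannot overflow 64 bits.
+ * The reverse mapping, ntp_time = (pts << 28) / 5625 for a 90 kHz pts,
+ * is used by rtp_write_packet() below.
+ */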
+
+/**
+ * Parse an RTP or RTCP packet directly sent as a buffer.
+ * @param s RTP parse context.
+ * @param pkt returned packet
+ * @param buf input buffer or NULL to read the next packets
+ * @param len buffer len
+ * @return 0 if a packet is returned, 1 if a packet is returned and more can follow
+ * (use buf as NULL to read the next). -1 if no packet (error or no more packets).
+ */
+int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
+ const uint8_t *buf, int len)
+{
+ unsigned int ssrc, h;
+ int payload_type, seq, ret;
+ AVStream *st;
+ uint32_t timestamp;
+ int rv= 0;
+
+ if (!buf) {
+ /* return the next packets, if any */
+ if(s->st && s->parse_packet) {
+ timestamp= 0; ///< Should not be used if buf is NULL, but should be set to the timestamp of the packet returned....
+ rv= s->parse_packet(s, pkt, &timestamp, NULL, 0);
+ finalize_packet(s, pkt, timestamp);
+ return rv;
+ } else {
+ // TODO: Move to a dynamic packet handler (like above)
+ if (s->read_buf_index >= s->read_buf_size)
+ return -1;
+ ret = mpegts_parse_packet(s->ts, pkt, s->buf + s->read_buf_index,
+ s->read_buf_size - s->read_buf_index);
+ if (ret < 0)
+ return -1;
+ s->read_buf_index += ret;
+ if (s->read_buf_index < s->read_buf_size)
+ return 1;
+ else
+ return 0;
+ }
+ }
+
+ if (len < 12)
+ return -1;
+
+ if ((buf[0] & 0xc0) != (RTP_VERSION << 6))
+ return -1;
+ if (buf[1] >= 200 && buf[1] <= 204) {
+ rtcp_parse_packet(s, buf, len);
+ return -1;
+ }
+ payload_type = buf[1] & 0x7f;
+ seq = (buf[2] << 8) | buf[3];
+ timestamp = decode_be32(buf + 4);
+ ssrc = decode_be32(buf + 8);
+ /* store the ssrc in the RTPDemuxContext */
+ s->ssrc = ssrc;
+
+ /* NOTE: we can handle only one payload type */
+ if (s->payload_type != payload_type)
+ return -1;
+
+ st = s->st;
+ // only do something with this if all the rtp checks pass...
+ if(!rtp_valid_packet_in_sequence(&s->statistics, seq))
+ {
+ av_log(st?st->codec:NULL, AV_LOG_ERROR, "RTP: PT=%02x: bad cseq %04x expected=%04x\n",
+ payload_type, seq, ((s->seq + 1) & 0xffff));
+ return -1;
+ }
+
+ s->seq = seq;
+ len -= 12;
+ buf += 12;
+
+ if (!st) {
+ /* specific MPEG2TS demux support */
+ ret = mpegts_parse_packet(s->ts, pkt, buf, len);
+ if (ret < 0)
+ return -1;
+ if (ret < len) {
+ s->read_buf_size = len - ret;
+ memcpy(s->buf, buf + ret, s->read_buf_size);
+ s->read_buf_index = 0;
+ return 1;
+ }
+ } else {
+        // at this point, the RTP header has been stripped; this is ASSUMING that there is only 1 CSRC, which isn't wise.
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MP2:
+ /* better than nothing: skip mpeg audio RTP header */
+ if (len <= 4)
+ return -1;
+ h = decode_be32(buf);
+ len -= 4;
+ buf += 4;
+ av_new_packet(pkt, len);
+ memcpy(pkt->data, buf, len);
+ break;
+ case CODEC_ID_MPEG1VIDEO:
+ /* better than nothing: skip mpeg video RTP header */
+ if (len <= 4)
+ return -1;
+ h = decode_be32(buf);
+ buf += 4;
+ len -= 4;
+ if (h & (1 << 26)) {
+ /* mpeg2 */
+ if (len <= 4)
+ return -1;
+ buf += 4;
+ len -= 4;
+ }
+ av_new_packet(pkt, len);
+ memcpy(pkt->data, buf, len);
+ break;
+ // moved from below, verbatim. this is because this section handles packets, and the lower switch handles
+ // timestamps.
+ // TODO: Put this into a dynamic packet handler...
+ case CODEC_ID_AAC:
+ if (rtp_parse_mp4_au(s, buf))
+ return -1;
+ {
+ rtp_payload_data_t *infos = s->rtp_payload_data;
+ if (infos == NULL)
+ return -1;
+ buf += infos->au_headers_length_bytes + 2;
+ len -= infos->au_headers_length_bytes + 2;
+
+                /* XXX: Fixme: we only handle the case where rtp_parse_mp4_au defines
+                   one au_header */
+ av_new_packet(pkt, infos->au_headers[0].size);
+ memcpy(pkt->data, buf, infos->au_headers[0].size);
+ buf += infos->au_headers[0].size;
+ len -= infos->au_headers[0].size;
+ }
+ s->read_buf_size = len;
+ s->buf_ptr = buf;
+ rv= 0;
+ break;
+ default:
+ if(s->parse_packet) {
+ rv= s->parse_packet(s, pkt, &timestamp, buf, len);
+ } else {
+ av_new_packet(pkt, len);
+ memcpy(pkt->data, buf, len);
+ }
+ break;
+ }
+
+ // now perform timestamp things....
+ finalize_packet(s, pkt, timestamp);
+ }
+ return rv;
+}
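+
+/* Minimal caller sketch for the return-value protocol documented above
+ * (hypothetical helper name, error handling omitted): */
+#if 0
+static void example_drain_rtp_datagram(RTPDemuxContext *rtp,
+                                        const uint8_t *buf, int len)
+{
+    AVPacket pkt;
+    int ret = rtp_parse_packet(rtp, &pkt, buf, len);  /* one UDP datagram */
+
+    while (ret >= 0) {
+        /* ... consume pkt here ... */
+        av_free_packet(&pkt);
+        if (ret == 0)
+            break;                                    /* nothing more buffered */
+        ret = rtp_parse_packet(rtp, &pkt, NULL, 0);   /* drain buffered data */
+    }
+}
+#endif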
+
+void rtp_parse_close(RTPDemuxContext *s)
+{
+ // TODO: fold this into the protocol specific data fields.
+ if (!strcmp(AVRtpPayloadTypes[s->payload_type].enc_name, "MP2T")) {
+ mpegts_parse_close(s->ts);
+ }
+ av_free(s);
+}
+
+/* rtp output */
+
+static int rtp_write_header(AVFormatContext *s1)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ int payload_type, max_packet_size, n;
+ AVStream *st;
+
+ if (s1->nb_streams != 1)
+ return -1;
+ st = s1->streams[0];
+
+ payload_type = rtp_get_payload_type(st->codec);
+ if (payload_type < 0)
+ payload_type = RTP_PT_PRIVATE; /* private payload type */
+ s->payload_type = payload_type;
+
+// the following 2 FIXMEs could be set based on the current time, there's normally no info leak, as rtp will likely be transmitted immediately
+ s->base_timestamp = 0; /* FIXME: was random(), what should this be? */
+ s->timestamp = s->base_timestamp;
+ s->ssrc = 0; /* FIXME: was random(), what should this be? */
+ s->first_packet = 1;
+
+ max_packet_size = url_fget_max_packet_size(&s1->pb);
+ if (max_packet_size <= 12)
+ return AVERROR_IO;
+ s->max_payload_size = max_packet_size - 12;
+
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ s->buf_ptr = s->buf + 4;
+ s->cur_timestamp = 0;
+ break;
+ case CODEC_ID_MPEG1VIDEO:
+ s->cur_timestamp = 0;
+ break;
+ case CODEC_ID_MPEG2TS:
+ n = s->max_payload_size / TS_PACKET_SIZE;
+ if (n < 1)
+ n = 1;
+ s->max_payload_size = n * TS_PACKET_SIZE;
+ s->buf_ptr = s->buf;
+ break;
+ default:
+ s->buf_ptr = s->buf;
+ break;
+ }
+
+ return 0;
+}
+
+/* send an rtcp sender report packet */
+static void rtcp_send_sr(AVFormatContext *s1, int64_t ntp_time)
+{
+ RTPDemuxContext *s = s1->priv_data;
+#if defined(DEBUG)
+ printf("RTCP: %02x %"PRIx64" %x\n", s->payload_type, ntp_time, s->timestamp);
+#endif
+ put_byte(&s1->pb, (RTP_VERSION << 6));
+ put_byte(&s1->pb, 200);
+ put_be16(&s1->pb, 6); /* length in words - 1 */
+ put_be32(&s1->pb, s->ssrc);
+ put_be64(&s1->pb, ntp_time);
+ put_be32(&s1->pb, s->timestamp);
+ put_be32(&s1->pb, s->packet_count);
+ put_be32(&s1->pb, s->octet_count);
+ put_flush_packet(&s1->pb);
+}
+
+/* send an rtp packet. sequence number is incremented, but the caller
+ must update the timestamp itself */
+static void rtp_send_data(AVFormatContext *s1, const uint8_t *buf1, int len, int m)
+{
+ RTPDemuxContext *s = s1->priv_data;
+
+#ifdef DEBUG
+ printf("rtp_send_data size=%d\n", len);
+#endif
+
+ /* build the RTP header */
+ put_byte(&s1->pb, (RTP_VERSION << 6));
+ put_byte(&s1->pb, (s->payload_type & 0x7f) | ((m & 0x01) << 7));
+ put_be16(&s1->pb, s->seq);
+ put_be32(&s1->pb, s->timestamp);
+ put_be32(&s1->pb, s->ssrc);
+
+ put_buffer(&s1->pb, buf1, len);
+ put_flush_packet(&s1->pb);
+
+ s->seq++;
+ s->octet_count += len;
+ s->packet_count++;
+}
+
+/* send an integer number of samples, compute the time stamp and fill
+   the rtp send buffer before sending. */
+static void rtp_send_samples(AVFormatContext *s1,
+ const uint8_t *buf1, int size, int sample_size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ int len, max_packet_size, n;
+
+ max_packet_size = (s->max_payload_size / sample_size) * sample_size;
+    /* not needed, but who knows */
+ if ((size % sample_size) != 0)
+ av_abort();
+ while (size > 0) {
+ len = (max_packet_size - (s->buf_ptr - s->buf));
+ if (len > size)
+ len = size;
+
+ /* copy data */
+ memcpy(s->buf_ptr, buf1, len);
+ s->buf_ptr += len;
+ buf1 += len;
+ size -= len;
+ n = (s->buf_ptr - s->buf);
+ /* if buffer full, then send it */
+ if (n >= max_packet_size) {
+ rtp_send_data(s1, s->buf, n, 0);
+ s->buf_ptr = s->buf;
+ /* update timestamp */
+ s->timestamp += n / sample_size;
+ }
+ }
+}
+
+/* NOTE: we suppose that exactly one frame is given as argument here */
+/* XXX: test it */
+static void rtp_send_mpegaudio(AVFormatContext *s1,
+ const uint8_t *buf1, int size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ AVStream *st = s1->streams[0];
+ int len, count, max_packet_size;
+
+ max_packet_size = s->max_payload_size;
+
+ /* test if we must flush because not enough space */
+ len = (s->buf_ptr - s->buf);
+ if ((len + size) > max_packet_size) {
+ if (len > 4) {
+ rtp_send_data(s1, s->buf, s->buf_ptr - s->buf, 0);
+ s->buf_ptr = s->buf + 4;
+ /* 90 KHz time stamp */
+ s->timestamp = s->base_timestamp +
+ (s->cur_timestamp * 90000LL) / st->codec->sample_rate;
+ }
+ }
+
+ /* add the packet */
+ if (size > max_packet_size) {
+ /* big packet: fragment */
+ count = 0;
+ while (size > 0) {
+ len = max_packet_size - 4;
+ if (len > size)
+ len = size;
+ /* build fragmented packet */
+ s->buf[0] = 0;
+ s->buf[1] = 0;
+ s->buf[2] = count >> 8;
+ s->buf[3] = count;
+ memcpy(s->buf + 4, buf1, len);
+ rtp_send_data(s1, s->buf, len + 4, 0);
+ size -= len;
+ buf1 += len;
+ count += len;
+ }
+ } else {
+ if (s->buf_ptr == s->buf + 4) {
+ /* no fragmentation possible */
+ s->buf[0] = 0;
+ s->buf[1] = 0;
+ s->buf[2] = 0;
+ s->buf[3] = 0;
+ }
+ memcpy(s->buf_ptr, buf1, size);
+ s->buf_ptr += size;
+ }
+ s->cur_timestamp += st->codec->frame_size;
+}
+
+/* NOTE: a single frame must be passed with its sequence header if
+   needed. XXX: use slices. */
+static void rtp_send_mpegvideo(AVFormatContext *s1,
+ const uint8_t *buf1, int size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ AVStream *st = s1->streams[0];
+ int len, h, max_packet_size;
+ uint8_t *q;
+
+ max_packet_size = s->max_payload_size;
+
+ while (size > 0) {
+ /* XXX: more correct headers */
+ h = 0;
+ if (st->codec->sub_id == 2)
+ h |= 1 << 26; /* mpeg 2 indicator */
+ q = s->buf;
+ *q++ = h >> 24;
+ *q++ = h >> 16;
+ *q++ = h >> 8;
+ *q++ = h;
+
+ if (st->codec->sub_id == 2) {
+ h = 0;
+ *q++ = h >> 24;
+ *q++ = h >> 16;
+ *q++ = h >> 8;
+ *q++ = h;
+ }
+
+ len = max_packet_size - (q - s->buf);
+ if (len > size)
+ len = size;
+
+ memcpy(q, buf1, len);
+ q += len;
+
+ /* 90 KHz time stamp */
+ s->timestamp = s->base_timestamp +
+ av_rescale((int64_t)s->cur_timestamp * st->codec->time_base.num, 90000, st->codec->time_base.den); //FIXME pass timestamps
+ rtp_send_data(s1, s->buf, q - s->buf, (len == size));
+
+ buf1 += len;
+ size -= len;
+ }
+ s->cur_timestamp++;
+}
+
+static void rtp_send_raw(AVFormatContext *s1,
+ const uint8_t *buf1, int size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ AVStream *st = s1->streams[0];
+ int len, max_packet_size;
+
+ max_packet_size = s->max_payload_size;
+
+ while (size > 0) {
+ len = max_packet_size;
+ if (len > size)
+ len = size;
+
+ /* 90 KHz time stamp */
+ s->timestamp = s->base_timestamp +
+ av_rescale((int64_t)s->cur_timestamp * st->codec->time_base.num, 90000, st->codec->time_base.den); //FIXME pass timestamps
+ rtp_send_data(s1, buf1, len, (len == size));
+
+ buf1 += len;
+ size -= len;
+ }
+ s->cur_timestamp++;
+}
+
+/* NOTE: size is assumed to be an integer multiple of TS_PACKET_SIZE */
+static void rtp_send_mpegts_raw(AVFormatContext *s1,
+ const uint8_t *buf1, int size)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ int len, out_len;
+
+ while (size >= TS_PACKET_SIZE) {
+ len = s->max_payload_size - (s->buf_ptr - s->buf);
+ if (len > size)
+ len = size;
+ memcpy(s->buf_ptr, buf1, len);
+ buf1 += len;
+ size -= len;
+ s->buf_ptr += len;
+
+ out_len = s->buf_ptr - s->buf;
+ if (out_len >= s->max_payload_size) {
+ rtp_send_data(s1, s->buf, out_len, 0);
+ s->buf_ptr = s->buf;
+ }
+ }
+}
+
+/* write an RTP packet. 'buf1' must contain a single specific frame. */
+static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ RTPDemuxContext *s = s1->priv_data;
+ AVStream *st = s1->streams[0];
+ int rtcp_bytes;
+ int64_t ntp_time;
+ int size= pkt->size;
+ uint8_t *buf1= pkt->data;
+
+#ifdef DEBUG
+ printf("%d: write len=%d\n", pkt->stream_index, size);
+#endif
+
+    /* XXX: mpeg pts hardcoded. RTCP sent every 0.5 seconds */
+ rtcp_bytes = ((s->octet_count - s->last_octet_count) * RTCP_TX_RATIO_NUM) /
+ RTCP_TX_RATIO_DEN;
+ if (s->first_packet || rtcp_bytes >= 28) {
+ /* compute NTP time */
+ /* XXX: 90 kHz timestamp hardcoded */
+ ntp_time = (pkt->pts << 28) / 5625;
+ rtcp_send_sr(s1, ntp_time);
+ s->last_octet_count = s->octet_count;
+ s->first_packet = 0;
+ }
+
+ switch(st->codec->codec_id) {
+ case CODEC_ID_PCM_MULAW:
+ case CODEC_ID_PCM_ALAW:
+ case CODEC_ID_PCM_U8:
+ case CODEC_ID_PCM_S8:
+ rtp_send_samples(s1, buf1, size, 1 * st->codec->channels);
+ break;
+ case CODEC_ID_PCM_U16BE:
+ case CODEC_ID_PCM_U16LE:
+ case CODEC_ID_PCM_S16BE:
+ case CODEC_ID_PCM_S16LE:
+ rtp_send_samples(s1, buf1, size, 2 * st->codec->channels);
+ break;
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ rtp_send_mpegaudio(s1, buf1, size);
+ break;
+ case CODEC_ID_MPEG1VIDEO:
+ rtp_send_mpegvideo(s1, buf1, size);
+ break;
+ case CODEC_ID_MPEG2TS:
+ rtp_send_mpegts_raw(s1, buf1, size);
+ break;
+ default:
+        /* better than nothing: send the raw codec data */
+ rtp_send_raw(s1, buf1, size);
+ break;
+ }
+ return 0;
+}
+
+static int rtp_write_trailer(AVFormatContext *s1)
+{
+ // RTPDemuxContext *s = s1->priv_data;
+ return 0;
+}
+
+AVOutputFormat rtp_muxer = {
+ "rtp",
+ "RTP output format",
+ NULL,
+ NULL,
+ sizeof(RTPDemuxContext),
+ CODEC_ID_PCM_MULAW,
+ CODEC_ID_NONE,
+ rtp_write_header,
+ rtp_write_packet,
+ rtp_write_trailer,
+};
diff --git a/contrib/ffmpeg/libavformat/rtp.h b/contrib/ffmpeg/libavformat/rtp.h
new file mode 100644
index 000000000..60ccc50ee
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp.h
@@ -0,0 +1,118 @@
+/*
+ * RTP definitions
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef RTP_H
+#define RTP_H
+
+#define RTP_MIN_PACKET_LENGTH 12
+#define RTP_MAX_PACKET_LENGTH 1500 /* XXX: suppress this define */
+
+int rtp_init(void);
+int rtp_get_codec_info(AVCodecContext *codec, int payload_type);
+int rtp_get_payload_type(AVCodecContext *codec);
+
+typedef struct RTPDemuxContext RTPDemuxContext;
+typedef struct rtp_payload_data_s rtp_payload_data_s;
+RTPDemuxContext *rtp_parse_open(AVFormatContext *s1, AVStream *st, URLContext *rtpc, int payload_type, rtp_payload_data_s *rtp_payload_data);
+int rtp_parse_packet(RTPDemuxContext *s, AVPacket *pkt,
+ const uint8_t *buf, int len);
+void rtp_parse_close(RTPDemuxContext *s);
+
+extern AVOutputFormat rtp_muxer;
+extern AVInputFormat rtp_demuxer;
+
+int rtp_get_local_port(URLContext *h);
+int rtp_set_remote_url(URLContext *h, const char *uri);
+void rtp_get_file_handles(URLContext *h, int *prtp_fd, int *prtcp_fd);
+
+extern URLProtocol rtp_protocol;
+
+#define RTP_PT_PRIVATE 96
+#define RTP_VERSION 2
+#define RTP_MAX_SDES 256 /* maximum text length for SDES */
+
+/* RTCP packets use 0.5 % of the bandwidth */
+#define RTCP_TX_RATIO_NUM 5
+#define RTCP_TX_RATIO_DEN 1000
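+/* i.e. 5/1000 == 0.5%: for roughly 1 Mbit/s of payload (~125000 bytes/s)
+   this budgets about 625 bytes/s of RTCP traffic (illustrative numbers). */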
+
+/* Structure listing useful vars to parse RTP packet payload */
+typedef struct rtp_payload_data_s
+{
+ int sizelength;
+ int indexlength;
+ int indexdeltalength;
+ int profile_level_id;
+ int streamtype;
+ int objecttype;
+ char *mode;
+
+ /* mpeg 4 AU headers */
+ struct AUHeaders {
+ int size;
+ int index;
+ int cts_flag;
+ int cts;
+ int dts_flag;
+ int dts;
+ int rap_flag;
+ int streamstate;
+ } *au_headers;
+ int nb_au_headers;
+ int au_headers_length_bytes;
+ int cur_au_index;
+} rtp_payload_data_t;
+
+typedef struct AVRtpPayloadType_s
+{
+ int pt;
+ const char enc_name[50]; /* XXX: why 50 ? */
+ enum CodecType codec_type;
+ enum CodecID codec_id;
+ int clock_rate;
+ int audio_channels;
+} AVRtpPayloadType_t;
+
+#if 0
+typedef enum {
+ RTCP_SR = 200,
+ RTCP_RR = 201,
+ RTCP_SDES = 202,
+ RTCP_BYE = 203,
+ RTCP_APP = 204
+} rtcp_type_t;
+
+typedef enum {
+ RTCP_SDES_END = 0,
+ RTCP_SDES_CNAME = 1,
+ RTCP_SDES_NAME = 2,
+ RTCP_SDES_EMAIL = 3,
+ RTCP_SDES_PHONE = 4,
+ RTCP_SDES_LOC = 5,
+ RTCP_SDES_TOOL = 6,
+ RTCP_SDES_NOTE = 7,
+ RTCP_SDES_PRIV = 8,
+ RTCP_SDES_IMG = 9,
+ RTCP_SDES_DOOR = 10,
+ RTCP_SDES_SOURCE = 11
+} rtcp_sdes_type_t;
+#endif
+
+extern AVRtpPayloadType_t AVRtpPayloadTypes[];
+#endif /* RTP_H */
diff --git a/contrib/ffmpeg/libavformat/rtp_h264.c b/contrib/ffmpeg/libavformat/rtp_h264.c
new file mode 100644
index 000000000..2568e9ea5
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp_h264.c
@@ -0,0 +1,419 @@
+/*
+ * RTP H264 Protocol (RFC3984)
+ * Copyright (c) 2006 Ryan Martell.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+* @file rtp_h264.c
+ * @brief H.264 / RTP Code (RFC3984)
+ * @author Ryan Martell <rdm4@martellventures.com>
+ *
+ * @note Notes:
+ * This currently supports packetization mode:
+ * Single NAL Unit Mode (0), or
+ * Non-Interleaved Mode (1). It currently does not support
+ * Interleaved Mode (2). (This requires implementing STAP-B, MTAP16, MTAP24 and FU-B packet types.)
+ *
+ * @note TODO:
+ * 1) RTCP sender reports for udp streams are required..
+ *
+ */
+
+#include "avformat.h"
+#include "mpegts.h"
+#include "bitstream.h"
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <assert.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+
+#include "rtp_internal.h"
+#include "rtp_h264.h"
+#include "base64.h"
+
+/**
+ RTP/H264 specific private data.
+*/
+typedef struct h264_rtp_extra_data {
+ unsigned long cookie; ///< sanity check, to make sure we get the pointer we're expecting.
+
+ //sdp setup parameters
+ uint8_t profile_idc; ///< from the sdp setup parameters.
+ uint8_t profile_iop; ///< from the sdp setup parameters.
+ uint8_t level_idc; ///< from the sdp setup parameters.
+ int packetization_mode; ///< from the sdp setup parameters.
+#ifdef DEBUG
+ int packet_types_received[32];
+#endif
+} h264_rtp_extra_data;
+
+#define MAGIC_COOKIE (0xdeadbeef) ///< Cookie for the extradata; to verify we are what we think we are, and that we haven't been freed.
+#define DEAD_COOKIE (0xdeaddead) ///< Cookie for the extradata; once it is freed.
+
+/* ---------------- private code */
+static void sdp_parse_fmtp_config_h264(AVStream * stream,
+ h264_rtp_extra_data * h264_data,
+ char *attr, char *value)
+{
+ AVCodecContext *codec = stream->codec;
+ assert(codec->codec_id == CODEC_ID_H264);
+ assert(h264_data != NULL);
+
+ if (!strcmp(attr, "packetization-mode")) {
+        av_log(NULL, AV_LOG_DEBUG, "H.264/RTP Packetization Mode: %d\n", atoi(value));
+        h264_data->packetization_mode = atoi(value);
+ /*
+ Packetization Mode:
+ 0 or not present: Single NAL mode (Only nals from 1-23 are allowed)
+ 1: Non-interleaved Mode: 1-23, 24 (STAP-A), 28 (FU-A) are allowed.
+ 2: Interleaved Mode: 25 (STAP-B), 26 (MTAP16), 27 (MTAP24), 28 (FU-A), and 29 (FU-B) are allowed.
+ */
+ if (h264_data->packetization_mode > 1)
+ av_log(stream, AV_LOG_ERROR,
+ "H.264/RTP Interleaved RTP mode is not supported yet.");
+ } else if (!strcmp(attr, "profile-level-id")) {
+ if (strlen(value) == 6) {
+ char buffer[3];
+ // 6 characters=3 bytes, in hex.
+ uint8_t profile_idc;
+ uint8_t profile_iop;
+ uint8_t level_idc;
+
+ buffer[0] = value[0]; buffer[1] = value[1]; buffer[2] = '\0';
+ profile_idc = strtol(buffer, NULL, 16);
+ buffer[0] = value[2]; buffer[1] = value[3];
+ profile_iop = strtol(buffer, NULL, 16);
+ buffer[0] = value[4]; buffer[1] = value[5];
+ level_idc = strtol(buffer, NULL, 16);
+
+ // set the parameters...
+ av_log(NULL, AV_LOG_DEBUG,
+ "H.264/RTP Profile IDC: %x Profile IOP: %x Level: %x\n",
+ profile_idc, profile_iop, level_idc);
+ h264_data->profile_idc = profile_idc;
+ h264_data->profile_iop = profile_iop;
+ h264_data->level_idc = level_idc;
+ }
+ } else if (!strcmp(attr, "sprop-parameter-sets")) {
+ uint8_t start_sequence[]= { 0, 0, 1 };
+ codec->extradata_size= 0;
+ codec->extradata= NULL;
+
+ while (*value) {
+ char base64packet[1024];
+ uint8_t decoded_packet[1024];
+ uint32_t packet_size;
+ char *dst = base64packet;
+
+ while (*value && *value != ','
+ && (dst - base64packet) < sizeof(base64packet) - 1) {
+ *dst++ = *value++;
+ }
+ *dst++ = '\0';
+
+ if (*value == ',')
+ value++;
+
+ packet_size= av_base64_decode(decoded_packet, base64packet, sizeof(decoded_packet));
+ if (packet_size) {
+ uint8_t *dest= av_malloc(packet_size+sizeof(start_sequence)+codec->extradata_size);
+ if(dest)
+ {
+ if(codec->extradata_size)
+ {
+ // av_realloc?
+ memcpy(dest, codec->extradata, codec->extradata_size);
+ av_free(codec->extradata);
+ }
+
+ memcpy(dest+codec->extradata_size, start_sequence, sizeof(start_sequence));
+ memcpy(dest+codec->extradata_size+sizeof(start_sequence), decoded_packet, packet_size);
+
+ codec->extradata= dest;
+ codec->extradata_size+= sizeof(start_sequence)+packet_size;
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "H.264/RTP Unable to allocate memory for extradata!");
+ }
+ }
+ }
+ av_log(NULL, AV_LOG_DEBUG, "H.264/RTP Extradata set to %p (size: %d)!", codec->extradata, codec->extradata_size);
+ }
+}
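+
+/* Worked example for the profile-level-id branch above: the common value
+ * "42e01e" splits into profile_idc 0x42 (66, Baseline), profile_iop 0xe0
+ * and level_idc 0x1e (30, i.e. level 3.0).  The sprop-parameter-sets
+ * branch turns each comma-separated base64 blob (typically one SPS and one
+ * PPS) into an Annex-B unit by prefixing the 00 00 01 start code and
+ * appending it to codec->extradata.
+ */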
+
+// return 0 on packet, no more left, 1 on packet, 1 on partial packet...
+static int h264_handle_packet(RTPDemuxContext * s,
+ AVPacket * pkt,
+ uint32_t * timestamp,
+ const uint8_t * buf,
+ int len)
+{
+    h264_rtp_extra_data *data = s->dynamic_protocol_context;
+ uint8_t nal = buf[0];
+ uint8_t type = (nal & 0x1f);
+ int result= 0;
+ uint8_t start_sequence[]= {0, 0, 1};
+
+ assert(data);
+ assert(data->cookie == MAGIC_COOKIE);
+ assert(buf);
+
+ if (type >= 1 && type <= 23)
+ type = 1; // simplify the case. (these are all the nal types used internally by the h264 codec)
+ switch (type) {
+ case 0: // undefined;
+ result= -1;
+ break;
+
+ case 1:
+ av_new_packet(pkt, len+sizeof(start_sequence));
+ memcpy(pkt->data, start_sequence, sizeof(start_sequence));
+ memcpy(pkt->data+sizeof(start_sequence), buf, len);
+#ifdef DEBUG
+ data->packet_types_received[nal & 0x1f]++;
+#endif
+ break;
+
+ case 24: // STAP-A (one packet, multiple nals)
+ // consume the STAP-A NAL
+ buf++;
+ len--;
+ // first we are going to figure out the total size....
+ {
+ int pass= 0;
+ int total_length= 0;
+ uint8_t *dst= NULL;
+
+ for(pass= 0; pass<2; pass++) {
+ const uint8_t *src= buf;
+ int src_len= len;
+
+ do {
+                    uint16_t nal_size = BE_16(src); // this is going to be a problem if unaligned (can it be?)
+
+ // consume the length of the aggregate...
+ src += 2;
+ src_len -= 2;
+
+ if (nal_size <= src_len) {
+ if(pass==0) {
+ // counting...
+ total_length+= sizeof(start_sequence)+nal_size;
+ } else {
+ // copying
+ assert(dst);
+ memcpy(dst, start_sequence, sizeof(start_sequence));
+ dst+= sizeof(start_sequence);
+ memcpy(dst, src, nal_size);
+#ifdef DEBUG
+ data->packet_types_received[*src & 0x1f]++;
+#endif
+ dst+= nal_size;
+ }
+ } else {
+ av_log(NULL, AV_LOG_ERROR,
+ "nal size exceeds length: %d %d\n", nal_size, src_len);
+ }
+
+ // eat what we handled...
+ src += nal_size;
+ src_len -= nal_size;
+
+ if (src_len < 0)
+ av_log(NULL, AV_LOG_ERROR,
+ "Consumed more bytes than we got! (%d)\n", src_len);
+ } while (src_len > 2); // because there could be rtp padding..
+
+ if(pass==0) {
+ // now we know the total size of the packet (with the start sequences added)
+ av_new_packet(pkt, total_length);
+ dst= pkt->data;
+ } else {
+ assert(dst-pkt->data==total_length);
+ }
+ }
+ }
+ break;
+
+ case 25: // STAP-B
+ case 26: // MTAP-16
+ case 27: // MTAP-24
+ case 29: // FU-B
+        av_log(NULL, AV_LOG_ERROR,
+               "Unhandled type (%d) (see the RFC for implementation details)\n",
+               type);
+ result= -1;
+ break;
+
+ case 28: // FU-A (fragmented nal)
+ buf++;
+ len--; // skip the fu_indicator
+ {
+ // these are the same as above, we just redo them here for clarity...
+ uint8_t fu_indicator = nal;
+ uint8_t fu_header = *buf; // read the fu_header.
+ uint8_t start_bit = (fu_header & 0x80) >> 7;
+// uint8_t end_bit = (fu_header & 0x40) >> 6;
+ uint8_t nal_type = (fu_header & 0x1f);
+ uint8_t reconstructed_nal;
+
+ // reconstruct this packet's true nal; only the data follows..
+ reconstructed_nal = fu_indicator & (0xe0); // the original nal forbidden bit and NRI are stored in this packet's nal;
+ reconstructed_nal |= (nal_type & 0x1f);
+
+ // skip the fu_header...
+ buf++;
+ len--;
+
+#ifdef DEBUG
+ if (start_bit)
+ data->packet_types_received[nal_type & 0x1f]++;
+#endif
+ if(start_bit) {
+ // copy in the start sequence, and the reconstructed nal....
+ av_new_packet(pkt, sizeof(start_sequence)+sizeof(nal)+len);
+ memcpy(pkt->data, start_sequence, sizeof(start_sequence));
+ pkt->data[sizeof(start_sequence)]= reconstructed_nal;
+ memcpy(pkt->data+sizeof(start_sequence)+sizeof(nal), buf, len);
+ } else {
+ av_new_packet(pkt, len);
+ memcpy(pkt->data, buf, len);
+ }
+ }
+ break;
+
+ case 30: // undefined
+ case 31: // undefined
+ default:
+ av_log(NULL, AV_LOG_ERROR, "Undefined type (%d)", type);
+ result= -1;
+ break;
+ }
+
+ return result;
+}
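+
+/* Worked example for the FU-A branch above: with fu_indicator 0x7c
+ * (forbidden 0, NRI 3, type 28) and fu_header 0x85 (S=1, E=0, type 5),
+ * the reconstructed NAL byte is (0x7c & 0xe0) | (0x85 & 0x1f) == 0x65,
+ * i.e. an IDR slice, which is written after the 00 00 01 start code on the
+ * first fragment; non-start fragments are returned as bare payload without
+ * a start code.
+ */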
+
+/* ---------------- public code */
+static void *h264_new_extradata()
+{
+ h264_rtp_extra_data *data =
+ av_mallocz(sizeof(h264_rtp_extra_data) +
+ FF_INPUT_BUFFER_PADDING_SIZE);
+
+ if (data) {
+ data->cookie = MAGIC_COOKIE;
+ }
+
+ return data;
+}
+
+static void h264_free_extradata(void *d)
+{
+ h264_rtp_extra_data *data = (h264_rtp_extra_data *) d;
+#ifdef DEBUG
+ int ii;
+
+ for (ii = 0; ii < 32; ii++) {
+ if (data->packet_types_received[ii])
+ av_log(NULL, AV_LOG_DEBUG, "Received %d packets of type %d\n",
+ data->packet_types_received[ii], ii);
+ }
+#endif
+
+ assert(data);
+ assert(data->cookie == MAGIC_COOKIE);
+
+ // avoid stale pointers (assert)
+ data->cookie = DEAD_COOKIE;
+
+ // and clear out this...
+ av_free(data);
+}
+
+static int parse_h264_sdp_line(AVStream * stream, void *data,
+ const char *line)
+{
+ AVCodecContext *codec = stream->codec;
+ h264_rtp_extra_data *h264_data = (h264_rtp_extra_data *) data;
+ const char *p = line;
+
+ assert(h264_data->cookie == MAGIC_COOKIE);
+
+ if (strstart(p, "framesize:", &p)) {
+ char buf1[50];
+ char *dst = buf1;
+
+ // remove the protocol identifier..
+ while (*p && *p == ' ') p++; // strip spaces.
+ while (*p && *p != ' ') p++; // eat protocol identifier
+ while (*p && *p == ' ') p++; // strip trailing spaces.
+        while (*p && *p != '-' && (dst - buf1) < sizeof(buf1) - 1) {
+ *dst++ = *p++;
+ }
+ *dst = '\0';
+
+ // a='framesize:96 320-240'
+ // set our parameters..
+ codec->width = atoi(buf1);
+ codec->height = atoi(p + 1); // skip the -
+ codec->pix_fmt = PIX_FMT_YUV420P;
+ } else if (strstart(p, "fmtp:", &p)) {
+ char attr[256];
+ char value[4096];
+
+ // remove the protocol identifier..
+ while (*p && *p == ' ') p++; // strip spaces.
+ while (*p && *p != ' ') p++; // eat protocol identifier
+ while (*p && *p == ' ') p++; // strip trailing spaces.
+
+ /* loop on each attribute */
+ while (rtsp_next_attr_and_value
+ (&p, attr, sizeof(attr), value, sizeof(value))) {
+ /* grab the codec extra_data from the config parameter of the fmtp line */
+ sdp_parse_fmtp_config_h264(stream, h264_data, attr, value);
+ }
+ } else if (strstart(p, "cliprect:", &p)) {
+ // could use this if we wanted.
+ }
+
+ av_set_pts_info(stream, 33, 1, 90000); // 33 should be right, because the pts is 64 bit? (done elsewhere; this is a one time thing)
+
+ return 0; // keep processing it the normal way...
+}
+
+/**
+This is the structure for expanding on the dynamic rtp protocols (makes everything static. yay!)
+*/
+RTPDynamicProtocolHandler ff_h264_dynamic_handler = {
+ "H264",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_H264,
+ parse_h264_sdp_line,
+ h264_new_extradata,
+ h264_free_extradata,
+ h264_handle_packet
+};
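+
+/* Minimal sketch of how a new dynamic payload handler would plug in,
+ * mirroring the way this H264 handler is registered from
+ * av_register_rtp_dynamic_payload_handlers() in rtp.c (all "my_*" names
+ * below are hypothetical): */
+#if 0
+static RTPDynamicProtocolHandler my_dynamic_handler = {
+    "MY-ENCODING",          /* enc_name as announced in the SDP rtpmap */
+    CODEC_TYPE_VIDEO,
+    CODEC_ID_NONE,
+    my_parse_sdp_a_line,
+    my_new_extradata,
+    my_free_extradata,
+    my_handle_packet
+};
+/* ...and in rtp.c: register_dynamic_payload_handler(&my_dynamic_handler); */
+#endif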
diff --git a/contrib/ffmpeg/libavformat/rtp_h264.h b/contrib/ffmpeg/libavformat/rtp_h264.h
new file mode 100644
index 000000000..19508574d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp_h264.h
@@ -0,0 +1,26 @@
+/*
+ * RTP H264 Protocol (RFC3984)
+ * Copyright (c) 2006 Ryan Martell.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef RTP_H264_H
+#define RTP_H264_H
+
+extern RTPDynamicProtocolHandler ff_h264_dynamic_handler;
+#endif /* RTP_H264_H */
diff --git a/contrib/ffmpeg/libavformat/rtp_internal.h b/contrib/ffmpeg/libavformat/rtp_internal.h
new file mode 100644
index 000000000..3edcf49c8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtp_internal.h
@@ -0,0 +1,110 @@
+/*
+ * RTP definitions
+ * Copyright (c) 2006 Ryan Martell <rdm4@martellventures.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+// this is a bit of a misnomer, because rtp & rtsp internal structures and prototypes are in here.
+#ifndef RTP_INTERNAL_H
+#define RTP_INTERNAL_H
+
+// these statistics are used for rtcp receiver reports...
+typedef struct {
+ uint16_t max_seq; ///< highest sequence number seen
+ uint32_t cycles; ///< shifted count of sequence number cycles
+ uint32_t base_seq; ///< base sequence number
+ uint32_t bad_seq; ///< last bad sequence number + 1
+    int probation; ///< sequential packets till source is valid
+ int received; ///< packets received
+ int expected_prior; ///< packets expected in last interval
+ int received_prior; ///< packets received in last interval
+ uint32_t transit; ///< relative transit time for previous packet
+ uint32_t jitter; ///< estimated jitter.
+} RTPStatistics;
+
+
+typedef int (*DynamicPayloadPacketHandlerProc) (struct RTPDemuxContext * s,
+ AVPacket * pkt,
+ uint32_t *timestamp,
+ const uint8_t * buf,
+ int len);
+
+typedef struct RTPDynamicProtocolHandler_s {
+ // fields from AVRtpDynamicPayloadType_s
+ const char enc_name[50]; /* XXX: still why 50 ? ;-) */
+ enum CodecType codec_type;
+ enum CodecID codec_id;
+
+ // may be null
+ int (*parse_sdp_a_line) (AVStream * stream,
+ void *protocol_data,
+ const char *line); ///< Parse the a= line from the sdp field
+ void *(*open) (); ///< allocate any data needed by the rtp parsing for this dynamic data.
+ void (*close)(void *protocol_data); ///< free any data needed by the rtp parsing for this dynamic data.
+ DynamicPayloadPacketHandlerProc parse_packet; ///< parse handler for this dynamic packet.
+
+ struct RTPDynamicProtocolHandler_s *next;
+} RTPDynamicProtocolHandler;
+
+// moved out of rtp.c, because the h264 decoder needs to know about this structure..
+struct RTPDemuxContext {
+ AVFormatContext *ic;
+ AVStream *st;
+ int payload_type;
+ uint32_t ssrc;
+ uint16_t seq;
+ uint32_t timestamp;
+ uint32_t base_timestamp;
+ uint32_t cur_timestamp;
+ int max_payload_size;
+ struct MpegTSContext *ts; /* only used for MP2T payloads */
+ int read_buf_index;
+ int read_buf_size;
+ /* used to send back RTCP RR */
+ URLContext *rtp_ctx;
+ char hostname[256];
+
+ RTPStatistics statistics; ///< Statistics for this stream (used by RTCP receiver reports)
+
+ /* rtcp sender statistics receive */
+ int64_t last_rtcp_ntp_time; // TODO: move into statistics
+ int64_t first_rtcp_ntp_time; // TODO: move into statistics
+ uint32_t last_rtcp_timestamp; // TODO: move into statistics
+
+ /* rtcp sender statistics */
+ unsigned int packet_count; // TODO: move into statistics (outgoing)
+ unsigned int octet_count; // TODO: move into statistics (outgoing)
+ unsigned int last_octet_count; // TODO: move into statistics (outgoing)
+ int first_packet;
+ /* buffer for output */
+ uint8_t buf[RTP_MAX_PACKET_LENGTH];
+ uint8_t *buf_ptr;
+
+ /* special infos for au headers parsing */
+ rtp_payload_data_t *rtp_payload_data; // TODO: Move into dynamic payload handlers
+
+ /* dynamic payload stuff */
+ DynamicPayloadPacketHandlerProc parse_packet; ///< This is also copied from the dynamic protocol handler structure
+ void *dynamic_protocol_context; ///< This is a copy from the values setup from the sdp parsing, in rtsp.c don't free me.
+};
+
+extern RTPDynamicProtocolHandler *RTPFirstDynamicPayloadHandler;
+
+int rtsp_next_attr_and_value(const char **p, char *attr, int attr_size, char *value, int value_size); ///< from rtsp.c, but used by rtp dynamic protocol handlers.
+#endif /* RTP_INTERNAL_H */
+
diff --git a/contrib/ffmpeg/libavformat/rtpproto.c b/contrib/ffmpeg/libavformat/rtpproto.c
new file mode 100644
index 000000000..d31c509c2
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtpproto.c
@@ -0,0 +1,303 @@
+/*
+ * RTP network protocol
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#include <unistd.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+#include <fcntl.h>
+
+#define RTP_TX_BUF_SIZE (64 * 1024)
+#define RTP_RX_BUF_SIZE (128 * 1024)
+
+typedef struct RTPContext {
+ URLContext *rtp_hd, *rtcp_hd;
+ int rtp_fd, rtcp_fd;
+} RTPContext;
+
+/**
+ * If no filename is given to av_open_input_file because you want to
+ * get the local port first, then you must call this function to set
+ * the remote server address.
+ *
+ * @param h RTP url context
+ * @param uri of the remote server
+ * @return zero if no error.
+ */
+int rtp_set_remote_url(URLContext *h, const char *uri)
+{
+ RTPContext *s = h->priv_data;
+ char hostname[256];
+ int port;
+
+ char buf[1024];
+ char path[1024];
+
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port,
+ path, sizeof(path), uri);
+
+ snprintf(buf, sizeof(buf), "udp://%s:%d%s", hostname, port, path);
+ udp_set_remote_url(s->rtp_hd, buf);
+
+ snprintf(buf, sizeof(buf), "udp://%s:%d%s", hostname, port + 1, path);
+ udp_set_remote_url(s->rtcp_hd, buf);
+ return 0;
+}
+
+
+/* add option to url of the form:
+   "http://host:port/path?option1=val1&option2=val2..." */
+static void url_add_option(char *buf, int buf_size, const char *fmt, ...)
+{
+ char buf1[1024];
+ va_list ap;
+
+ va_start(ap, fmt);
+ if (strchr(buf, '?'))
+ pstrcat(buf, buf_size, "&");
+ else
+ pstrcat(buf, buf_size, "?");
+ vsnprintf(buf1, sizeof(buf1), fmt, ap);
+ pstrcat(buf, buf_size, buf1);
+ va_end(ap);
+}
+
+static void build_udp_url(char *buf, int buf_size,
+ const char *hostname, int port,
+ int local_port, int multicast, int ttl)
+{
+ snprintf(buf, buf_size, "udp://%s:%d", hostname, port);
+ if (local_port >= 0)
+ url_add_option(buf, buf_size, "localport=%d", local_port);
+ if (multicast)
+ url_add_option(buf, buf_size, "multicast=1", multicast);
+ if (ttl >= 0)
+ url_add_option(buf, buf_size, "ttl=%d", ttl);
+}
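+
+/* Example of the URLs built above (illustrative values): opening
+ * "rtp://224.1.2.3:5004?multicast=1&ttl=16&localport=5000" ends up as
+ *     udp://224.1.2.3:5004?localport=5000&multicast=1&ttl=16   (RTP)
+ *     udp://224.1.2.3:5005?localport=5001&multicast=1&ttl=16   (RTCP)
+ * i.e. RTCP always rides on the next port, as set up in rtp_open() below.
+ */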
+
+/*
+ * url syntax: rtp://host:port[?option=val...]
+ * option: 'multicast=1' : enable multicast
+ * 'ttl=n' : set the ttl value (for multicast only)
+ * 'localport=n' : set the local port to n
+ *
+ */
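+/* e.g. (illustrative address/ports) "rtp://224.2.0.1:5004?multicast=1&ttl=16&localport=5000"
+   opens the RTP stream on port 5004 and the matching RTCP stream on port 5005 */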
+static int rtp_open(URLContext *h, const char *uri, int flags)
+{
+ RTPContext *s;
+ int port, is_output, is_multicast, ttl, local_port;
+ char hostname[256];
+ char buf[1024];
+ char path[1024];
+ const char *p;
+
+ is_output = (flags & URL_WRONLY);
+
+ s = av_mallocz(sizeof(RTPContext));
+ if (!s)
+ return -ENOMEM;
+ h->priv_data = s;
+
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port,
+ path, sizeof(path), uri);
+ /* extract parameters */
+ is_multicast = 0;
+ ttl = -1;
+ local_port = -1;
+ p = strchr(uri, '?');
+ if (p) {
+ is_multicast = find_info_tag(buf, sizeof(buf), "multicast", p);
+ if (find_info_tag(buf, sizeof(buf), "ttl", p)) {
+ ttl = strtol(buf, NULL, 10);
+ }
+ if (find_info_tag(buf, sizeof(buf), "localport", p)) {
+ local_port = strtol(buf, NULL, 10);
+ }
+ }
+
+ build_udp_url(buf, sizeof(buf),
+ hostname, port, local_port, is_multicast, ttl);
+ if (url_open(&s->rtp_hd, buf, flags) < 0)
+ goto fail;
+ local_port = udp_get_local_port(s->rtp_hd);
+    /* XXX: need to open another connection if the port is not even */
+
+    /* well, we should remove localport from the path */
+
+ build_udp_url(buf, sizeof(buf),
+ hostname, port + 1, local_port + 1, is_multicast, ttl);
+ if (url_open(&s->rtcp_hd, buf, flags) < 0)
+ goto fail;
+
+    /* just to ease handle access. XXX: direct handle access should be
+       removed */
+ s->rtp_fd = udp_get_file_handle(s->rtp_hd);
+ s->rtcp_fd = udp_get_file_handle(s->rtcp_hd);
+
+ h->max_packet_size = url_get_max_packet_size(s->rtp_hd);
+ h->is_streamed = 1;
+ return 0;
+
+ fail:
+ if (s->rtp_hd)
+ url_close(s->rtp_hd);
+ if (s->rtcp_hd)
+ url_close(s->rtcp_hd);
+ av_free(s);
+ return AVERROR_IO;
+}
+
+static int rtp_read(URLContext *h, uint8_t *buf, int size)
+{
+ RTPContext *s = h->priv_data;
+ struct sockaddr_in from;
+ socklen_t from_len;
+ int len, fd_max, n;
+ fd_set rfds;
+#if 0
+ for(;;) {
+ from_len = sizeof(from);
+ len = recvfrom (s->rtp_fd, buf, size, 0,
+ (struct sockaddr *)&from, &from_len);
+ if (len < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ return AVERROR_IO;
+ }
+ break;
+ }
+#else
+ for(;;) {
+ /* build fdset to listen to RTP and RTCP packets */
+ FD_ZERO(&rfds);
+ fd_max = s->rtp_fd;
+ FD_SET(s->rtp_fd, &rfds);
+ if (s->rtcp_fd > fd_max)
+ fd_max = s->rtcp_fd;
+ FD_SET(s->rtcp_fd, &rfds);
+ n = select(fd_max + 1, &rfds, NULL, NULL, NULL);
+ if (n > 0) {
+ /* first try RTCP */
+ if (FD_ISSET(s->rtcp_fd, &rfds)) {
+ from_len = sizeof(from);
+ len = recvfrom (s->rtcp_fd, buf, size, 0,
+ (struct sockaddr *)&from, &from_len);
+ if (len < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ return AVERROR_IO;
+ }
+ break;
+ }
+ /* then RTP */
+ if (FD_ISSET(s->rtp_fd, &rfds)) {
+ from_len = sizeof(from);
+ len = recvfrom (s->rtp_fd, buf, size, 0,
+ (struct sockaddr *)&from, &from_len);
+ if (len < 0) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ return AVERROR_IO;
+ }
+ break;
+ }
+ }
+ }
+#endif
+ return len;
+}
+
+static int rtp_write(URLContext *h, uint8_t *buf, int size)
+{
+ RTPContext *s = h->priv_data;
+ int ret;
+ URLContext *hd;
+
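+    /* RTCP packet types 200-204 (SR, RR, SDES, BYE, APP) are sent on the RTCP
+       socket; anything else is assumed to be RTP */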
+ if (buf[1] >= 200 && buf[1] <= 204) {
+ /* RTCP payload type */
+ hd = s->rtcp_hd;
+ } else {
+ /* RTP payload type */
+ hd = s->rtp_hd;
+ }
+
+ ret = url_write(hd, buf, size);
+#if 0
+ {
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = 10 * 1000000;
+ nanosleep(&ts, NULL);
+ }
+#endif
+ return ret;
+}
+
+static int rtp_close(URLContext *h)
+{
+ RTPContext *s = h->priv_data;
+
+ url_close(s->rtp_hd);
+ url_close(s->rtcp_hd);
+ av_free(s);
+ return 0;
+}
+
+/**
+ * Return the local port used by the RTP connection.
+ * @param h URL context of the RTP stream
+ * @return the local port number
+ */
+int rtp_get_local_port(URLContext *h)
+{
+ RTPContext *s = h->priv_data;
+ return udp_get_local_port(s->rtp_hd);
+}
+
+/**
+ * Return the RTP and RTCP file handles so that select() can wait on several
+ * RTP streams at the same time.
+ * @param h media file context
+ */
+void rtp_get_file_handles(URLContext *h, int *prtp_fd, int *prtcp_fd)
+{
+ RTPContext *s = h->priv_data;
+
+ *prtp_fd = s->rtp_fd;
+ *prtcp_fd = s->rtcp_fd;
+}
+
+URLProtocol rtp_protocol = {
+ "rtp",
+ rtp_open,
+ rtp_read,
+ rtp_write,
+ NULL, /* seek */
+ rtp_close,
+};
diff --git a/contrib/ffmpeg/libavformat/rtsp.c b/contrib/ffmpeg/libavformat/rtsp.c
new file mode 100644
index 000000000..787cdd685
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtsp.c
@@ -0,0 +1,1493 @@
+/*
+ * RTSP/SDP client
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#include <unistd.h> /* for select() prototype */
+#include <sys/time.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+
+#include "rtp_internal.h"
+
+//#define DEBUG
+//#define DEBUG_RTP_TCP
+
+enum RTSPClientState {
+ RTSP_STATE_IDLE,
+ RTSP_STATE_PLAYING,
+ RTSP_STATE_PAUSED,
+};
+
+typedef struct RTSPState {
+    URLContext *rtsp_hd; /* RTSP TCP connection handle */
+ int nb_rtsp_streams;
+ struct RTSPStream **rtsp_streams;
+
+ enum RTSPClientState state;
+ int64_t seek_timestamp;
+
+ /* XXX: currently we use unbuffered input */
+ // ByteIOContext rtsp_gb;
+ int seq; /* RTSP command sequence number */
+ char session_id[512];
+ enum RTSPProtocol protocol;
+ char last_reply[2048]; /* XXX: allocate ? */
+ RTPDemuxContext *cur_rtp;
+} RTSPState;
+
+typedef struct RTSPStream {
+ URLContext *rtp_handle; /* RTP stream handle */
+ RTPDemuxContext *rtp_ctx; /* RTP parse context */
+
+ int stream_index; /* corresponding stream index, if any. -1 if none (MPEG2TS case) */
+ int interleaved_min, interleaved_max; /* interleave ids, if TCP transport */
+ char control_url[1024]; /* url for this stream (from SDP) */
+
+ int sdp_port; /* port (from SDP content - not used in RTSP) */
+ struct in_addr sdp_ip; /* IP address (from SDP content - not used in RTSP) */
+ int sdp_ttl; /* IP TTL (from SDP content - not used in RTSP) */
+ int sdp_payload_type; /* payload type - only used in SDP */
+    rtp_payload_data_t rtp_payload_data; /* rtp payload parsing info from SDP */
+
+ RTPDynamicProtocolHandler *dynamic_handler; ///< Only valid if it's a dynamic protocol. (This is the handler structure)
+ void *dynamic_protocol_context; ///< Only valid if it's a dynamic protocol. (This is any private data associated with the dynamic protocol)
+} RTSPStream;
+
+static int rtsp_read_play(AVFormatContext *s);
+
+/* XXX: currently, the only way to change the protocol is to change
+   this variable */
+
+int rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_UDP);
+
+FFRTSPCallback *ff_rtsp_callback = NULL;
+
+static int rtsp_probe(AVProbeData *p)
+{
+ if (strstart(p->filename, "rtsp:", NULL))
+ return AVPROBE_SCORE_MAX;
+ return 0;
+}
+
+static int redir_isspace(int c)
+{
+ return (c == ' ' || c == '\t' || c == '\n' || c == '\r');
+}
+
+static void skip_spaces(const char **pp)
+{
+ const char *p;
+ p = *pp;
+ while (redir_isspace(*p))
+ p++;
+ *pp = p;
+}
+
+static void get_word_sep(char *buf, int buf_size, const char *sep,
+ const char **pp)
+{
+ const char *p;
+ char *q;
+
+ p = *pp;
+ if (*p == '/')
+ p++;
+ skip_spaces(&p);
+ q = buf;
+ while (!strchr(sep, *p) && *p != '\0') {
+ if ((q - buf) < buf_size - 1)
+ *q++ = *p;
+ p++;
+ }
+ if (buf_size > 0)
+ *q = '\0';
+ *pp = p;
+}
+
+static void get_word(char *buf, int buf_size, const char **pp)
+{
+ const char *p;
+ char *q;
+
+ p = *pp;
+ skip_spaces(&p);
+ q = buf;
+ while (!redir_isspace(*p) && *p != '\0') {
+ if ((q - buf) < buf_size - 1)
+ *q++ = *p;
+ p++;
+ }
+ if (buf_size > 0)
+ *q = '\0';
+ *pp = p;
+}
+
+/* parse the rtpmap description: <codec_name>/<clock_rate>[/<other
+ params>] */
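+/* e.g. for the SDP line "a=rtpmap:96 MP4V-ES/90000" this function receives
+   "MP4V-ES/90000" together with payload type 96 (values illustrative) */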
+static int sdp_parse_rtpmap(AVCodecContext *codec, RTSPStream *rtsp_st, int payload_type, const char *p)
+{
+ char buf[256];
+ int i;
+ AVCodec *c;
+ const char *c_name;
+
+    /* Loop over AVRtpDynamicPayloadTypes[] and AVRtpPayloadTypes[] and
+ see if we can handle this kind of payload */
+ get_word_sep(buf, sizeof(buf), "/", &p);
+ if (payload_type >= RTP_PT_PRIVATE) {
+ RTPDynamicProtocolHandler *handler= RTPFirstDynamicPayloadHandler;
+ while(handler) {
+ if (!strcmp(buf, handler->enc_name) && (codec->codec_type == handler->codec_type)) {
+ codec->codec_id = handler->codec_id;
+ rtsp_st->dynamic_handler= handler;
+ if(handler->open) {
+ rtsp_st->dynamic_protocol_context= handler->open();
+ }
+ break;
+ }
+ handler= handler->next;
+ }
+ } else {
+        /* Standard payload type (see http://www.iana.org/assignments/rtp-parameters) */
+ /* search into AVRtpPayloadTypes[] */
+ for (i = 0; AVRtpPayloadTypes[i].pt >= 0; ++i)
+ if (!strcmp(buf, AVRtpPayloadTypes[i].enc_name) && (codec->codec_type == AVRtpPayloadTypes[i].codec_type)){
+ codec->codec_id = AVRtpPayloadTypes[i].codec_id;
+ break;
+ }
+ }
+
+ c = avcodec_find_decoder(codec->codec_id);
+ if (c && c->name)
+ c_name = c->name;
+ else
+ c_name = (char *)NULL;
+
+ if (c_name) {
+ get_word_sep(buf, sizeof(buf), "/", &p);
+ i = atoi(buf);
+ switch (codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ av_log(codec, AV_LOG_DEBUG, " audio codec set to : %s\n", c_name);
+ codec->sample_rate = RTSP_DEFAULT_AUDIO_SAMPLERATE;
+ codec->channels = RTSP_DEFAULT_NB_AUDIO_CHANNELS;
+ if (i > 0) {
+ codec->sample_rate = i;
+ get_word_sep(buf, sizeof(buf), "/", &p);
+ i = atoi(buf);
+ if (i > 0)
+ codec->channels = i;
+                // TODO: there is a bug here; if the stream is mono and below 22000 Hz, faad
+                // upconverts it to stereo at twice the frequency. That is fine in itself, but
+                // the sample rate is being set here from the SDP line. A patch is forthcoming. (rdm)
+ }
+ av_log(codec, AV_LOG_DEBUG, " audio samplerate set to : %i\n", codec->sample_rate);
+ av_log(codec, AV_LOG_DEBUG, " audio channels set to : %i\n", codec->channels);
+ break;
+ case CODEC_TYPE_VIDEO:
+ av_log(codec, AV_LOG_DEBUG, " video codec set to : %s\n", c_name);
+ break;
+ default:
+ break;
+ }
+ return 0;
+ }
+
+ return -1;
+}
+
+/* return the length and optionally the data */
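+/* Hex digits are accumulated into 'v'; starting from 1, the 0x100 marker bit
+   becomes set once two nibbles have been shifted in, i.e. one output byte. */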
+static int hex_to_data(uint8_t *data, const char *p)
+{
+ int c, len, v;
+
+ len = 0;
+ v = 1;
+ for(;;) {
+ skip_spaces(&p);
+        if (*p == '\0')
+ break;
+ c = toupper((unsigned char)*p++);
+ if (c >= '0' && c <= '9')
+ c = c - '0';
+ else if (c >= 'A' && c <= 'F')
+ c = c - 'A' + 10;
+ else
+ break;
+ v = (v << 4) | c;
+ if (v & 0x100) {
+ if (data)
+ data[len] = v;
+ len++;
+ v = 1;
+ }
+ }
+ return len;
+}
+
+static void sdp_parse_fmtp_config(AVCodecContext *codec, char *attr, char *value)
+{
+ switch (codec->codec_id) {
+ case CODEC_ID_MPEG4:
+ case CODEC_ID_AAC:
+ if (!strcmp(attr, "config")) {
+            /* decode the hex-encoded parameter */
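+            /* e.g. for MPEG-4/AAC an SDP "config=" value such as "1210"
+               (hex digits illustrative) is decoded into binary extradata here */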
+ int len = hex_to_data(NULL, value);
+ codec->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!codec->extradata)
+ return;
+ codec->extradata_size = len;
+ hex_to_data(codec->extradata, value);
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+typedef struct attrname_map
+{
+ const char *str;
+ uint16_t type;
+ uint32_t offset;
+} attrname_map_t;
+
+/* All known fmtp parameters and the corresponding RTPAttrTypeEnum */
+#define ATTR_NAME_TYPE_INT 0
+#define ATTR_NAME_TYPE_STR 1
+static attrname_map_t attr_names[]=
+{
+ {"SizeLength", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, sizelength)},
+ {"IndexLength", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, indexlength)},
+ {"IndexDeltaLength", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, indexdeltalength)},
+ {"profile-level-id", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, profile_level_id)},
+ {"StreamType", ATTR_NAME_TYPE_INT, offsetof(rtp_payload_data_t, streamtype)},
+ {"mode", ATTR_NAME_TYPE_STR, offsetof(rtp_payload_data_t, mode)},
+ {NULL, -1, -1},
+};
+
+/** Parse one attribute from the fmtp 'a=' line of an SDP response. This is broken out as a function
+* because it is also used in rtp_h264.c, which is forthcoming.
+*/
+int rtsp_next_attr_and_value(const char **p, char *attr, int attr_size, char *value, int value_size)
+{
+ skip_spaces(p);
+ if(**p)
+ {
+ get_word_sep(attr, attr_size, "=", p);
+ if (**p == '=')
+ (*p)++;
+ get_word_sep(value, value_size, ";", p);
+ if (**p == ';')
+ (*p)++;
+ return 1;
+ }
+ return 0;
+}
+
+/* parse a SDP line and save stream attributes */
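+/* An fmtp attribute string handled below typically looks like (values illustrative):
+   "profile-level-id=1;mode=AAC-hbr;SizeLength=13;IndexLength=3;IndexDeltaLength=3" */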
+static void sdp_parse_fmtp(AVStream *st, const char *p)
+{
+ char attr[256];
+ char value[4096];
+ int i;
+
+ RTSPStream *rtsp_st = st->priv_data;
+ AVCodecContext *codec = st->codec;
+ rtp_payload_data_t *rtp_payload_data = &rtsp_st->rtp_payload_data;
+
+ /* loop on each attribute */
+ while(rtsp_next_attr_and_value(&p, attr, sizeof(attr), value, sizeof(value)))
+ {
+ /* grab the codec extra_data from the config parameter of the fmtp line */
+ sdp_parse_fmtp_config(codec, attr, value);
+ /* Looking for a known attribute */
+ for (i = 0; attr_names[i].str; ++i) {
+ if (!strcasecmp(attr, attr_names[i].str)) {
+ if (attr_names[i].type == ATTR_NAME_TYPE_INT)
+ *(int *)((char *)rtp_payload_data + attr_names[i].offset) = atoi(value);
+ else if (attr_names[i].type == ATTR_NAME_TYPE_STR)
+ *(char **)((char *)rtp_payload_data + attr_names[i].offset) = av_strdup(value);
+ }
+ }
+ }
+}
+
+/** Parse a string \p in the form of Range:npt=xx-xx, and determine the start
+ * and end time.
+ * Used for seeking in the rtp stream.
+ */
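+/* e.g. a header value of "npt=0.000-34.230" (illustrative) yields a start of 0 and
+   an end of roughly 34.23 seconds, both in AV_TIME_BASE (microsecond) units */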
+static void rtsp_parse_range_npt(const char *p, int64_t *start, int64_t *end)
+{
+ char buf[256];
+
+ skip_spaces(&p);
+ if (!stristart(p, "npt=", &p))
+ return;
+
+ *start = AV_NOPTS_VALUE;
+ *end = AV_NOPTS_VALUE;
+
+ get_word_sep(buf, sizeof(buf), "-", &p);
+ *start = parse_date(buf, 1);
+ if (*p == '-') {
+ p++;
+ get_word_sep(buf, sizeof(buf), "-", &p);
+ *end = parse_date(buf, 1);
+ }
+// av_log(NULL, AV_LOG_DEBUG, "Range Start: %lld\n", *start);
+// av_log(NULL, AV_LOG_DEBUG, "Range End: %lld\n", *end);
+}
+
+typedef struct SDPParseState {
+ /* SDP only */
+ struct in_addr default_ip;
+ int default_ttl;
+} SDPParseState;
+
+static void sdp_parse_line(AVFormatContext *s, SDPParseState *s1,
+ int letter, const char *buf)
+{
+ RTSPState *rt = s->priv_data;
+ char buf1[64], st_type[64];
+ const char *p;
+ int codec_type, payload_type, i;
+ AVStream *st;
+ RTSPStream *rtsp_st;
+ struct in_addr sdp_ip;
+ int ttl;
+
+#ifdef DEBUG
+ printf("sdp: %c='%s'\n", letter, buf);
+#endif
+
+ p = buf;
+ switch(letter) {
+ case 'c':
+ get_word(buf1, sizeof(buf1), &p);
+ if (strcmp(buf1, "IN") != 0)
+ return;
+ get_word(buf1, sizeof(buf1), &p);
+ if (strcmp(buf1, "IP4") != 0)
+ return;
+ get_word_sep(buf1, sizeof(buf1), "/", &p);
+ if (inet_aton(buf1, &sdp_ip) == 0)
+ return;
+ ttl = 16;
+ if (*p == '/') {
+ p++;
+ get_word_sep(buf1, sizeof(buf1), "/", &p);
+ ttl = atoi(buf1);
+ }
+ if (s->nb_streams == 0) {
+ s1->default_ip = sdp_ip;
+ s1->default_ttl = ttl;
+ } else {
+ st = s->streams[s->nb_streams - 1];
+ rtsp_st = st->priv_data;
+ rtsp_st->sdp_ip = sdp_ip;
+ rtsp_st->sdp_ttl = ttl;
+ }
+ break;
+ case 's':
+ pstrcpy(s->title, sizeof(s->title), p);
+ break;
+ case 'i':
+ if (s->nb_streams == 0) {
+ pstrcpy(s->comment, sizeof(s->comment), p);
+ break;
+ }
+ break;
+ case 'm':
+ /* new stream */
+ get_word(st_type, sizeof(st_type), &p);
+ if (!strcmp(st_type, "audio")) {
+ codec_type = CODEC_TYPE_AUDIO;
+ } else if (!strcmp(st_type, "video")) {
+ codec_type = CODEC_TYPE_VIDEO;
+ } else {
+ return;
+ }
+ rtsp_st = av_mallocz(sizeof(RTSPStream));
+ if (!rtsp_st)
+ return;
+ rtsp_st->stream_index = -1;
+ dynarray_add(&rt->rtsp_streams, &rt->nb_rtsp_streams, rtsp_st);
+
+ rtsp_st->sdp_ip = s1->default_ip;
+ rtsp_st->sdp_ttl = s1->default_ttl;
+
+ get_word(buf1, sizeof(buf1), &p); /* port */
+ rtsp_st->sdp_port = atoi(buf1);
+
+ get_word(buf1, sizeof(buf1), &p); /* protocol (ignored) */
+
+ /* XXX: handle list of formats */
+ get_word(buf1, sizeof(buf1), &p); /* format list */
+ rtsp_st->sdp_payload_type = atoi(buf1);
+
+ if (!strcmp(AVRtpPayloadTypes[rtsp_st->sdp_payload_type].enc_name, "MP2T")) {
+ /* no corresponding stream */
+ } else {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return;
+ st->priv_data = rtsp_st;
+ rtsp_st->stream_index = st->index;
+ st->codec->codec_type = codec_type;
+ if (rtsp_st->sdp_payload_type < RTP_PT_PRIVATE) {
+ /* if standard payload type, we can find the codec right now */
+ rtp_get_codec_info(st->codec, rtsp_st->sdp_payload_type);
+ }
+ }
+ /* put a default control url */
+ pstrcpy(rtsp_st->control_url, sizeof(rtsp_st->control_url), s->filename);
+ break;
+ case 'a':
+ if (strstart(p, "control:", &p) && s->nb_streams > 0) {
+ char proto[32];
+ /* get the control url */
+ st = s->streams[s->nb_streams - 1];
+ rtsp_st = st->priv_data;
+
+ /* XXX: may need to add full url resolution */
+ url_split(proto, sizeof(proto), NULL, 0, NULL, 0, NULL, NULL, 0, p);
+ if (proto[0] == '\0') {
+ /* relative control URL */
+ pstrcat(rtsp_st->control_url, sizeof(rtsp_st->control_url), "/");
+ pstrcat(rtsp_st->control_url, sizeof(rtsp_st->control_url), p);
+ } else {
+ pstrcpy(rtsp_st->control_url, sizeof(rtsp_st->control_url), p);
+ }
+ } else if (strstart(p, "rtpmap:", &p)) {
+ /* NOTE: rtpmap is only supported AFTER the 'm=' tag */
+ get_word(buf1, sizeof(buf1), &p);
+ payload_type = atoi(buf1);
+ for(i = 0; i < s->nb_streams;i++) {
+ st = s->streams[i];
+ rtsp_st = st->priv_data;
+ if (rtsp_st->sdp_payload_type == payload_type) {
+ sdp_parse_rtpmap(st->codec, rtsp_st, payload_type, p);
+ }
+ }
+ } else if (strstart(p, "fmtp:", &p)) {
+ /* NOTE: fmtp is only supported AFTER the 'a=rtpmap:xxx' tag */
+ get_word(buf1, sizeof(buf1), &p);
+ payload_type = atoi(buf1);
+ for(i = 0; i < s->nb_streams;i++) {
+ st = s->streams[i];
+ rtsp_st = st->priv_data;
+ if (rtsp_st->sdp_payload_type == payload_type) {
+ if(rtsp_st->dynamic_handler && rtsp_st->dynamic_handler->parse_sdp_a_line) {
+ if(!rtsp_st->dynamic_handler->parse_sdp_a_line(st, rtsp_st->dynamic_protocol_context, buf)) {
+ sdp_parse_fmtp(st, p);
+ }
+ } else {
+ sdp_parse_fmtp(st, p);
+ }
+ }
+ }
+ } else if(strstart(p, "framesize:", &p)) {
+ // let dynamic protocol handlers have a stab at the line.
+ get_word(buf1, sizeof(buf1), &p);
+ payload_type = atoi(buf1);
+ for(i = 0; i < s->nb_streams;i++) {
+ st = s->streams[i];
+ rtsp_st = st->priv_data;
+ if (rtsp_st->sdp_payload_type == payload_type) {
+ if(rtsp_st->dynamic_handler && rtsp_st->dynamic_handler->parse_sdp_a_line) {
+ rtsp_st->dynamic_handler->parse_sdp_a_line(st, rtsp_st->dynamic_protocol_context, buf);
+ }
+ }
+ }
+ } else if(strstart(p, "range:", &p)) {
+ int64_t start, end;
+
+ // this is so that seeking on a streamed file can work.
+ rtsp_parse_range_npt(p, &start, &end);
+ s->start_time= start;
+ s->duration= (end==AV_NOPTS_VALUE)?AV_NOPTS_VALUE:end-start; // AV_NOPTS_VALUE means live broadcast (and can't seek)
+ }
+ break;
+ }
+}
+
+static int sdp_parse(AVFormatContext *s, const char *content)
+{
+ const char *p;
+ int letter;
+ char buf[1024], *q;
+ SDPParseState sdp_parse_state, *s1 = &sdp_parse_state;
+
+ memset(s1, 0, sizeof(SDPParseState));
+ p = content;
+ for(;;) {
+ skip_spaces(&p);
+ letter = *p;
+ if (letter == '\0')
+ break;
+ p++;
+ if (*p != '=')
+ goto next_line;
+ p++;
+ /* get the content */
+ q = buf;
+ while (*p != '\n' && *p != '\r' && *p != '\0') {
+ if ((q - buf) < sizeof(buf) - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+ sdp_parse_line(s, s1, letter, buf);
+ next_line:
+ while (*p != '\n' && *p != '\0')
+ p++;
+ if (*p == '\n')
+ p++;
+ }
+ return 0;
+}
+
+static void rtsp_parse_range(int *min_ptr, int *max_ptr, const char **pp)
+{
+ const char *p;
+ int v;
+
+ p = *pp;
+ skip_spaces(&p);
+ v = strtol(p, (char **)&p, 10);
+ if (*p == '-') {
+ p++;
+ *min_ptr = v;
+ v = strtol(p, (char **)&p, 10);
+ *max_ptr = v;
+ } else {
+ *min_ptr = v;
+ *max_ptr = v;
+ }
+ *pp = p;
+}
+
+/* XXX: only one transport specification is parsed */
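+/* A typical value handled here is, illustratively,
+   "RTP/AVP/UDP;unicast;client_port=5000-5001;server_port=6256-6257" */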
+static void rtsp_parse_transport(RTSPHeader *reply, const char *p)
+{
+ char transport_protocol[16];
+ char profile[16];
+ char lower_transport[16];
+ char parameter[16];
+ RTSPTransportField *th;
+ char buf[256];
+
+ reply->nb_transports = 0;
+
+ for(;;) {
+ skip_spaces(&p);
+ if (*p == '\0')
+ break;
+
+ th = &reply->transports[reply->nb_transports];
+
+ get_word_sep(transport_protocol, sizeof(transport_protocol),
+ "/", &p);
+ if (*p == '/')
+ p++;
+ get_word_sep(profile, sizeof(profile), "/;,", &p);
+ lower_transport[0] = '\0';
+ if (*p == '/') {
+ p++;
+ get_word_sep(lower_transport, sizeof(lower_transport),
+ ";,", &p);
+ }
+ if (!strcasecmp(lower_transport, "TCP"))
+ th->protocol = RTSP_PROTOCOL_RTP_TCP;
+ else
+ th->protocol = RTSP_PROTOCOL_RTP_UDP;
+
+ if (*p == ';')
+ p++;
+ /* get each parameter */
+ while (*p != '\0' && *p != ',') {
+ get_word_sep(parameter, sizeof(parameter), "=;,", &p);
+ if (!strcmp(parameter, "port")) {
+ if (*p == '=') {
+ p++;
+ rtsp_parse_range(&th->port_min, &th->port_max, &p);
+ }
+ } else if (!strcmp(parameter, "client_port")) {
+ if (*p == '=') {
+ p++;
+ rtsp_parse_range(&th->client_port_min,
+ &th->client_port_max, &p);
+ }
+ } else if (!strcmp(parameter, "server_port")) {
+ if (*p == '=') {
+ p++;
+ rtsp_parse_range(&th->server_port_min,
+ &th->server_port_max, &p);
+ }
+ } else if (!strcmp(parameter, "interleaved")) {
+ if (*p == '=') {
+ p++;
+ rtsp_parse_range(&th->interleaved_min,
+ &th->interleaved_max, &p);
+ }
+ } else if (!strcmp(parameter, "multicast")) {
+ if (th->protocol == RTSP_PROTOCOL_RTP_UDP)
+ th->protocol = RTSP_PROTOCOL_RTP_UDP_MULTICAST;
+ } else if (!strcmp(parameter, "ttl")) {
+ if (*p == '=') {
+ p++;
+ th->ttl = strtol(p, (char **)&p, 10);
+ }
+ } else if (!strcmp(parameter, "destination")) {
+ struct in_addr ipaddr;
+
+ if (*p == '=') {
+ p++;
+ get_word_sep(buf, sizeof(buf), ";,", &p);
+ if (inet_aton(buf, &ipaddr))
+ th->destination = ntohl(ipaddr.s_addr);
+ }
+ }
+ while (*p != ';' && *p != '\0' && *p != ',')
+ p++;
+ if (*p == ';')
+ p++;
+ }
+ if (*p == ',')
+ p++;
+
+ reply->nb_transports++;
+ }
+}
+
+void rtsp_parse_line(RTSPHeader *reply, const char *buf)
+{
+ const char *p;
+
+    /* NOTE: we do case-insensitive matching to cope with broken servers */
+ p = buf;
+ if (stristart(p, "Session:", &p)) {
+ get_word_sep(reply->session_id, sizeof(reply->session_id), ";", &p);
+ } else if (stristart(p, "Content-Length:", &p)) {
+ reply->content_length = strtol(p, NULL, 10);
+ } else if (stristart(p, "Transport:", &p)) {
+ rtsp_parse_transport(reply, p);
+ } else if (stristart(p, "CSeq:", &p)) {
+ reply->seq = strtol(p, NULL, 10);
+ } else if (stristart(p, "Range:", &p)) {
+ rtsp_parse_range_npt(p, &reply->range_start, &reply->range_end);
+ }
+}
+
+static int url_readbuf(URLContext *h, unsigned char *buf, int size)
+{
+ int ret, len;
+
+ len = 0;
+ while (len < size) {
+ ret = url_read(h, buf+len, size-len);
+ if (ret < 1)
+ return ret;
+ len += ret;
+ }
+ return len;
+}
+
+/* skip an RTP/TCP interleaved packet */
+static void rtsp_skip_packet(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ int ret, len, len1;
+ uint8_t buf[1024];
+
+ ret = url_readbuf(rt->rtsp_hd, buf, 3);
+ if (ret != 3)
+ return;
+ len = (buf[1] << 8) | buf[2];
+#ifdef DEBUG
+ printf("skipping RTP packet len=%d\n", len);
+#endif
+ /* skip payload */
+ while (len > 0) {
+ len1 = len;
+ if (len1 > sizeof(buf))
+ len1 = sizeof(buf);
+ ret = url_readbuf(rt->rtsp_hd, buf, len1);
+ if (ret != len1)
+ return;
+ len -= len1;
+ }
+}
+
+static void rtsp_send_cmd(AVFormatContext *s,
+ const char *cmd, RTSPHeader *reply,
+ unsigned char **content_ptr)
+{
+ RTSPState *rt = s->priv_data;
+ char buf[4096], buf1[1024], *q;
+ unsigned char ch;
+ const char *p;
+ int content_length, line_count;
+ unsigned char *content = NULL;
+
+ memset(reply, 0, sizeof(RTSPHeader));
+
+ rt->seq++;
+ pstrcpy(buf, sizeof(buf), cmd);
+ snprintf(buf1, sizeof(buf1), "CSeq: %d\r\n", rt->seq);
+ pstrcat(buf, sizeof(buf), buf1);
+ if (rt->session_id[0] != '\0' && !strstr(cmd, "\nIf-Match:")) {
+ snprintf(buf1, sizeof(buf1), "Session: %s\r\n", rt->session_id);
+ pstrcat(buf, sizeof(buf), buf1);
+ }
+ pstrcat(buf, sizeof(buf), "\r\n");
+#ifdef DEBUG
+ printf("Sending:\n%s--\n", buf);
+#endif
+ url_write(rt->rtsp_hd, buf, strlen(buf));
+
+ /* parse reply (XXX: use buffers) */
+ line_count = 0;
+ rt->last_reply[0] = '\0';
+ for(;;) {
+ q = buf;
+ for(;;) {
+ if (url_readbuf(rt->rtsp_hd, &ch, 1) != 1)
+ break;
+ if (ch == '\n')
+ break;
+ if (ch == '$') {
+ /* XXX: only parse it if first char on line ? */
+ rtsp_skip_packet(s);
+ } else if (ch != '\r') {
+ if ((q - buf) < sizeof(buf) - 1)
+ *q++ = ch;
+ }
+ }
+ *q = '\0';
+#ifdef DEBUG
+ printf("line='%s'\n", buf);
+#endif
+ /* test if last line */
+ if (buf[0] == '\0')
+ break;
+ p = buf;
+ if (line_count == 0) {
+ /* get reply code */
+ get_word(buf1, sizeof(buf1), &p);
+ get_word(buf1, sizeof(buf1), &p);
+ reply->status_code = atoi(buf1);
+ } else {
+ rtsp_parse_line(reply, p);
+ pstrcat(rt->last_reply, sizeof(rt->last_reply), p);
+ pstrcat(rt->last_reply, sizeof(rt->last_reply), "\n");
+ }
+ line_count++;
+ }
+
+ if (rt->session_id[0] == '\0' && reply->session_id[0] != '\0')
+ pstrcpy(rt->session_id, sizeof(rt->session_id), reply->session_id);
+
+ content_length = reply->content_length;
+ if (content_length > 0) {
+ /* leave some room for a trailing '\0' (useful for simple parsing) */
+ content = av_malloc(content_length + 1);
+ (void)url_readbuf(rt->rtsp_hd, content, content_length);
+ content[content_length] = '\0';
+ }
+ if (content_ptr)
+ *content_ptr = content;
+}
+
+/* useful for modules: set RTSP callback function */
+
+void rtsp_set_callback(FFRTSPCallback *rtsp_cb)
+{
+ ff_rtsp_callback = rtsp_cb;
+}
+
+
+/* close and free RTSP streams */
+static void rtsp_close_streams(RTSPState *rt)
+{
+ int i;
+ RTSPStream *rtsp_st;
+
+ for(i=0;i<rt->nb_rtsp_streams;i++) {
+ rtsp_st = rt->rtsp_streams[i];
+ if (rtsp_st) {
+ if (rtsp_st->rtp_ctx)
+ rtp_parse_close(rtsp_st->rtp_ctx);
+ if (rtsp_st->rtp_handle)
+ url_close(rtsp_st->rtp_handle);
+ if (rtsp_st->dynamic_handler && rtsp_st->dynamic_protocol_context)
+ rtsp_st->dynamic_handler->close(rtsp_st->dynamic_protocol_context);
+ }
+ av_free(rtsp_st);
+ }
+ av_free(rt->rtsp_streams);
+}
+
+static int rtsp_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ RTSPState *rt = s->priv_data;
+ char host[1024], path[1024], tcpname[1024], cmd[2048];
+ URLContext *rtsp_hd;
+ int port, i, j, ret, err;
+ RTSPHeader reply1, *reply = &reply1;
+ unsigned char *content = NULL;
+ RTSPStream *rtsp_st;
+ int protocol_mask;
+ AVStream *st;
+
+ /* extract hostname and port */
+ url_split(NULL, 0, NULL, 0,
+ host, sizeof(host), &port, path, sizeof(path), s->filename);
+ if (port < 0)
+ port = RTSP_DEFAULT_PORT;
+
+    /* open the TCP connection */
+ snprintf(tcpname, sizeof(tcpname), "tcp://%s:%d", host, port);
+ if (url_open(&rtsp_hd, tcpname, URL_RDWR) < 0)
+ return AVERROR_IO;
+ rt->rtsp_hd = rtsp_hd;
+ rt->seq = 0;
+
+ /* describe the stream */
+ snprintf(cmd, sizeof(cmd),
+ "DESCRIBE %s RTSP/1.0\r\n"
+ "Accept: application/sdp\r\n",
+ s->filename);
+ rtsp_send_cmd(s, cmd, reply, &content);
+ if (!content) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ if (reply->status_code != RTSP_STATUS_OK) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+    /* now that we have the SDP description, parse it */
+ ret = sdp_parse(s, (const char *)content);
+ av_freep(&content);
+ if (ret < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ protocol_mask = rtsp_default_protocols;
+
+ /* for each stream, make the setup request */
+ /* XXX: we assume the same server is used for the control of each
+ RTSP stream */
+
+ for(j = RTSP_RTP_PORT_MIN, i = 0; i < rt->nb_rtsp_streams; ++i) {
+ char transport[2048];
+
+ rtsp_st = rt->rtsp_streams[i];
+
+ /* compute available transports */
+ transport[0] = '\0';
+
+ /* RTP/UDP */
+ if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP)) {
+ char buf[256];
+
+ /* first try in specified port range */
+ if (RTSP_RTP_PORT_MIN != 0) {
+                while(j <= RTSP_RTP_PORT_MAX) {
+                    snprintf(buf, sizeof(buf), "rtp://?localport=%d", j);
+                    /* we will use two ports per rtp stream (rtp and rtcp);
+                       advance j even on failure so the loop terminates */
+                    j += 2;
+                    if (url_open(&rtsp_st->rtp_handle, buf, URL_RDWR) == 0)
+                        goto rtp_opened;
+                }
+ }
+
+/* then try on any port
+** if (url_open(&rtsp_st->rtp_handle, "rtp://", URL_RDONLY) < 0) {
+** err = AVERROR_INVALIDDATA;
+** goto fail;
+** }
+*/
+
+ rtp_opened:
+ port = rtp_get_local_port(rtsp_st->rtp_handle);
+ if (transport[0] != '\0')
+ pstrcat(transport, sizeof(transport), ",");
+ snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1,
+ "RTP/AVP/UDP;unicast;client_port=%d-%d",
+ port, port + 1);
+ }
+
+ /* RTP/TCP */
+ else if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_TCP)) {
+ if (transport[0] != '\0')
+ pstrcat(transport, sizeof(transport), ",");
+ snprintf(transport + strlen(transport), sizeof(transport) - strlen(transport) - 1,
+ "RTP/AVP/TCP");
+ }
+
+ else if (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP_MULTICAST)) {
+ if (transport[0] != '\0')
+ pstrcat(transport, sizeof(transport), ",");
+ snprintf(transport + strlen(transport),
+ sizeof(transport) - strlen(transport) - 1,
+ "RTP/AVP/UDP;multicast");
+ }
+ snprintf(cmd, sizeof(cmd),
+ "SETUP %s RTSP/1.0\r\n"
+ "Transport: %s\r\n",
+ rtsp_st->control_url, transport);
+ rtsp_send_cmd(s, cmd, reply, NULL);
+ if (reply->status_code != RTSP_STATUS_OK ||
+ reply->nb_transports != 1) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ /* XXX: same protocol for all streams is required */
+ if (i > 0) {
+ if (reply->transports[0].protocol != rt->protocol) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ } else {
+ rt->protocol = reply->transports[0].protocol;
+ }
+
+        /* close RTP connection if not chosen */
+ if (reply->transports[0].protocol != RTSP_PROTOCOL_RTP_UDP &&
+ (protocol_mask & (1 << RTSP_PROTOCOL_RTP_UDP))) {
+ url_close(rtsp_st->rtp_handle);
+ rtsp_st->rtp_handle = NULL;
+ }
+
+ switch(reply->transports[0].protocol) {
+ case RTSP_PROTOCOL_RTP_TCP:
+ rtsp_st->interleaved_min = reply->transports[0].interleaved_min;
+ rtsp_st->interleaved_max = reply->transports[0].interleaved_max;
+ break;
+
+ case RTSP_PROTOCOL_RTP_UDP:
+ {
+ char url[1024];
+
+ /* XXX: also use address if specified */
+ snprintf(url, sizeof(url), "rtp://%s:%d",
+ host, reply->transports[0].server_port_min);
+ if (rtp_set_remote_url(rtsp_st->rtp_handle, url) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+ break;
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ {
+ char url[1024];
+ int ttl;
+
+ ttl = reply->transports[0].ttl;
+ if (!ttl)
+ ttl = 16;
+ snprintf(url, sizeof(url), "rtp://%s:%d?multicast=1&ttl=%d",
+ host,
+ reply->transports[0].server_port_min,
+ ttl);
+ if (url_open(&rtsp_st->rtp_handle, url, URL_RDWR) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+ break;
+ }
+ /* open the RTP context */
+ st = NULL;
+ if (rtsp_st->stream_index >= 0)
+ st = s->streams[rtsp_st->stream_index];
+ if (!st)
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+ rtsp_st->rtp_ctx = rtp_parse_open(s, st, rtsp_st->rtp_handle, rtsp_st->sdp_payload_type, &rtsp_st->rtp_payload_data);
+
+ if (!rtsp_st->rtp_ctx) {
+ err = AVERROR_NOMEM;
+ goto fail;
+ } else {
+ if(rtsp_st->dynamic_handler) {
+ rtsp_st->rtp_ctx->dynamic_protocol_context= rtsp_st->dynamic_protocol_context;
+ rtsp_st->rtp_ctx->parse_packet= rtsp_st->dynamic_handler->parse_packet;
+ }
+ }
+ }
+
+ /* use callback if available to extend setup */
+ if (ff_rtsp_callback) {
+ if (ff_rtsp_callback(RTSP_ACTION_CLIENT_SETUP, rt->session_id,
+ NULL, 0, rt->last_reply) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+
+
+ rt->state = RTSP_STATE_IDLE;
+ rt->seek_timestamp = 0; /* default is to start stream at position
+ zero */
+ if (ap->initial_pause) {
+ /* do not start immediately */
+ } else {
+ if (rtsp_read_play(s) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+ return 0;
+ fail:
+ rtsp_close_streams(rt);
+ av_freep(&content);
+ url_close(rt->rtsp_hd);
+ return err;
+}
+
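+/* Interleaved RTP over the RTSP TCP connection is framed as
+   '$' <one-byte channel id> <two-byte big-endian length> <payload> (RFC 2326) */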
+static int tcp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
+ uint8_t *buf, int buf_size)
+{
+ RTSPState *rt = s->priv_data;
+ int id, len, i, ret;
+ RTSPStream *rtsp_st;
+
+#ifdef DEBUG_RTP_TCP
+ printf("tcp_read_packet:\n");
+#endif
+ redo:
+ for(;;) {
+ ret = url_readbuf(rt->rtsp_hd, buf, 1);
+#ifdef DEBUG_RTP_TCP
+ printf("ret=%d c=%02x [%c]\n", ret, buf[0], buf[0]);
+#endif
+ if (ret != 1)
+ return -1;
+ if (buf[0] == '$')
+ break;
+ }
+ ret = url_readbuf(rt->rtsp_hd, buf, 3);
+ if (ret != 3)
+ return -1;
+ id = buf[0];
+ len = (buf[1] << 8) | buf[2];
+#ifdef DEBUG_RTP_TCP
+ printf("id=%d len=%d\n", id, len);
+#endif
+ if (len > buf_size || len < 12)
+ goto redo;
+ /* get the data */
+ ret = url_readbuf(rt->rtsp_hd, buf, len);
+ if (ret != len)
+ return -1;
+
+ /* find the matching stream */
+ for(i = 0; i < rt->nb_rtsp_streams; i++) {
+ rtsp_st = rt->rtsp_streams[i];
+ if (id >= rtsp_st->interleaved_min &&
+ id <= rtsp_st->interleaved_max)
+ goto found;
+ }
+ goto redo;
+ found:
+ *prtsp_st = rtsp_st;
+ return len;
+}
+
+static int udp_read_packet(AVFormatContext *s, RTSPStream **prtsp_st,
+ uint8_t *buf, int buf_size)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPStream *rtsp_st;
+ fd_set rfds;
+ int fd1, fd2, fd_max, n, i, ret;
+ struct timeval tv;
+
+ for(;;) {
+ if (url_interrupt_cb())
+ return -1;
+ FD_ZERO(&rfds);
+ fd_max = -1;
+ for(i = 0; i < rt->nb_rtsp_streams; i++) {
+ rtsp_st = rt->rtsp_streams[i];
+ /* currently, we cannot probe RTCP handle because of blocking restrictions */
+ rtp_get_file_handles(rtsp_st->rtp_handle, &fd1, &fd2);
+ if (fd1 > fd_max)
+ fd_max = fd1;
+ FD_SET(fd1, &rfds);
+ }
+ tv.tv_sec = 0;
+ tv.tv_usec = 100 * 1000;
+ n = select(fd_max + 1, &rfds, NULL, NULL, &tv);
+ if (n > 0) {
+ for(i = 0; i < rt->nb_rtsp_streams; i++) {
+ rtsp_st = rt->rtsp_streams[i];
+ rtp_get_file_handles(rtsp_st->rtp_handle, &fd1, &fd2);
+ if (FD_ISSET(fd1, &rfds)) {
+ ret = url_read(rtsp_st->rtp_handle, buf, buf_size);
+ if (ret > 0) {
+ *prtsp_st = rtsp_st;
+ return ret;
+ }
+ }
+ }
+ }
+ }
+}
+
+static int rtsp_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPStream *rtsp_st;
+ int ret, len;
+ uint8_t buf[RTP_MAX_PACKET_LENGTH];
+
+ /* get next frames from the same RTP packet */
+ if (rt->cur_rtp) {
+ ret = rtp_parse_packet(rt->cur_rtp, pkt, NULL, 0);
+ if (ret == 0) {
+ rt->cur_rtp = NULL;
+ return 0;
+ } else if (ret == 1) {
+ return 0;
+ } else {
+ rt->cur_rtp = NULL;
+ }
+ }
+
+ /* read next RTP packet */
+ redo:
+ switch(rt->protocol) {
+ default:
+ case RTSP_PROTOCOL_RTP_TCP:
+ len = tcp_read_packet(s, &rtsp_st, buf, sizeof(buf));
+ break;
+ case RTSP_PROTOCOL_RTP_UDP:
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ len = udp_read_packet(s, &rtsp_st, buf, sizeof(buf));
+ if (rtsp_st->rtp_ctx)
+ rtp_check_and_send_back_rr(rtsp_st->rtp_ctx, len);
+ break;
+ }
+ if (len < 0)
+ return AVERROR_IO;
+ ret = rtp_parse_packet(rtsp_st->rtp_ctx, pkt, buf, len);
+ if (ret < 0)
+ goto redo;
+ if (ret == 1) {
+ /* more packets may follow, so we save the RTP context */
+ rt->cur_rtp = rtsp_st->rtp_ctx;
+ }
+ return 0;
+}
+
+static int rtsp_read_play(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPHeader reply1, *reply = &reply1;
+ char cmd[1024];
+
+    av_log(s, AV_LOG_DEBUG, "rtsp_read_play: state=%d\n", rt->state);
+
+ if (rt->state == RTSP_STATE_PAUSED) {
+ snprintf(cmd, sizeof(cmd),
+ "PLAY %s RTSP/1.0\r\n",
+ s->filename);
+ } else {
+ snprintf(cmd, sizeof(cmd),
+ "PLAY %s RTSP/1.0\r\n"
+ "Range: npt=%0.3f-\r\n",
+ s->filename,
+ (double)rt->seek_timestamp / AV_TIME_BASE);
+ }
+ rtsp_send_cmd(s, cmd, reply, NULL);
+ if (reply->status_code != RTSP_STATUS_OK) {
+ return -1;
+ } else {
+ rt->state = RTSP_STATE_PLAYING;
+ return 0;
+ }
+}
+
+/* pause the stream */
+static int rtsp_read_pause(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPHeader reply1, *reply = &reply1;
+ char cmd[1024];
+
+ if (rt->state != RTSP_STATE_PLAYING)
+ return 0;
+
+ snprintf(cmd, sizeof(cmd),
+ "PAUSE %s RTSP/1.0\r\n",
+ s->filename);
+ rtsp_send_cmd(s, cmd, reply, NULL);
+ if (reply->status_code != RTSP_STATUS_OK) {
+ return -1;
+ } else {
+ rt->state = RTSP_STATE_PAUSED;
+ return 0;
+ }
+}
+
+static int rtsp_read_seek(AVFormatContext *s, int stream_index,
+ int64_t timestamp, int flags)
+{
+ RTSPState *rt = s->priv_data;
+
+ rt->seek_timestamp = timestamp;
+ switch(rt->state) {
+ default:
+ case RTSP_STATE_IDLE:
+ break;
+ case RTSP_STATE_PLAYING:
+ if (rtsp_read_play(s) != 0)
+ return -1;
+ break;
+ case RTSP_STATE_PAUSED:
+ rt->state = RTSP_STATE_IDLE;
+ break;
+ }
+ return 0;
+}
+
+static int rtsp_read_close(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPHeader reply1, *reply = &reply1;
+ char cmd[1024];
+
+#if 0
+ /* NOTE: it is valid to flush the buffer here */
+ if (rt->protocol == RTSP_PROTOCOL_RTP_TCP) {
+ url_fclose(&rt->rtsp_gb);
+ }
+#endif
+ snprintf(cmd, sizeof(cmd),
+ "TEARDOWN %s RTSP/1.0\r\n",
+ s->filename);
+ rtsp_send_cmd(s, cmd, reply, NULL);
+
+ if (ff_rtsp_callback) {
+ ff_rtsp_callback(RTSP_ACTION_CLIENT_TEARDOWN, rt->session_id,
+ NULL, 0, NULL);
+ }
+
+ rtsp_close_streams(rt);
+ url_close(rt->rtsp_hd);
+ return 0;
+}
+
+AVInputFormat rtsp_demuxer = {
+ "rtsp",
+ "RTSP input format",
+ sizeof(RTSPState),
+ rtsp_probe,
+ rtsp_read_header,
+ rtsp_read_packet,
+ rtsp_read_close,
+ rtsp_read_seek,
+ .flags = AVFMT_NOFILE,
+ .read_play = rtsp_read_play,
+ .read_pause = rtsp_read_pause,
+};
+
+static int sdp_probe(AVProbeData *p1)
+{
+ const char *p = p1->buf, *p_end = p1->buf + p1->buf_size;
+
+ /* we look for a line beginning "c=IN IP4" */
+ while (p < p_end && *p != '\0') {
+ if (p + sizeof("c=IN IP4") - 1 < p_end && strstart(p, "c=IN IP4", NULL))
+ return AVPROBE_SCORE_MAX / 2;
+
+ while(p < p_end - 1 && *p != '\n') p++;
+ if (++p >= p_end)
+ break;
+ if (*p == '\r')
+ p++;
+ }
+ return 0;
+}
+
+#define SDP_MAX_SIZE 8192
+
+static int sdp_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ RTSPState *rt = s->priv_data;
+ RTSPStream *rtsp_st;
+ int size, i, err;
+ char *content;
+ char url[1024];
+ AVStream *st;
+
+ /* read the whole sdp file */
+ /* XXX: better loading */
+    content = av_malloc(SDP_MAX_SIZE);
+    if (!content)
+        return AVERROR_NOMEM;
+    size = get_buffer(&s->pb, content, SDP_MAX_SIZE - 1);
+ if (size <= 0) {
+ av_free(content);
+ return AVERROR_INVALIDDATA;
+ }
+ content[size] ='\0';
+
+ sdp_parse(s, content);
+ av_free(content);
+
+ /* open each RTP stream */
+ for(i=0;i<rt->nb_rtsp_streams;i++) {
+ rtsp_st = rt->rtsp_streams[i];
+
+ snprintf(url, sizeof(url), "rtp://%s:%d?multicast=1&ttl=%d",
+ inet_ntoa(rtsp_st->sdp_ip),
+ rtsp_st->sdp_port,
+ rtsp_st->sdp_ttl);
+ if (url_open(&rtsp_st->rtp_handle, url, URL_RDWR) < 0) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ /* open the RTP context */
+ st = NULL;
+ if (rtsp_st->stream_index >= 0)
+ st = s->streams[rtsp_st->stream_index];
+ if (!st)
+ s->ctx_flags |= AVFMTCTX_NOHEADER;
+ rtsp_st->rtp_ctx = rtp_parse_open(s, st, rtsp_st->rtp_handle, rtsp_st->sdp_payload_type, &rtsp_st->rtp_payload_data);
+ if (!rtsp_st->rtp_ctx) {
+ err = AVERROR_NOMEM;
+ goto fail;
+ } else {
+ if(rtsp_st->dynamic_handler) {
+ rtsp_st->rtp_ctx->dynamic_protocol_context= rtsp_st->dynamic_protocol_context;
+ rtsp_st->rtp_ctx->parse_packet= rtsp_st->dynamic_handler->parse_packet;
+ }
+ }
+ }
+ return 0;
+ fail:
+ rtsp_close_streams(rt);
+ return err;
+}
+
+static int sdp_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ return rtsp_read_packet(s, pkt);
+}
+
+static int sdp_read_close(AVFormatContext *s)
+{
+ RTSPState *rt = s->priv_data;
+ rtsp_close_streams(rt);
+ return 0;
+}
+
+#ifdef CONFIG_SDP_DEMUXER
+AVInputFormat sdp_demuxer = {
+ "sdp",
+ "SDP",
+ sizeof(RTSPState),
+ sdp_probe,
+ sdp_read_header,
+ sdp_read_packet,
+ sdp_read_close,
+};
+#endif
+
+/* dummy redirector format (used directly in av_open_input_file now) */
+static int redir_probe(AVProbeData *pd)
+{
+ const char *p;
+ p = pd->buf;
+ while (redir_isspace(*p))
+ p++;
+ if (strstart(p, "http://", NULL) ||
+ strstart(p, "rtsp://", NULL))
+ return AVPROBE_SCORE_MAX;
+ return 0;
+}
+
+/* called from utils.c */
+int redir_open(AVFormatContext **ic_ptr, ByteIOContext *f)
+{
+ char buf[4096], *q;
+ int c;
+ AVFormatContext *ic = NULL;
+
+ /* parse each URL and try to open it */
+ c = url_fgetc(f);
+ while (c != URL_EOF) {
+ /* skip spaces */
+ for(;;) {
+ if (!redir_isspace(c))
+ break;
+ c = url_fgetc(f);
+ }
+ if (c == URL_EOF)
+ break;
+ /* record url */
+ q = buf;
+ for(;;) {
+ if (c == URL_EOF || redir_isspace(c))
+ break;
+ if ((q - buf) < sizeof(buf) - 1)
+ *q++ = c;
+ c = url_fgetc(f);
+ }
+ *q = '\0';
+ //printf("URL='%s'\n", buf);
+ /* try to open the media file */
+ if (av_open_input_file(&ic, buf, NULL, 0, NULL) == 0)
+ break;
+ }
+ *ic_ptr = ic;
+ if (!ic)
+ return AVERROR_IO;
+ else
+ return 0;
+}
+
+AVInputFormat redir_demuxer = {
+ "redir",
+ "Redirector format",
+ 0,
+ redir_probe,
+ NULL,
+ NULL,
+ NULL,
+};
diff --git a/contrib/ffmpeg/libavformat/rtsp.h b/contrib/ffmpeg/libavformat/rtsp.h
new file mode 100644
index 000000000..c08aaa6ac
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtsp.h
@@ -0,0 +1,98 @@
+/*
+ * RTSP definitions
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef RTSP_H
+#define RTSP_H
+
+/* RTSP handling */
+enum RTSPStatusCode {
+#define DEF(n, c, s) c = n,
+#include "rtspcodes.h"
+#undef DEF
+};
+
+enum RTSPProtocol {
+ RTSP_PROTOCOL_RTP_UDP = 0,
+ RTSP_PROTOCOL_RTP_TCP = 1,
+ RTSP_PROTOCOL_RTP_UDP_MULTICAST = 2,
+};
+
+#define RTSP_DEFAULT_PORT 554
+#define RTSP_MAX_TRANSPORTS 8
+#define RTSP_TCP_MAX_PACKET_SIZE 1472
+#define RTSP_DEFAULT_NB_AUDIO_CHANNELS 2
+#define RTSP_DEFAULT_AUDIO_SAMPLERATE 44100
+#define RTSP_RTP_PORT_MIN 5000
+#define RTSP_RTP_PORT_MAX 10000
+
+typedef struct RTSPTransportField {
+ int interleaved_min, interleaved_max; /* interleave ids, if TCP transport */
+ int port_min, port_max; /* RTP ports */
+ int client_port_min, client_port_max; /* RTP ports */
+ int server_port_min, server_port_max; /* RTP ports */
+ int ttl; /* ttl value */
+ uint32_t destination; /* destination IP address */
+ enum RTSPProtocol protocol;
+} RTSPTransportField;
+
+typedef struct RTSPHeader {
+ int content_length;
+ enum RTSPStatusCode status_code; /* response code from server */
+ int nb_transports;
+ /* in AV_TIME_BASE unit, AV_NOPTS_VALUE if not used */
+ int64_t range_start, range_end;
+ RTSPTransportField transports[RTSP_MAX_TRANSPORTS];
+ int seq; /* sequence number */
+ char session_id[512];
+} RTSPHeader;
+
+/* the callback can be used to extend the connection setup/teardown step */
+enum RTSPCallbackAction {
+ RTSP_ACTION_SERVER_SETUP,
+ RTSP_ACTION_SERVER_TEARDOWN,
+ RTSP_ACTION_CLIENT_SETUP,
+ RTSP_ACTION_CLIENT_TEARDOWN,
+};
+
+typedef struct RTSPActionServerSetup {
+ uint32_t ipaddr;
+ char transport_option[512];
+} RTSPActionServerSetup;
+
+typedef int FFRTSPCallback(enum RTSPCallbackAction action,
+ const char *session_id,
+ char *buf, int buf_size,
+ void *arg);
+
+void rtsp_set_callback(FFRTSPCallback *rtsp_cb);
+
+int rtsp_init(void);
+void rtsp_parse_line(RTSPHeader *reply, const char *buf);
+
+extern int rtsp_default_protocols;
+extern int rtsp_rtp_port_min;
+extern int rtsp_rtp_port_max;
+extern FFRTSPCallback *ff_rtsp_callback;
+extern AVInputFormat rtsp_demuxer;
+
+int rtsp_pause(AVFormatContext *s);
+int rtsp_resume(AVFormatContext *s);
+
+#endif /* RTSP_H */
diff --git a/contrib/ffmpeg/libavformat/rtspcodes.h b/contrib/ffmpeg/libavformat/rtspcodes.h
new file mode 100644
index 000000000..f7aab31c9
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/rtspcodes.h
@@ -0,0 +1,31 @@
+/*
+ * RTSP definitions
+ * copyright (c) 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+DEF(200, RTSP_STATUS_OK, "OK")
+DEF(405, RTSP_STATUS_METHOD, "Method Not Allowed")
+DEF(453, RTSP_STATUS_BANDWIDTH, "Not Enough Bandwidth")
+DEF(454, RTSP_STATUS_SESSION, "Session Not Found")
+DEF(455, RTSP_STATUS_STATE, "Method Not Valid in This State")
+DEF(459, RTSP_STATUS_AGGREGATE, "Aggregate operation not allowed")
+DEF(460, RTSP_STATUS_ONLY_AGGREGATE, "Only aggregate operation allowed")
+DEF(461, RTSP_STATUS_TRANSPORT, "Unsupported transport")
+DEF(500, RTSP_STATUS_INTERNAL, "Internal Server Error")
+DEF(503, RTSP_STATUS_SERVICE, "Service Unavailable")
+DEF(505, RTSP_STATUS_VERSION, "RTSP Version not supported")
diff --git a/contrib/ffmpeg/libavformat/segafilm.c b/contrib/ffmpeg/libavformat/segafilm.c
new file mode 100644
index 000000000..4feb97262
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/segafilm.c
@@ -0,0 +1,310 @@
+/*
+ * Sega FILM Format (CPK) Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file segafilm.c
+ * Sega FILM (.cpk) file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * For more information regarding the Sega FILM file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+#include "avformat.h"
+
+#define FILM_TAG MKBETAG('F', 'I', 'L', 'M')
+#define FDSC_TAG MKBETAG('F', 'D', 'S', 'C')
+#define STAB_TAG MKBETAG('S', 'T', 'A', 'B')
+#define CVID_TAG MKBETAG('c', 'v', 'i', 'd')
+
+typedef struct {
+ int stream;
+ offset_t sample_offset;
+ unsigned int sample_size;
+ int64_t pts;
+ int keyframe;
+} film_sample_t;
+
+typedef struct FilmDemuxContext {
+ int video_stream_index;
+ int audio_stream_index;
+
+ unsigned int audio_type;
+ unsigned int audio_samplerate;
+ unsigned int audio_bits;
+ unsigned int audio_channels;
+
+ unsigned int video_type;
+ unsigned int sample_count;
+ film_sample_t *sample_table;
+ unsigned int current_sample;
+
+ unsigned int base_clock;
+ unsigned int version;
+ int cvid_extra_bytes; /* the number of bytes thrown into the Cinepak
+ * chunk header to throw off decoders */
+
+ /* buffer used for interleaving stereo PCM data */
+ unsigned char *stereo_buffer;
+ int stereo_buffer_size;
+} FilmDemuxContext;
+
+static int film_probe(AVProbeData *p)
+{
+ if (p->buf_size < 4)
+ return 0;
+
+ if (BE_32(&p->buf[0]) != FILM_TAG)
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int film_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ unsigned char scratch[256];
+ int i;
+ unsigned int data_offset;
+ unsigned int audio_frame_counter;
+
+ film->sample_table = NULL;
+ film->stereo_buffer = NULL;
+ film->stereo_buffer_size = 0;
+
+ /* load the main FILM header */
+ if (get_buffer(pb, scratch, 16) != 16)
+ return AVERROR_IO;
+ data_offset = BE_32(&scratch[4]);
+ film->version = BE_32(&scratch[8]);
+
+ /* load the FDSC chunk */
+ if (film->version == 0) {
+ /* special case for Lemmings .film files; 20-byte header */
+ if (get_buffer(pb, scratch, 20) != 20)
+ return AVERROR_IO;
+ /* make some assumptions about the audio parameters */
+ film->audio_type = CODEC_ID_PCM_S8;
+ film->audio_samplerate = 22050;
+ film->audio_channels = 1;
+ film->audio_bits = 8;
+ } else {
+ /* normal Saturn .cpk files; 32-byte header */
+ if (get_buffer(pb, scratch, 32) != 32)
+ return AVERROR_IO;
+        film->audio_samplerate = BE_16(&scratch[24]);
+ film->audio_channels = scratch[21];
+ film->audio_bits = scratch[22];
+ if (film->audio_bits == 8)
+ film->audio_type = CODEC_ID_PCM_S8;
+ else if (film->audio_bits == 16)
+ film->audio_type = CODEC_ID_PCM_S16BE;
+ else
+ film->audio_type = 0;
+ }
+
+ if (BE_32(&scratch[0]) != FDSC_TAG)
+ return AVERROR_INVALIDDATA;
+
+ film->cvid_extra_bytes = 0;
+ if (BE_32(&scratch[8]) == CVID_TAG) {
+ film->video_type = CODEC_ID_CINEPAK;
+ if (film->version)
+ film->cvid_extra_bytes = 2;
+ else
+ film->cvid_extra_bytes = 6; /* Lemmings 3DO case */
+ } else
+ film->video_type = 0;
+
+ /* initialize the decoder streams */
+ if (film->video_type) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ film->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = film->video_type;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = BE_32(&scratch[16]);
+ st->codec->height = BE_32(&scratch[12]);
+ }
+
+ if (film->audio_type) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ film->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = film->audio_type;
+ st->codec->codec_tag = 1;
+ st->codec->channels = film->audio_channels;
+ st->codec->bits_per_sample = film->audio_bits;
+ st->codec->sample_rate = film->audio_samplerate;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ st->codec->block_align = st->codec->channels *
+ st->codec->bits_per_sample / 8;
+ }
+
+ /* load the sample table */
+ if (get_buffer(pb, scratch, 16) != 16)
+ return AVERROR_IO;
+ if (BE_32(&scratch[0]) != STAB_TAG)
+ return AVERROR_INVALIDDATA;
+ film->base_clock = BE_32(&scratch[8]);
+ film->sample_count = BE_32(&scratch[12]);
+ if(film->sample_count >= UINT_MAX / sizeof(film_sample_t))
+ return -1;
+    film->sample_table = av_malloc(film->sample_count * sizeof(film_sample_t));
+    if (!film->sample_table)
+        return AVERROR_NOMEM;
+
+ for(i=0; i<s->nb_streams; i++)
+ av_set_pts_info(s->streams[i], 33, 1, film->base_clock);
+
+ audio_frame_counter = 0;
+ for (i = 0; i < film->sample_count; i++) {
+ /* load the next sample record and transfer it to an internal struct */
+ if (get_buffer(pb, scratch, 16) != 16) {
+ av_free(film->sample_table);
+ return AVERROR_IO;
+ }
+ film->sample_table[i].sample_offset =
+ data_offset + BE_32(&scratch[0]);
+ film->sample_table[i].sample_size = BE_32(&scratch[4]);
+ if (BE_32(&scratch[8]) == 0xFFFFFFFF) {
+ film->sample_table[i].stream = film->audio_stream_index;
+ film->sample_table[i].pts = audio_frame_counter;
+ film->sample_table[i].pts *= film->base_clock;
+ film->sample_table[i].pts /= film->audio_samplerate;
+
+ audio_frame_counter += (film->sample_table[i].sample_size /
+ (film->audio_channels * film->audio_bits / 8));
+ } else {
+ film->sample_table[i].stream = film->video_stream_index;
+ film->sample_table[i].pts = BE_32(&scratch[8]) & 0x7FFFFFFF;
+ film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
+ }
+ }
+
+ film->current_sample = 0;
+
+ return 0;
+}
+
+static int film_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ film_sample_t *sample;
+ int ret = 0;
+ int i;
+ int left, right;
+
+ if (film->current_sample >= film->sample_count)
+ return AVERROR_IO;
+
+ sample = &film->sample_table[film->current_sample];
+
+ /* position the stream (will probably be there anyway) */
+ url_fseek(pb, sample->sample_offset, SEEK_SET);
+
+ /* do a special song and dance when loading FILM Cinepak chunks */
+ if ((sample->stream == film->video_stream_index) &&
+ (film->video_type == CODEC_ID_CINEPAK)) {
+ if (av_new_packet(pkt, sample->sample_size - film->cvid_extra_bytes))
+ return AVERROR_NOMEM;
+        if (pkt->size < 10) {
+            av_free_packet(pkt);
+            return -1;
+        }
+ pkt->pos= url_ftell(pb);
+ ret = get_buffer(pb, pkt->data, 10);
+ /* skip the non-spec CVID bytes */
+ url_fseek(pb, film->cvid_extra_bytes, SEEK_CUR);
+ ret += get_buffer(pb, pkt->data + 10,
+ sample->sample_size - 10 - film->cvid_extra_bytes);
+ if (ret != sample->sample_size - film->cvid_extra_bytes)
+ ret = AVERROR_IO;
+ } else if ((sample->stream == film->audio_stream_index) &&
+ (film->audio_channels == 2)) {
+ /* stereo PCM needs to be interleaved */
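+        /* the chunk stores all left-channel samples followed by all
+           right-channel samples; the loop below rebuilds an interleaved stream */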
+
+ if (av_new_packet(pkt, sample->sample_size))
+ return AVERROR_NOMEM;
+
+ /* make sure the interleave buffer is large enough */
+ if (sample->sample_size > film->stereo_buffer_size) {
+ av_free(film->stereo_buffer);
+ film->stereo_buffer_size = sample->sample_size;
+ film->stereo_buffer = av_malloc(film->stereo_buffer_size);
+ }
+
+ pkt->pos= url_ftell(pb);
+ ret = get_buffer(pb, film->stereo_buffer, sample->sample_size);
+ if (ret != sample->sample_size)
+ ret = AVERROR_IO;
+
+ left = 0;
+ right = sample->sample_size / 2;
+ for (i = 0; i < sample->sample_size; ) {
+ if (film->audio_bits == 8) {
+ pkt->data[i++] = film->stereo_buffer[left++];
+ pkt->data[i++] = film->stereo_buffer[right++];
+ } else {
+ pkt->data[i++] = film->stereo_buffer[left++];
+ pkt->data[i++] = film->stereo_buffer[left++];
+ pkt->data[i++] = film->stereo_buffer[right++];
+ pkt->data[i++] = film->stereo_buffer[right++];
+ }
+ }
+ } else {
+ ret= av_get_packet(pb, pkt, sample->sample_size);
+ if (ret != sample->sample_size)
+ ret = AVERROR_IO;
+ }
+
+ pkt->stream_index = sample->stream;
+ pkt->pts = sample->pts;
+
+ film->current_sample++;
+
+ return ret;
+}
+
+static int film_read_close(AVFormatContext *s)
+{
+ FilmDemuxContext *film = (FilmDemuxContext *)s->priv_data;
+
+ av_free(film->sample_table);
+ av_free(film->stereo_buffer);
+
+ return 0;
+}
+
+AVInputFormat segafilm_demuxer = {
+ "film_cpk",
+ "Sega FILM/CPK format",
+ sizeof(FilmDemuxContext),
+ film_probe,
+ film_read_header,
+ film_read_packet,
+ film_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/sgi.c b/contrib/ffmpeg/libavformat/sgi.c
new file mode 100644
index 000000000..bf0297e81
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/sgi.c
@@ -0,0 +1,460 @@
+/*
+ * SGI image format
+ * Todd Kirby <doubleshot@pacbell.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "avio.h"
+
+/* #define DEBUG */
+
+/* sgi image file signature */
+#define SGI_MAGIC 474
+
+#define SGI_HEADER_SIZE 512
+
+#define SGI_GRAYSCALE 1
+#define SGI_RGB 3
+#define SGI_RGBA 4
+
+#define SGI_SINGLE_CHAN 2
+#define SGI_MULTI_CHAN 3
+
+typedef struct SGIInfo{
+ short magic;
+ char rle;
+ char bytes_per_channel;
+ unsigned short dimension;
+ unsigned short xsize;
+ unsigned short ysize;
+ unsigned short zsize;
+} SGIInfo;
+
+
+static int sgi_probe(AVProbeData *pd)
+{
+ /* test for sgi magic */
+ if (pd->buf_size >= 2 && BE_16(&pd->buf[0]) == SGI_MAGIC) {
+ return AVPROBE_SCORE_MAX;
+ } else {
+ return 0;
+ }
+}
+
+/* read sgi header fields */
+static void read_sgi_header(ByteIOContext *f, SGIInfo *info)
+{
+ info->magic = (unsigned short) get_be16(f);
+ info->rle = get_byte(f);
+ info->bytes_per_channel = get_byte(f);
+ info->dimension = (unsigned short)get_be16(f);
+ info->xsize = (unsigned short) get_be16(f);
+ info->ysize = (unsigned short) get_be16(f);
+ info->zsize = (unsigned short) get_be16(f);
+
+ if(info->zsize > 4096)
+ info->zsize= 0;
+
+#ifdef DEBUG
+ printf("sgi header fields:\n");
+ printf(" magic: %d\n", info->magic);
+ printf(" rle: %d\n", info->rle);
+ printf(" bpc: %d\n", info->bytes_per_channel);
+ printf(" dim: %d\n", info->dimension);
+ printf(" xsize: %d\n", info->xsize);
+ printf(" ysize: %d\n", info->ysize);
+ printf(" zsize: %d\n", info->zsize);
+#endif
+
+ return;
+}
+
+
+/* read an uncompressed sgi image */
+static int read_uncompressed_sgi(const SGIInfo *si,
+ AVPicture *pict, ByteIOContext *f)
+{
+ int x, y, z, chan_offset, ret = 0;
+ uint8_t *dest_row;
+
+ /* skip header */
+ url_fseek(f, SGI_HEADER_SIZE, SEEK_SET);
+
+ pict->linesize[0] = si->xsize;
+
+ for (z = 0; z < si->zsize; z++) {
+
+#ifndef WORDS_BIGENDIAN
+ /* rgba -> bgra for rgba32 on little endian cpus */
+ if (si->zsize == 4 && z != 3)
+ chan_offset = 2 - z;
+ else
+#endif
+ chan_offset = z;
+
+ for (y = si->ysize - 1; y >= 0; y--) {
+ dest_row = pict->data[0] + (y * si->xsize * si->zsize);
+
+ for (x = 0; x < si->xsize; x++) {
+ dest_row[chan_offset] = get_byte(f);
+ dest_row += si->zsize;
+ }
+ }
+ }
+
+ return ret;
+}
+
+
+/* expand an rle row into a channel */
+static int expand_rle_row(ByteIOContext *f, unsigned char *optr,
+ int chan_offset, int pixelstride)
+{
+ unsigned char pixel, count;
+ int length = 0;
+
+#ifndef WORDS_BIGENDIAN
+ /* rgba -> bgra for rgba32 on little endian cpus */
+ if (pixelstride == 4 && chan_offset != 3) {
+ chan_offset = 2 - chan_offset;
+ }
+#endif
+
+ optr += chan_offset;
+
+ while (1) {
+ pixel = get_byte(f);
+
+ if (!(count = (pixel & 0x7f))) {
+ return length;
+ }
+ if (pixel & 0x80) {
+ while (count--) {
+ *optr = get_byte(f);
+ length++;
+ optr += pixelstride;
+ }
+ } else {
+ pixel = get_byte(f);
+
+ while (count--) {
+ *optr = pixel;
+ length++;
+ optr += pixelstride;
+ }
+ }
+ }
+}
+
+
+/* read a run length encoded sgi image */
+static int read_rle_sgi(const SGIInfo *sgi_info,
+ AVPicture *pict, ByteIOContext *f)
+{
+ uint8_t *dest_row;
+ unsigned long *start_table;
+ int y, z, xsize, ysize, zsize, tablen;
+ long start_offset;
+ int ret = 0;
+
+ xsize = sgi_info->xsize;
+ ysize = sgi_info->ysize;
+ zsize = sgi_info->zsize;
+
+ /* skip header */
+ url_fseek(f, SGI_HEADER_SIZE, SEEK_SET);
+
+ /* size of rle offset and length tables */
+ tablen = ysize * zsize * sizeof(long);
+
+ start_table = (unsigned long *)av_malloc(tablen);
+
+ if (!get_buffer(f, (uint8_t *)start_table, tablen)) {
+ ret = AVERROR_IO;
+ goto fail;
+ }
+
+ /* skip run length table */
+ url_fseek(f, tablen, SEEK_CUR);
+
+ for (z = 0; z < zsize; z++) {
+ for (y = 0; y < ysize; y++) {
+ dest_row = pict->data[0] + (ysize - 1 - y) * (xsize * zsize);
+
+ start_offset = BE_32(&start_table[y + z * ysize]);
+
+ /* don't seek if already at the next rle start offset */
+ if (url_ftell(f) != start_offset) {
+ url_fseek(f, start_offset, SEEK_SET);
+ }
+
+ if (expand_rle_row(f, dest_row, z, zsize) != xsize) {
+ ret = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ av_free(start_table);
+
+ return ret;
+}
+
+
+static int sgi_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ SGIInfo sgi_info, *s = &sgi_info;
+ AVImageInfo info1, *info = &info1;
+ int ret;
+
+ read_sgi_header(f, s);
+
+ if (s->bytes_per_channel != 1) {
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* check for supported image dimensions */
+ if (s->dimension != 2 && s->dimension != 3) {
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (s->zsize == SGI_GRAYSCALE) {
+ info->pix_fmt = PIX_FMT_GRAY8;
+ } else if (s->zsize == SGI_RGB) {
+ info->pix_fmt = PIX_FMT_RGB24;
+ } else if (s->zsize == SGI_RGBA) {
+ info->pix_fmt = PIX_FMT_RGBA32;
+ } else {
+ return AVERROR_INVALIDDATA;
+ }
+
+ info->width = s->xsize;
+ info->height = s->ysize;
+
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ return ret;
+
+ if (s->rle) {
+ return read_rle_sgi(s, &info->pict, f);
+ } else {
+ return read_uncompressed_sgi(s, &info->pict, f);
+ }
+
+ return 0; /* not reached */
+}
+
+#ifdef CONFIG_MUXERS
+static void write_sgi_header(ByteIOContext *f, const SGIInfo *info)
+{
+ int i;
+
+ put_be16(f, SGI_MAGIC);
+ put_byte(f, info->rle);
+ put_byte(f, info->bytes_per_channel);
+ put_be16(f, info->dimension);
+ put_be16(f, info->xsize);
+ put_be16(f, info->ysize);
+ put_be16(f, info->zsize);
+
+ /* The rest are constant in this implementation */
+ put_be32(f, 0L); /* pixmin */
+ put_be32(f, 255L); /* pixmax */
+ put_be32(f, 0L); /* dummy */
+
+ /* name */
+ for (i = 0; i < 80; i++) {
+ put_byte(f, 0);
+ }
+
+ put_be32(f, 0L); /* colormap */
+
+ /* The rest of the 512 byte header is unused. */
+ for (i = 0; i < 404; i++) {
+ put_byte(f, 0);
+ }
+}
+
+
+static int rle_row(ByteIOContext *f, char *row, int stride, int rowsize)
+{
+ int length, count, i, x;
+ char *start, repeat = 0;
+
+ for (x = rowsize, length = 0; x > 0;) {
+ start = row;
+ row += (2 * stride);
+ x -= 2;
+
+ while (x > 0 && (row[-2 * stride] != row[-1 * stride] ||
+ row[-1 * stride] != row[0])) {
+ row += stride;
+ x--;
+ };
+
+ row -= (2 * stride);
+ x += 2;
+
+ count = (row - start) / stride;
+ while (count > 0) {
+ i = count > 126 ? 126 : count;
+ count -= i;
+
+ put_byte(f, 0x80 | i);
+ length++;
+
+ while (i > 0) {
+ put_byte(f, *start);
+ start += stride;
+ i--;
+ length++;
+ };
+ };
+
+ if (x <= 0) {
+ break;
+ }
+
+ start = row;
+ repeat = row[0];
+
+ row += stride;
+ x--;
+
+ while (x > 0 && *row == repeat) {
+ row += stride;
+ x--;
+ };
+
+ count = (row - start) / stride;
+ while (count > 0) {
+ i = count > 126 ? 126 : count;
+ count -= i;
+
+ put_byte(f, i);
+ length++;
+
+ put_byte(f, repeat);
+ length++;
+ };
+ };
+
+ length++;
+
+ put_byte(f, 0);
+ return (length);
+}
+
+
+static int sgi_write(ByteIOContext *pb, AVImageInfo *info)
+{
+ SGIInfo sgi_info, *si = &sgi_info;
+ long *offsettab, *lengthtab;
+ int i, y, z;
+ int tablesize, chan_offset;
+ uint8_t *srcrow;
+
+ si->xsize = info->width;
+ si->ysize = info->height;
+ si->rle = 1;
+ si->bytes_per_channel = 1;
+
+ switch(info->pix_fmt) {
+ case PIX_FMT_GRAY8:
+ si->dimension = SGI_SINGLE_CHAN;
+ si->zsize = SGI_GRAYSCALE;
+ break;
+ case PIX_FMT_RGB24:
+ si->dimension = SGI_MULTI_CHAN;
+ si->zsize = SGI_RGB;
+ break;
+ case PIX_FMT_RGBA32:
+ si->dimension = SGI_MULTI_CHAN;
+ si->zsize = SGI_RGBA;
+ break;
+ default:
+ return AVERROR_INVALIDDATA;
+ }
+
+ write_sgi_header(pb, si);
+
+ tablesize = si->zsize * si->ysize * sizeof(long);
+
+ /* skip rle offset and length tables, write them at the end. */
+ url_fseek(pb, tablesize * 2, SEEK_CUR);
+ put_flush_packet(pb);
+
+ lengthtab = av_malloc(tablesize);
+ offsettab = av_malloc(tablesize);
+
+ for (z = 0; z < si->zsize; z++) {
+
+#ifndef WORDS_BIGENDIAN
+ /* rgba -> bgra for rgba32 on little endian cpus */
+ if (si->zsize == 4 && z != 3)
+ chan_offset = 2 - z;
+ else
+#endif
+ chan_offset = z;
+
+ srcrow = info->pict.data[0] + chan_offset;
+
+ for (y = si->ysize -1; y >= 0; y--) {
+ offsettab[(z * si->ysize) + y] = url_ftell(pb);
+ lengthtab[(z * si->ysize) + y] = rle_row(pb, srcrow,
+ si->zsize, si->xsize);
+ srcrow += info->pict.linesize[0];
+ }
+ }
+
+ url_fseek(pb, 512, SEEK_SET);
+
+ /* write offset table */
+ for (i = 0; i < (si->ysize * si->zsize); i++) {
+ put_be32(pb, offsettab[i]);
+ }
+
+ /* write length table */
+ for (i = 0; i < (si->ysize * si->zsize); i++) {
+ put_be32(pb, lengthtab[i]);
+ }
+
+ put_flush_packet(pb);
+
+ av_free(lengthtab);
+ av_free(offsettab);
+
+ return 0;
+}
+#endif // CONFIG_MUXERS
+
+AVImageFormat sgi_image_format = {
+ "sgi",
+ "sgi,rgb,rgba,bw",
+ sgi_probe,
+ sgi_read,
+ (1 << PIX_FMT_GRAY8) | (1 << PIX_FMT_RGB24) | (1 << PIX_FMT_RGBA32),
+#ifdef CONFIG_MUXERS
+ sgi_write,
+#else
+ NULL,
+#endif // CONFIG_MUXERS
+};
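For reference, the RLE scheme consumed by expand_rle_row() above is the usual SGI run-length coding: a control byte whose low 7 bits are a count, with the high bit selecting either a literal run or a single repeated byte, and a zero count terminating the row. A hedged sketch decoding one such row from an in-memory buffer (bounds-checked here, unlike the streaming version; names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Decode one SGI RLE row from src into dst.
     * Returns the number of pixels written, or -1 on truncated input. */
    static int sgi_rle_decode_row(const uint8_t *src, size_t src_len,
                                  uint8_t *dst, size_t dst_len)
    {
        size_t in = 0, out = 0;

        while (in < src_len) {
            uint8_t ctrl  = src[in++];
            uint8_t count = ctrl & 0x7f;

            if (count == 0)               /* a zero count terminates the row */
                return (int)out;
            if (ctrl & 0x80) {            /* literal run: copy the next count bytes */
                if (in + count > src_len || out + count > dst_len)
                    return -1;
                while (count--)
                    dst[out++] = src[in++];
            } else {                      /* replicate run: repeat one byte count times */
                uint8_t value;
                if (in >= src_len || out + count > dst_len)
                    return -1;
                value = src[in++];
                while (count--)
                    dst[out++] = value;
            }
        }
        return -1;                        /* ran out of input before the terminator */
    }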
diff --git a/contrib/ffmpeg/libavformat/sierravmd.c b/contrib/ffmpeg/libavformat/sierravmd.c
new file mode 100644
index 000000000..92dbce91d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/sierravmd.c
@@ -0,0 +1,302 @@
+/*
+ * Sierra VMD Format Demuxer
+ * Copyright (c) 2004 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file sierravmd.c
+ * Sierra VMD file demuxer
+ * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
+ * for more information on the Sierra VMD file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+#include "avformat.h"
+
+#define VMD_HEADER_SIZE 0x0330
+#define BYTES_PER_FRAME_RECORD 16
+
+typedef struct {
+ int stream_index;
+ offset_t frame_offset;
+ unsigned int frame_size;
+ int64_t pts;
+ int keyframe;
+ unsigned char frame_record[BYTES_PER_FRAME_RECORD];
+} vmd_frame_t;
+
+typedef struct VmdDemuxContext {
+ int video_stream_index;
+ int audio_stream_index;
+
+ unsigned int frame_count;
+ unsigned int frames_per_block;
+ vmd_frame_t *frame_table;
+ unsigned int current_frame;
+
+ int sample_rate;
+ int64_t audio_sample_counter;
+ int skiphdr;
+
+ unsigned char vmd_header[VMD_HEADER_SIZE];
+} VmdDemuxContext;
+
+static int vmd_probe(AVProbeData *p)
+{
+ if (p->buf_size < 2)
+ return 0;
+
+ /* check if the first 2 bytes of the file contain the appropriate size
+ * of a VMD header chunk */
+ if (LE_16(&p->buf[0]) != VMD_HEADER_SIZE - 2)
+ return 0;
+
+ /* only return half certainty since this check is a bit sketchy */
+ return AVPROBE_SCORE_MAX / 2;
+}
+
+static int vmd_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ VmdDemuxContext *vmd = (VmdDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st, *vst;
+ unsigned int toc_offset;
+ unsigned char *raw_frame_table;
+ int raw_frame_table_size;
+ offset_t current_offset;
+ int i, j;
+ unsigned int total_frames;
+ int64_t pts_inc = 1;
+ int64_t current_video_pts = 0, current_audio_pts = 0;
+ unsigned char chunk[BYTES_PER_FRAME_RECORD];
+ int num, den;
+ int sound_buffers;
+
+ /* fetch the main header, including the 2 header length bytes */
+ url_fseek(pb, 0, SEEK_SET);
+ if (get_buffer(pb, vmd->vmd_header, VMD_HEADER_SIZE) != VMD_HEADER_SIZE)
+ return AVERROR_IO;
+
+ /* start up the decoders */
+ vst = av_new_stream(s, 0);
+ if (!vst)
+ return AVERROR_NOMEM;
+ av_set_pts_info(vst, 33, 1, 10);
+ vmd->video_stream_index = vst->index;
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = CODEC_ID_VMDVIDEO;
+ vst->codec->codec_tag = 0; /* no fourcc */
+ vst->codec->width = LE_16(&vmd->vmd_header[12]);
+ vst->codec->height = LE_16(&vmd->vmd_header[14]);
+ vst->codec->extradata_size = VMD_HEADER_SIZE;
+ vst->codec->extradata = av_mallocz(VMD_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
+ memcpy(vst->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE);
+
+ /* if sample rate is 0, assume no audio */
+ vmd->sample_rate = LE_16(&vmd->vmd_header[804]);
+ if (vmd->sample_rate) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ vmd->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_VMDAUDIO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->channels = (vmd->vmd_header[811] & 0x80) ? 2 : 1;
+ st->codec->sample_rate = vmd->sample_rate;
+ st->codec->block_align = LE_16(&vmd->vmd_header[806]);
+ if (st->codec->block_align & 0x8000) {
+ st->codec->bits_per_sample = 16;
+ st->codec->block_align = -(st->codec->block_align - 0x10000);
+ } else {
+ st->codec->bits_per_sample = 8;
+ }
+ st->codec->bit_rate = st->codec->sample_rate *
+ st->codec->bits_per_sample * st->codec->channels;
+
+ /* calculate pts */
+ num = st->codec->block_align;
+ den = st->codec->sample_rate * st->codec->channels;
+ av_reduce(&den, &num, den, num, (1UL<<31)-1);
+ av_set_pts_info(vst, 33, num, den);
+ av_set_pts_info(st, 33, num, den);
+ pts_inc = num;
+ }
+
+ toc_offset = LE_32(&vmd->vmd_header[812]);
+ vmd->frame_count = LE_16(&vmd->vmd_header[6]);
+ vmd->frames_per_block = LE_16(&vmd->vmd_header[18]);
+ url_fseek(pb, toc_offset, SEEK_SET);
+
+ raw_frame_table = NULL;
+ vmd->frame_table = NULL;
+ sound_buffers = LE_16(&vmd->vmd_header[808]);
+ raw_frame_table_size = vmd->frame_count * 6;
+ raw_frame_table = av_malloc(raw_frame_table_size);
+ if(vmd->frame_count * vmd->frames_per_block >= UINT_MAX / sizeof(vmd_frame_t)){
+ av_log(s, AV_LOG_ERROR, "vmd->frame_count * vmd->frames_per_block too large\n");
+ return -1;
+ }
+ vmd->frame_table = av_malloc((vmd->frame_count * vmd->frames_per_block + sound_buffers) * sizeof(vmd_frame_t));
+ if (!raw_frame_table || !vmd->frame_table) {
+ av_free(raw_frame_table);
+ av_free(vmd->frame_table);
+ return AVERROR_NOMEM;
+ }
+ if (get_buffer(pb, raw_frame_table, raw_frame_table_size) !=
+ raw_frame_table_size) {
+ av_free(raw_frame_table);
+ av_free(vmd->frame_table);
+ return AVERROR_IO;
+ }
+
+ total_frames = 0;
+ for (i = 0; i < vmd->frame_count; i++) {
+
+ current_offset = LE_32(&raw_frame_table[6 * i + 2]);
+
+ /* handle each entry in index block */
+ for (j = 0; j < vmd->frames_per_block; j++) {
+ int type;
+ uint32_t size;
+
+ get_buffer(pb, chunk, BYTES_PER_FRAME_RECORD);
+ type = chunk[0];
+ size = LE_32(&chunk[2]);
+ if(!size)
+ continue;
+ switch(type) {
+ case 1: /* Audio Chunk */
+ /* first audio chunk contains several audio buffers */
+ if(current_audio_pts){
+ vmd->frame_table[total_frames].frame_offset = current_offset;
+ vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index;
+ vmd->frame_table[total_frames].frame_size = size;
+ memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
+ vmd->frame_table[total_frames].pts = current_audio_pts;
+ total_frames++;
+ current_audio_pts += pts_inc;
+ }else{
+ uint32_t flags;
+ int k;
+ int noff;
+ int64_t pos;
+
+ pos = url_ftell(pb);
+ url_fseek(pb, current_offset, SEEK_SET);
+ flags = get_le32(pb);
+ noff = 4;
+ url_fseek(pb, pos, SEEK_SET);
+ av_log(s, AV_LOG_DEBUG, "Sound mapping = %08X (%i bufs)\n", flags, sound_buffers);
+ for(k = 0; k < sound_buffers - 1; k++){
+ if(flags & 1) { /* silent block */
+ vmd->frame_table[total_frames].frame_size = 0;
+ }else{
+ vmd->frame_table[total_frames].frame_size = st->codec->block_align + (st->codec->block_align & 1);
+ }
+ noff += vmd->frame_table[total_frames].frame_size;
+ vmd->frame_table[total_frames].frame_offset = current_offset + noff;
+ vmd->frame_table[total_frames].stream_index = vmd->audio_stream_index;
+ memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
+ vmd->frame_table[total_frames].pts = current_audio_pts;
+ total_frames++;
+ current_audio_pts += pts_inc;
+ flags >>= 1;
+ }
+ }
+ break;
+ case 2: /* Video Chunk */
+ vmd->frame_table[total_frames].frame_offset = current_offset;
+ vmd->frame_table[total_frames].stream_index = vmd->video_stream_index;
+ vmd->frame_table[total_frames].frame_size = size;
+ memcpy(vmd->frame_table[total_frames].frame_record, chunk, BYTES_PER_FRAME_RECORD);
+ vmd->frame_table[total_frames].pts = current_video_pts;
+ total_frames++;
+ break;
+ }
+ current_offset += size;
+ }
+ current_video_pts += pts_inc;
+ }
+
+ av_free(raw_frame_table);
+
+ vmd->current_frame = 0;
+ vmd->frame_count = total_frames;
+
+ return 0;
+}
+
+static int vmd_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ VmdDemuxContext *vmd = (VmdDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret = 0;
+ vmd_frame_t *frame;
+
+ if (vmd->current_frame >= vmd->frame_count)
+ return AVERROR_IO;
+
+ frame = &vmd->frame_table[vmd->current_frame];
+ /* position the stream (will probably be there already) */
+ url_fseek(pb, frame->frame_offset, SEEK_SET);
+
+ if (av_new_packet(pkt, frame->frame_size + BYTES_PER_FRAME_RECORD))
+ return AVERROR_NOMEM;
+ pkt->pos= url_ftell(pb);
+ memcpy(pkt->data, frame->frame_record, BYTES_PER_FRAME_RECORD);
+ ret = get_buffer(pb, pkt->data + BYTES_PER_FRAME_RECORD,
+ frame->frame_size);
+
+ if (ret != frame->frame_size) {
+ av_free_packet(pkt);
+ ret = AVERROR_IO;
+ }
+ pkt->stream_index = frame->stream_index;
+ pkt->pts = frame->pts;
+ av_log(NULL, AV_LOG_INFO, " dispatching %s frame with %d bytes and pts %"PRId64"\n",
+ (frame->frame_record[0] == 0x02) ? "video" : "audio",
+ frame->frame_size + BYTES_PER_FRAME_RECORD,
+ pkt->pts);
+
+ vmd->current_frame++;
+
+ return ret;
+}
+
+static int vmd_read_close(AVFormatContext *s)
+{
+ VmdDemuxContext *vmd = (VmdDemuxContext *)s->priv_data;
+
+ av_free(vmd->frame_table);
+
+ return 0;
+}
+
+AVInputFormat vmd_demuxer = {
+ "vmd",
+ "Sierra VMD format",
+ sizeof(VmdDemuxContext),
+ vmd_probe,
+ vmd_read_header,
+ vmd_read_packet,
+ vmd_read_close,
+};
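The header reads in vmd_read_header() above are all fixed little-endian offsets into the 0x330-byte VMD header. A minimal sketch of the same field extraction over an in-memory copy of the header (the struct and helper names are illustrative, not part of the patch):

    #include <stdint.h>

    #define VMD_HEADER_SIZE 0x0330

    struct vmd_basic_header {
        uint16_t frame_count;   /* offset 6   */
        uint16_t width;         /* offset 12  */
        uint16_t height;        /* offset 14  */
        uint16_t sample_rate;   /* offset 804; 0 means no audio */
        uint32_t toc_offset;    /* offset 812 */
    };

    static uint16_t rd_le16(const uint8_t *p) { return p[0] | (p[1] << 8); }
    static uint32_t rd_le32(const uint8_t *p)
    {
        return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    /* Returns 0 on success, -1 if the leading length word does not match
     * (the same check used by vmd_probe() above). */
    static int parse_vmd_header(const uint8_t *hdr, struct vmd_basic_header *out)
    {
        if (rd_le16(hdr) != VMD_HEADER_SIZE - 2)
            return -1;
        out->frame_count = rd_le16(hdr + 6);
        out->width       = rd_le16(hdr + 12);
        out->height      = rd_le16(hdr + 14);
        out->sample_rate = rd_le16(hdr + 804);
        out->toc_offset  = rd_le32(hdr + 812);
        return 0;
    }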
diff --git a/contrib/ffmpeg/libavformat/smacker.c b/contrib/ffmpeg/libavformat/smacker.c
new file mode 100644
index 000000000..a08bd2d9f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/smacker.c
@@ -0,0 +1,345 @@
+/*
+ * Smacker demuxer
+ * Copyright (c) 2006 Konstantin Shishkov.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Based on http://wiki.multimedia.cx/index.php?title=Smacker
+ */
+
+#include "avformat.h"
+#include "riff.h"
+#include "bswap.h"
+
+#define SMACKER_PAL 0x01
+
+enum SAudFlags {
+ SMK_AUD_PACKED = 0x80000000,
+ SMK_AUD_16BITS = 0x20000000,
+ SMK_AUD_STEREO = 0x10000000,
+ SMK_AUD_BINKAUD = 0x08000000,
+ SMK_AUD_USEDCT = 0x04000000
+};
+
+typedef struct SmackerContext {
+ /* Smacker file header */
+ uint32_t magic;
+ uint32_t width, height;
+ uint32_t frames;
+ int pts_inc;
+ uint32_t flags;
+ uint32_t audio[7];
+ uint32_t treesize;
+ uint32_t mmap_size, mclr_size, full_size, type_size;
+ uint32_t rates[7];
+ uint32_t pad;
+ /* frame info */
+ uint32_t *frm_size;
+ uint8_t *frm_flags;
+ /* internal variables */
+ int cur_frame;
+ int is_ver4;
+ int64_t cur_pts;
+ /* current frame for demuxing */
+ uint8_t pal[768];
+ int indexes[7];
+ int videoindex;
+ uint8_t *bufs[7];
+ int buf_sizes[7];
+ int stream_id[7];
+ int curstream;
+ offset_t nextpos;
+ int64_t aud_pts[7];
+} SmackerContext;
+
+typedef struct SmackerFrame {
+ int64_t pts;
+ int stream;
+} SmackerFrame;
+
+/* palette used in Smacker */
+static const uint8_t smk_pal[64] = {
+ 0x00, 0x04, 0x08, 0x0C, 0x10, 0x14, 0x18, 0x1C,
+ 0x20, 0x24, 0x28, 0x2C, 0x30, 0x34, 0x38, 0x3C,
+ 0x41, 0x45, 0x49, 0x4D, 0x51, 0x55, 0x59, 0x5D,
+ 0x61, 0x65, 0x69, 0x6D, 0x71, 0x75, 0x79, 0x7D,
+ 0x82, 0x86, 0x8A, 0x8E, 0x92, 0x96, 0x9A, 0x9E,
+ 0xA2, 0xA6, 0xAA, 0xAE, 0xB2, 0xB6, 0xBA, 0xBE,
+ 0xC3, 0xC7, 0xCB, 0xCF, 0xD3, 0xD7, 0xDB, 0xDF,
+ 0xE3, 0xE7, 0xEB, 0xEF, 0xF3, 0xF7, 0xFB, 0xFF
+};
+
+
+static int smacker_probe(AVProbeData *p)
+{
+ if (p->buf_size < 4)
+ return 0;
+ if(p->buf[0] == 'S' && p->buf[1] == 'M' && p->buf[2] == 'K'
+ && (p->buf[3] == '2' || p->buf[3] == '4'))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int smacker_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ SmackerContext *smk = (SmackerContext *)s->priv_data;
+ AVStream *st, *ast[7];
+ int i, ret;
+ int tbase;
+
+ /* read and check header */
+ smk->magic = get_le32(pb);
+ if (smk->magic != MKTAG('S', 'M', 'K', '2') && smk->magic != MKTAG('S', 'M', 'K', '4'))
+ return -1;
+ smk->width = get_le32(pb);
+ smk->height = get_le32(pb);
+ smk->frames = get_le32(pb);
+ smk->pts_inc = (int32_t)get_le32(pb);
+ smk->flags = get_le32(pb);
+ for(i = 0; i < 7; i++)
+ smk->audio[i] = get_le32(pb);
+ smk->treesize = get_le32(pb);
+
+ if(smk->treesize >= UINT_MAX/4){ // smk->treesize + 16 must not overflow (this check is probably redundant)
+ av_log(s, AV_LOG_ERROR, "treesize too large\n");
+ return -1;
+ }
+
+//FIXME remove extradata "rebuilding"
+ smk->mmap_size = get_le32(pb);
+ smk->mclr_size = get_le32(pb);
+ smk->full_size = get_le32(pb);
+ smk->type_size = get_le32(pb);
+ for(i = 0; i < 7; i++)
+ smk->rates[i] = get_le32(pb);
+ smk->pad = get_le32(pb);
+ /* setup data */
+ if(smk->frames > 0xFFFFFF) {
+ av_log(s, AV_LOG_ERROR, "Too many frames: %i\n", smk->frames);
+ return -1;
+ }
+ smk->frm_size = av_malloc(smk->frames * 4);
+ smk->frm_flags = av_malloc(smk->frames);
+
+ smk->is_ver4 = (smk->magic != MKTAG('S', 'M', 'K', '2'));
+
+ /* read frame info */
+ for(i = 0; i < smk->frames; i++) {
+ smk->frm_size[i] = get_le32(pb);
+ }
+ for(i = 0; i < smk->frames; i++) {
+ smk->frm_flags[i] = get_byte(pb);
+ }
+
+ /* init video codec */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ smk->videoindex = st->index;
+ st->codec->width = smk->width;
+ st->codec->height = smk->height;
+ st->codec->pix_fmt = PIX_FMT_PAL8;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_SMACKVIDEO;
+ st->codec->codec_tag = smk->magic;
+ /* Smacker uses 100000 as internal timebase */
+ if(smk->pts_inc < 0)
+ smk->pts_inc = -smk->pts_inc;
+ else
+ smk->pts_inc *= 100;
+ tbase = 100000;
+ av_reduce(&tbase, &smk->pts_inc, tbase, smk->pts_inc, (1UL<<31)-1);
+ av_set_pts_info(st, 33, smk->pts_inc, tbase);
+ /* handle possible audio streams */
+ for(i = 0; i < 7; i++) {
+ smk->indexes[i] = -1;
+ if((smk->rates[i] & 0xFFFFFF) && !(smk->rates[i] & SMK_AUD_BINKAUD)){
+ ast[i] = av_new_stream(s, 0);
+ smk->indexes[i] = ast[i]->index;
+ ast[i]->codec->codec_type = CODEC_TYPE_AUDIO;
+ ast[i]->codec->codec_id = (smk->rates[i] & SMK_AUD_PACKED) ? CODEC_ID_SMACKAUDIO : CODEC_ID_PCM_U8;
+ ast[i]->codec->codec_tag = MKTAG('S', 'M', 'K', 'A');
+ ast[i]->codec->channels = (smk->rates[i] & SMK_AUD_STEREO) ? 2 : 1;
+ ast[i]->codec->sample_rate = smk->rates[i] & 0xFFFFFF;
+ ast[i]->codec->bits_per_sample = (smk->rates[i] & SMK_AUD_16BITS) ? 16 : 8;
+ if(ast[i]->codec->bits_per_sample == 16 && ast[i]->codec->codec_id == CODEC_ID_PCM_U8)
+ ast[i]->codec->codec_id = CODEC_ID_PCM_S16LE;
+ av_set_pts_info(ast[i], 64, 1, ast[i]->codec->sample_rate
+ * ast[i]->codec->channels * ast[i]->codec->bits_per_sample / 8);
+ }
+ }
+
+
+ /* load trees to extradata, they will be unpacked by decoder */
+ st->codec->extradata = av_malloc(smk->treesize + 16);
+ st->codec->extradata_size = smk->treesize + 16;
+ if(!st->codec->extradata){
+ av_log(s, AV_LOG_ERROR, "Cannot allocate %i bytes of extradata\n", smk->treesize + 16);
+ av_free(smk->frm_size);
+ av_free(smk->frm_flags);
+ return -1;
+ }
+ ret = get_buffer(pb, st->codec->extradata + 16, st->codec->extradata_size - 16);
+ if(ret != st->codec->extradata_size - 16){
+ av_free(smk->frm_size);
+ av_free(smk->frm_flags);
+ return AVERROR_IO;
+ }
+ ((int32_t*)st->codec->extradata)[0] = le2me_32(smk->mmap_size);
+ ((int32_t*)st->codec->extradata)[1] = le2me_32(smk->mclr_size);
+ ((int32_t*)st->codec->extradata)[2] = le2me_32(smk->full_size);
+ ((int32_t*)st->codec->extradata)[3] = le2me_32(smk->type_size);
+
+ smk->curstream = -1;
+ smk->nextpos = url_ftell(pb);
+
+ return 0;
+}
+
+
+static int smacker_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ SmackerContext *smk = (SmackerContext *)s->priv_data;
+ int flags;
+ int ret;
+ int i;
+ int frame_size = 0;
+ int palchange = 0;
+ int pos;
+
+ if (url_feof(&s->pb) || smk->cur_frame >= smk->frames)
+ return -EIO;
+
+ /* if we demuxed all streams, pass another frame */
+ if(smk->curstream < 0) {
+ url_fseek(&s->pb, smk->nextpos, 0);
+ frame_size = smk->frm_size[smk->cur_frame] & (~3);
+ flags = smk->frm_flags[smk->cur_frame];
+ /* handle palette change event */
+ pos = url_ftell(&s->pb);
+ if(flags & SMACKER_PAL){
+ int size, sz, t, off, j, pos;
+ uint8_t *pal = smk->pal;
+ uint8_t oldpal[768];
+
+ memcpy(oldpal, pal, 768);
+ size = get_byte(&s->pb);
+ size = size * 4 - 1;
+ frame_size -= size;
+ frame_size--;
+ sz = 0;
+ pos = url_ftell(&s->pb) + size;
+ while(sz < 256){
+ t = get_byte(&s->pb);
+ if(t & 0x80){ /* skip palette entries */
+ sz += (t & 0x7F) + 1;
+ pal += ((t & 0x7F) + 1) * 3;
+ } else if(t & 0x40){ /* copy with offset */
+ off = get_byte(&s->pb) * 3;
+ j = (t & 0x3F) + 1;
+ while(j-- && sz < 256) {
+ *pal++ = oldpal[off + 0];
+ *pal++ = oldpal[off + 1];
+ *pal++ = oldpal[off + 2];
+ sz++;
+ off += 3;
+ }
+ } else { /* new entries */
+ *pal++ = smk_pal[t];
+ *pal++ = smk_pal[get_byte(&s->pb) & 0x3F];
+ *pal++ = smk_pal[get_byte(&s->pb) & 0x3F];
+ sz++;
+ }
+ }
+ url_fseek(&s->pb, pos, 0);
+ palchange |= 1;
+ }
+ flags >>= 1;
+ smk->curstream = -1;
+ /* if audio chunks are present, put them to stack and retrieve later */
+ for(i = 0; i < 7; i++) {
+ if(flags & 1) {
+ int size;
+ size = get_le32(&s->pb) - 4;
+ frame_size -= size;
+ frame_size -= 4;
+ smk->curstream++;
+ smk->bufs[smk->curstream] = av_realloc(smk->bufs[smk->curstream], size);
+ smk->buf_sizes[smk->curstream] = size;
+ ret = get_buffer(&s->pb, smk->bufs[smk->curstream], size);
+ if(ret != size)
+ return AVERROR_IO;
+ smk->stream_id[smk->curstream] = smk->indexes[i];
+ }
+ flags >>= 1;
+ }
+ if (av_new_packet(pkt, frame_size + 768))
+ return AVERROR_NOMEM;
+ if(smk->frm_size[smk->cur_frame] & 1)
+ palchange |= 2;
+ pkt->data[0] = palchange;
+ memcpy(pkt->data + 1, smk->pal, 768);
+ ret = get_buffer(&s->pb, pkt->data + 769, frame_size);
+ if(ret != frame_size)
+ return AVERROR_IO;
+ pkt->stream_index = smk->videoindex;
+ pkt->size = ret + 769;
+ smk->cur_frame++;
+ smk->nextpos = url_ftell(&s->pb);
+ } else {
+ if (av_new_packet(pkt, smk->buf_sizes[smk->curstream]))
+ return AVERROR_NOMEM;
+ memcpy(pkt->data, smk->bufs[smk->curstream], smk->buf_sizes[smk->curstream]);
+ pkt->size = smk->buf_sizes[smk->curstream];
+ pkt->stream_index = smk->stream_id[smk->curstream];
+ pkt->pts = smk->aud_pts[smk->curstream];
+ smk->aud_pts[smk->curstream] += LE_32(pkt->data);
+ smk->curstream--;
+ }
+
+ return 0;
+}
+
+static int smacker_read_close(AVFormatContext *s)
+{
+ SmackerContext *smk = (SmackerContext *)s->priv_data;
+ int i;
+
+ for(i = 0; i < 7; i++)
+ if(smk->bufs[i])
+ av_free(smk->bufs[i]);
+ if(smk->frm_size)
+ av_free(smk->frm_size);
+ if(smk->frm_flags)
+ av_free(smk->frm_flags);
+
+ return 0;
+}
+
+AVInputFormat smacker_demuxer = {
+ "smk",
+ "Smacker Video",
+ sizeof(SmackerContext),
+ smacker_probe,
+ smacker_read_header,
+ smacker_read_packet,
+ smacker_read_close,
+};
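The palette-change branch of smacker_read_packet() above applies a delta to the previous 256-entry palette: a control byte either keeps a run of old entries, copies a run from an offset in the old palette, or introduces a literal entry whose 6-bit components are expanded through smk_pal. A rough sketch of the same logic over an in-memory chunk (bounds checks on the chunk are omitted, matching the streaming code, and the function name is illustrative):

    #include <stdint.h>
    #include <string.h>

    /* pal holds 256 RGB triplets (768 bytes) and is updated in place;
     * smk_pal is the 64-entry 6-bit-to-8-bit table defined in the demuxer. */
    static void smk_apply_palette_delta(const uint8_t *chunk,
                                        const uint8_t smk_pal[64],
                                        uint8_t pal[768])
    {
        uint8_t oldpal[768];
        const uint8_t *in = chunk;
        uint8_t *out = pal;
        int sz = 0;

        memcpy(oldpal, pal, 768);
        while (sz < 256) {
            uint8_t t = *in++;
            if (t & 0x80) {                       /* keep a run of existing entries */
                sz  += (t & 0x7F) + 1;
                out += ((t & 0x7F) + 1) * 3;
            } else if (t & 0x40) {                /* copy a run from the old palette */
                int off = (*in++) * 3;
                int j = (t & 0x3F) + 1;
                while (j-- && sz < 256) {
                    *out++ = oldpal[off + 0];
                    *out++ = oldpal[off + 1];
                    *out++ = oldpal[off + 2];
                    sz++;
                    off += 3;
                }
            } else {                              /* literal entry, 6-bit components */
                *out++ = smk_pal[t];
                *out++ = smk_pal[*in++ & 0x3F];
                *out++ = smk_pal[*in++ & 0x3F];
                sz++;
            }
        }
    }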
diff --git a/contrib/ffmpeg/libavformat/sol.c b/contrib/ffmpeg/libavformat/sol.c
new file mode 100644
index 000000000..20e45f75d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/sol.c
@@ -0,0 +1,160 @@
+/*
+ * Sierra SOL demuxer
+ * Copyright Konstantin Shishkov.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Based on documents from Game Audio Player and own research
+ */
+
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+#include "bswap.h"
+
+/* if we don't know the size in advance */
+#define AU_UNKOWN_SIZE ((uint32_t)(~0))
+
+static int sol_probe(AVProbeData *p)
+{
+ /* check file header */
+ uint16_t magic;
+ if (p->buf_size <= 14)
+ return 0;
+ magic=le2me_16(*((uint16_t*)p->buf));
+ if ((magic == 0x0B8D || magic == 0x0C0D || magic == 0x0C8D) &&
+ p->buf[2] == 'S' && p->buf[3] == 'O' &&
+ p->buf[4] == 'L' && p->buf[5] == 0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+#define SOL_DPCM 1
+#define SOL_16BIT 4
+#define SOL_STEREO 16
+
+static int sol_codec_id(int magic, int type)
+{
+ if (magic == 0x0B8D)
+ {
+ if (type & SOL_DPCM) return CODEC_ID_SOL_DPCM;
+ else return CODEC_ID_PCM_U8;
+ }
+ if (type & SOL_DPCM)
+ {
+ if (type & SOL_16BIT) return CODEC_ID_SOL_DPCM;
+ else if (magic == 0x0C8D) return CODEC_ID_SOL_DPCM;
+ else return CODEC_ID_SOL_DPCM;
+ }
+ if (type & SOL_16BIT) return CODEC_ID_PCM_S16LE;
+ return CODEC_ID_PCM_U8;
+}
+
+static int sol_codec_type(int magic, int type)
+{
+ if (magic == 0x0B8D) return 1;//SOL_DPCM_OLD;
+ if (type & SOL_DPCM)
+ {
+ if (type & SOL_16BIT) return 3;//SOL_DPCM_NEW16;
+ else if (magic == 0x0C8D) return 1;//SOL_DPCM_OLD;
+ else return 2;//SOL_DPCM_NEW8;
+ }
+ return -1;
+}
+
+static int sol_channels(int magic, int type)
+{
+ if (magic == 0x0B8D || !(type & SOL_STEREO)) return 1;
+ return 2;
+}
+
+static int sol_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int size;
+ unsigned int magic,tag;
+ ByteIOContext *pb = &s->pb;
+ unsigned int id, codec, channels, rate, type;
+ AVStream *st;
+
+ /* check ".snd" header */
+ magic = get_le16(pb);
+ tag = get_le32(pb);
+ if (tag != MKTAG('S', 'O', 'L', 0))
+ return -1;
+ rate = get_le16(pb);
+ type = get_byte(pb);
+ size = get_le32(pb);
+ if (magic != 0x0B8D)
+ get_byte(pb); /* newer SOLs contain padding byte */
+
+ codec = sol_codec_id(magic, type);
+ channels = sol_channels(magic, type);
+
+ if (codec == CODEC_ID_SOL_DPCM)
+ id = sol_codec_type(magic, type);
+ else id = 0;
+
+ /* now we are ready: build format streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_tag = id;
+ st->codec->codec_id = codec;
+ st->codec->channels = channels;
+ st->codec->sample_rate = rate;
+ av_set_pts_info(st, 64, 1, rate);
+ return 0;
+}
+
+#define MAX_SIZE 4096
+
+static int sol_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ int ret;
+
+ if (url_feof(&s->pb))
+ return -EIO;
+ ret= av_get_packet(&s->pb, pkt, MAX_SIZE);
+ pkt->stream_index = 0;
+
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return 0;
+}
+
+static int sol_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVInputFormat sol_demuxer = {
+ "sol",
+ "Sierra SOL Format",
+ 0,
+ sol_probe,
+ sol_read_header,
+ sol_read_packet,
+ sol_read_close,
+ pcm_read_seek,
+};
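The probe above keys off a little-endian magic word (0x0B8D, 0x0C0D or 0x0C8D) followed by the literal bytes "SOL\0". A small self-contained version of that signature test, with an illustrative name:

    #include <stddef.h>
    #include <stdint.h>

    /* Returns non-zero if buf starts with a SOL signature, mirroring sol_probe(). */
    static int looks_like_sol(const uint8_t *buf, size_t len)
    {
        uint16_t magic;

        if (len <= 14)
            return 0;
        magic = buf[0] | (buf[1] << 8);
        if (magic != 0x0B8D && magic != 0x0C0D && magic != 0x0C8D)
            return 0;
        return buf[2] == 'S' && buf[3] == 'O' && buf[4] == 'L' && buf[5] == 0;
    }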
diff --git a/contrib/ffmpeg/libavformat/swf.c b/contrib/ffmpeg/libavformat/swf.c
new file mode 100644
index 000000000..6029e3678
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/swf.c
@@ -0,0 +1,944 @@
+/*
+ * Flash Compatible Streaming Format
+ * Copyright (c) 2000 Fabrice Bellard.
+ * Copyright (c) 2003 Tinic Uro.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "bitstream.h"
+#include "riff.h" /* for CodecTag */
+
+/* should have a generic way to indicate probable size */
+#define DUMMY_FILE_SIZE (100 * 1024 * 1024)
+#define DUMMY_DURATION 600 /* in seconds */
+
+#define TAG_END 0
+#define TAG_SHOWFRAME 1
+#define TAG_DEFINESHAPE 2
+#define TAG_FREECHARACTER 3
+#define TAG_PLACEOBJECT 4
+#define TAG_REMOVEOBJECT 5
+#define TAG_STREAMHEAD 18
+#define TAG_STREAMBLOCK 19
+#define TAG_JPEG2 21
+#define TAG_PLACEOBJECT2 26
+#define TAG_STREAMHEAD2 45
+#define TAG_VIDEOSTREAM 60
+#define TAG_VIDEOFRAME 61
+
+#define TAG_LONG 0x100
+
+/* flags for shape definition */
+#define FLAG_MOVETO 0x01
+#define FLAG_SETFILL0 0x02
+#define FLAG_SETFILL1 0x04
+
+#define AUDIO_FIFO_SIZE 65536
+
+/* character id used */
+#define BITMAP_ID 0
+#define VIDEO_ID 0
+#define SHAPE_ID 1
+
+#undef NDEBUG
+#include <assert.h>
+
+typedef struct {
+
+ offset_t duration_pos;
+ offset_t tag_pos;
+
+ int samples_per_frame;
+ int sound_samples;
+ int video_samples;
+ int swf_frame_number;
+ int video_frame_number;
+ int ms_per_frame;
+ int ch_id;
+ int tag;
+
+ uint8_t *audio_fifo;
+ int audio_in_pos;
+ int audio_out_pos;
+ int audio_size;
+
+ int video_type;
+ int audio_type;
+} SWFContext;
+
+static const CodecTag swf_codec_tags[] = {
+ {CODEC_ID_FLV1, 0x02},
+ {CODEC_ID_VP6F, 0x04},
+ {0, 0},
+};
+
+static const int sSampleRates[3][4] = {
+ {44100, 48000, 32000, 0},
+ {22050, 24000, 16000, 0},
+ {11025, 12000, 8000, 0},
+};
+
+static const int sBitRates[2][3][15] = {
+ { { 0, 32, 64, 96,128,160,192,224,256,288,320,352,384,416,448},
+ { 0, 32, 48, 56, 64, 80, 96,112,128,160,192,224,256,320,384},
+ { 0, 32, 40, 48, 56, 64, 80, 96,112,128,160,192,224,256,320}
+ },
+ { { 0, 32, 48, 56, 64, 80, 96,112,128,144,160,176,192,224,256},
+ { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160},
+ { 0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96,112,128,144,160}
+ },
+};
+
+static const int sSamplesPerFrame[3][3] =
+{
+ { 384, 1152, 1152 },
+ { 384, 1152, 576 },
+ { 384, 1152, 576 }
+};
+
+static const int sBitsPerSlot[3] = {
+ 32,
+ 8,
+ 8
+};
+
+static int swf_mp3_info(void *data, int *byteSize, int *samplesPerFrame, int *sampleRate, int *isMono )
+{
+ uint8_t *dataTmp = (uint8_t *)data;
+ uint32_t header = ( (uint32_t)dataTmp[0] << 24 ) | ( (uint32_t)dataTmp[1] << 16 ) | ( (uint32_t)dataTmp[2] << 8 ) | (uint32_t)dataTmp[3];
+ int layerID = 3 - ((header >> 17) & 0x03);
+ int bitRateID = ((header >> 12) & 0x0f);
+ int sampleRateID = ((header >> 10) & 0x03);
+ int bitRate = 0;
+ int bitsPerSlot = sBitsPerSlot[layerID];
+ int isPadded = ((header >> 9) & 0x01);
+
+ if ( (( header >> 21 ) & 0x7ff) != 0x7ff ) {
+ return 0;
+ }
+
+ *isMono = ((header >> 6) & 0x03) == 0x03;
+
+ if ( (header >> 19 ) & 0x01 ) {
+ *sampleRate = sSampleRates[0][sampleRateID];
+ bitRate = sBitRates[0][layerID][bitRateID] * 1000;
+ *samplesPerFrame = sSamplesPerFrame[0][layerID];
+ } else {
+ if ( (header >> 20) & 0x01 ) {
+ *sampleRate = sSampleRates[1][sampleRateID];
+ bitRate = sBitRates[1][layerID][bitRateID] * 1000;
+ *samplesPerFrame = sSamplesPerFrame[1][layerID];
+ } else {
+ *sampleRate = sSampleRates[2][sampleRateID];
+ bitRate = sBitRates[1][layerID][bitRateID] * 1000;
+ *samplesPerFrame = sSamplesPerFrame[2][layerID];
+ }
+ }
+
+ *byteSize = ( ( ( ( *samplesPerFrame * (bitRate / bitsPerSlot) ) / *sampleRate ) + isPadded ) );
+
+ return 1;
+}
+
+#ifdef CONFIG_MUXERS
+static void put_swf_tag(AVFormatContext *s, int tag)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+
+ swf->tag_pos = url_ftell(pb);
+ swf->tag = tag;
+ /* reserve some room for the tag */
+ if (tag & TAG_LONG) {
+ put_le16(pb, 0);
+ put_le32(pb, 0);
+ } else {
+ put_le16(pb, 0);
+ }
+}
+
+static void put_swf_end_tag(AVFormatContext *s)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ offset_t pos;
+ int tag_len, tag;
+
+ pos = url_ftell(pb);
+ tag_len = pos - swf->tag_pos - 2;
+ tag = swf->tag;
+ url_fseek(pb, swf->tag_pos, SEEK_SET);
+ if (tag & TAG_LONG) {
+ tag &= ~TAG_LONG;
+ put_le16(pb, (tag << 6) | 0x3f);
+ put_le32(pb, tag_len - 4);
+ } else {
+ assert(tag_len < 0x3f);
+ put_le16(pb, (tag << 6) | tag_len);
+ }
+ url_fseek(pb, pos, SEEK_SET);
+}
+
+static inline void max_nbits(int *nbits_ptr, int val)
+{
+ int n;
+
+ if (val == 0)
+ return;
+ val = abs(val);
+ n = 1;
+ while (val != 0) {
+ n++;
+ val >>= 1;
+ }
+ if (n > *nbits_ptr)
+ *nbits_ptr = n;
+}
+
+static void put_swf_rect(ByteIOContext *pb,
+ int xmin, int xmax, int ymin, int ymax)
+{
+ PutBitContext p;
+ uint8_t buf[256];
+ int nbits, mask;
+
+ init_put_bits(&p, buf, sizeof(buf));
+
+ nbits = 0;
+ max_nbits(&nbits, xmin);
+ max_nbits(&nbits, xmax);
+ max_nbits(&nbits, ymin);
+ max_nbits(&nbits, ymax);
+ mask = (1 << nbits) - 1;
+
+ /* rectangle info */
+ put_bits(&p, 5, nbits);
+ put_bits(&p, nbits, xmin & mask);
+ put_bits(&p, nbits, xmax & mask);
+ put_bits(&p, nbits, ymin & mask);
+ put_bits(&p, nbits, ymax & mask);
+
+ flush_put_bits(&p);
+ put_buffer(pb, buf, pbBufPtr(&p) - p.buf);
+}
+
+static void put_swf_line_edge(PutBitContext *pb, int dx, int dy)
+{
+ int nbits, mask;
+
+ put_bits(pb, 1, 1); /* edge */
+ put_bits(pb, 1, 1); /* line select */
+ nbits = 2;
+ max_nbits(&nbits, dx);
+ max_nbits(&nbits, dy);
+
+ mask = (1 << nbits) - 1;
+ put_bits(pb, 4, nbits - 2); /* 16 bits precision */
+ if (dx == 0) {
+ put_bits(pb, 1, 0);
+ put_bits(pb, 1, 1);
+ put_bits(pb, nbits, dy & mask);
+ } else if (dy == 0) {
+ put_bits(pb, 1, 0);
+ put_bits(pb, 1, 0);
+ put_bits(pb, nbits, dx & mask);
+ } else {
+ put_bits(pb, 1, 1);
+ put_bits(pb, nbits, dx & mask);
+ put_bits(pb, nbits, dy & mask);
+ }
+}
+
+#define FRAC_BITS 16
+
+/* put matrix */
+static void put_swf_matrix(ByteIOContext *pb,
+ int a, int b, int c, int d, int tx, int ty)
+{
+ PutBitContext p;
+ uint8_t buf[256];
+ int nbits;
+
+ init_put_bits(&p, buf, sizeof(buf));
+
+ put_bits(&p, 1, 1); /* a, d present */
+ nbits = 1;
+ max_nbits(&nbits, a);
+ max_nbits(&nbits, d);
+ put_bits(&p, 5, nbits); /* nb bits */
+ put_bits(&p, nbits, a);
+ put_bits(&p, nbits, d);
+
+ put_bits(&p, 1, 1); /* b, c present */
+ nbits = 1;
+ max_nbits(&nbits, c);
+ max_nbits(&nbits, b);
+ put_bits(&p, 5, nbits); /* nb bits */
+ put_bits(&p, nbits, c);
+ put_bits(&p, nbits, b);
+
+ nbits = 1;
+ max_nbits(&nbits, tx);
+ max_nbits(&nbits, ty);
+ put_bits(&p, 5, nbits); /* nb bits */
+ put_bits(&p, nbits, tx);
+ put_bits(&p, nbits, ty);
+
+ flush_put_bits(&p);
+ put_buffer(pb, buf, pbBufPtr(&p) - p.buf);
+}
+
+/* */
+static int swf_write_header(AVFormatContext *s)
+{
+ SWFContext *swf;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc, *audio_enc, *video_enc;
+ PutBitContext p;
+ uint8_t buf1[256];
+ int i, width, height, rate, rate_base;
+
+ swf = av_malloc(sizeof(SWFContext));
+ if (!swf)
+ return -1;
+ s->priv_data = swf;
+
+ swf->ch_id = -1;
+ swf->audio_in_pos = 0;
+ swf->audio_out_pos = 0;
+ swf->audio_size = 0;
+ swf->audio_fifo = av_malloc(AUDIO_FIFO_SIZE);
+ swf->sound_samples = 0;
+ swf->video_samples = 0;
+ swf->swf_frame_number = 0;
+ swf->video_frame_number = 0;
+
+ video_enc = NULL;
+ audio_enc = NULL;
+ for(i=0;i<s->nb_streams;i++) {
+ enc = s->streams[i]->codec;
+ if (enc->codec_type == CODEC_TYPE_AUDIO)
+ audio_enc = enc;
+ else {
+ if ( enc->codec_id == CODEC_ID_VP6F ||
+ enc->codec_id == CODEC_ID_FLV1 ||
+ enc->codec_id == CODEC_ID_MJPEG ) {
+ video_enc = enc;
+ } else {
+ av_log(enc, AV_LOG_ERROR, "SWF only supports VP6, FLV1 and MJPEG\n");
+ return -1;
+ }
+ }
+ }
+
+ if (!video_enc) {
+        /* currently, cannot work correctly if audio only */
+ swf->video_type = 0;
+ width = 320;
+ height = 200;
+ rate = 10;
+ rate_base= 1;
+ } else {
+ swf->video_type = video_enc->codec_id;
+ width = video_enc->width;
+ height = video_enc->height;
+ rate = video_enc->time_base.den;
+ rate_base = video_enc->time_base.num;
+ }
+
+ if (!audio_enc ) {
+ swf->audio_type = 0;
+ swf->samples_per_frame = ( 44100. * rate_base ) / rate;
+ } else {
+ swf->audio_type = audio_enc->codec_id;
+ swf->samples_per_frame = ( ( audio_enc->sample_rate ) * rate_base ) / rate;
+ }
+
+ put_tag(pb, "FWS");
+ if ( video_enc && video_enc->codec_id == CODEC_ID_VP6F ) {
+ put_byte(pb, 8); /* version (version 8 and above support VP6 codec) */
+ } else if ( video_enc && video_enc->codec_id == CODEC_ID_FLV1 ) {
+ put_byte(pb, 6); /* version (version 6 and above support FLV1 codec) */
+ } else {
+ put_byte(pb, 4); /* version (should use 4 for mpeg audio support) */
+ }
+ put_le32(pb, DUMMY_FILE_SIZE); /* dummy size
+ (will be patched if not streamed) */
+
+ put_swf_rect(pb, 0, width * 20, 0, height * 20);
+ put_le16(pb, (rate * 256) / rate_base); /* frame rate */
+ swf->duration_pos = url_ftell(pb);
+ put_le16(pb, (uint16_t)(DUMMY_DURATION * (int64_t)rate / rate_base)); /* frame count */
+
+ /* define a shape with the jpeg inside */
+ if ( video_enc && (video_enc->codec_id == CODEC_ID_VP6F ||
+ video_enc->codec_id == CODEC_ID_FLV1 )) {
+ } else if ( video_enc && video_enc->codec_id == CODEC_ID_MJPEG ) {
+ put_swf_tag(s, TAG_DEFINESHAPE);
+
+ put_le16(pb, SHAPE_ID); /* ID of shape */
+ /* bounding rectangle */
+ put_swf_rect(pb, 0, width, 0, height);
+ /* style info */
+ put_byte(pb, 1); /* one fill style */
+ put_byte(pb, 0x41); /* clipped bitmap fill */
+ put_le16(pb, BITMAP_ID); /* bitmap ID */
+ /* position of the bitmap */
+ put_swf_matrix(pb, (int)(1.0 * (1 << FRAC_BITS)), 0,
+ 0, (int)(1.0 * (1 << FRAC_BITS)), 0, 0);
+ put_byte(pb, 0); /* no line style */
+
+ /* shape drawing */
+ init_put_bits(&p, buf1, sizeof(buf1));
+ put_bits(&p, 4, 1); /* one fill bit */
+ put_bits(&p, 4, 0); /* zero line bit */
+
+ put_bits(&p, 1, 0); /* not an edge */
+ put_bits(&p, 5, FLAG_MOVETO | FLAG_SETFILL0);
+ put_bits(&p, 5, 1); /* nbits */
+ put_bits(&p, 1, 0); /* X */
+ put_bits(&p, 1, 0); /* Y */
+ put_bits(&p, 1, 1); /* set fill style 1 */
+
+ /* draw the rectangle ! */
+ put_swf_line_edge(&p, width, 0);
+ put_swf_line_edge(&p, 0, height);
+ put_swf_line_edge(&p, -width, 0);
+ put_swf_line_edge(&p, 0, -height);
+
+ /* end of shape */
+ put_bits(&p, 1, 0); /* not an edge */
+ put_bits(&p, 5, 0);
+
+ flush_put_bits(&p);
+ put_buffer(pb, buf1, pbBufPtr(&p) - p.buf);
+
+ put_swf_end_tag(s);
+ }
+
+ if (audio_enc && audio_enc->codec_id == CODEC_ID_MP3 ) {
+ int v;
+
+ /* start sound */
+ put_swf_tag(s, TAG_STREAMHEAD2);
+
+ v = 0;
+ switch(audio_enc->sample_rate) {
+ case 11025:
+ v |= 1 << 2;
+ break;
+ case 22050:
+ v |= 2 << 2;
+ break;
+ case 44100:
+ v |= 3 << 2;
+ break;
+ default:
+ /* not supported */
+ av_log(s, AV_LOG_ERROR, "swf doesnt support that sample rate, choose from (44100, 22050, 11025)\n");
+ av_free(swf->audio_fifo);
+ av_free(swf);
+ return -1;
+ }
+ v |= 0x02; /* 16 bit playback */
+ if (audio_enc->channels == 2)
+ v |= 0x01; /* stereo playback */
+ put_byte(&s->pb, v);
+ v |= 0x20; /* mp3 compressed */
+ put_byte(&s->pb, v);
+ put_le16(&s->pb, swf->samples_per_frame); /* avg samples per frame */
+ put_le16(&s->pb, 0);
+
+ put_swf_end_tag(s);
+ }
+
+ put_flush_packet(&s->pb);
+ return 0;
+}
+
+static int swf_write_video(AVFormatContext *s,
+ AVCodecContext *enc, const uint8_t *buf, int size)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int c = 0;
+ int outSize = 0;
+ int outSamples = 0;
+
+ /* Flash Player limit */
+ if ( swf->swf_frame_number == 16000 ) {
+ av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");
+ }
+
+ if ( swf->audio_type ) {
+ /* Prescan audio data for this swf frame */
+retry_swf_audio_packet:
+ if ( ( swf->audio_size-outSize ) >= 4 ) {
+ int mp3FrameSize = 0;
+ int mp3SampleRate = 0;
+ int mp3IsMono = 0;
+ int mp3SamplesPerFrame = 0;
+
+ /* copy out mp3 header from ring buffer */
+ uint8_t header[4];
+ for (c=0; c<4; c++) {
+ header[c] = swf->audio_fifo[(swf->audio_in_pos+outSize+c) % AUDIO_FIFO_SIZE];
+ }
+
+ if ( swf_mp3_info(header,&mp3FrameSize,&mp3SamplesPerFrame,&mp3SampleRate,&mp3IsMono) ) {
+ if ( ( swf->audio_size-outSize ) >= mp3FrameSize ) {
+ outSize += mp3FrameSize;
+ outSamples += mp3SamplesPerFrame;
+ if ( ( swf->sound_samples + outSamples + swf->samples_per_frame ) < swf->video_samples ) {
+ goto retry_swf_audio_packet;
+ }
+ }
+ } else {
+ /* invalid mp3 data, skip forward
+ we need to do this since the Flash Player
+ does not like custom headers */
+ swf->audio_in_pos ++;
+ swf->audio_size --;
+ swf->audio_in_pos %= AUDIO_FIFO_SIZE;
+ goto retry_swf_audio_packet;
+ }
+ }
+
+ /* audio stream is behind video stream, bail */
+ if ( ( swf->sound_samples + outSamples + swf->samples_per_frame ) < swf->video_samples ) {
+ return 0;
+ }
+ }
+
+ if ( swf->video_type == CODEC_ID_VP6F ||
+ swf->video_type == CODEC_ID_FLV1 ) {
+ if ( swf->video_frame_number == 0 ) {
+ /* create a new video object */
+ put_swf_tag(s, TAG_VIDEOSTREAM);
+ put_le16(pb, VIDEO_ID);
+ put_le16(pb, 15000 ); /* hard flash player limit */
+ put_le16(pb, enc->width);
+ put_le16(pb, enc->height);
+ put_byte(pb, 0);
+ put_byte(pb,codec_get_tag(swf_codec_tags,swf->video_type));
+ put_swf_end_tag(s);
+
+ /* place the video object for the first time */
+ put_swf_tag(s, TAG_PLACEOBJECT2);
+ put_byte(pb, 0x36);
+ put_le16(pb, 1);
+ put_le16(pb, VIDEO_ID);
+ put_swf_matrix(pb, 1 << FRAC_BITS, 0, 0, 1 << FRAC_BITS, 0, 0);
+ put_le16(pb, swf->video_frame_number );
+ put_byte(pb, 'v');
+ put_byte(pb, 'i');
+ put_byte(pb, 'd');
+ put_byte(pb, 'e');
+ put_byte(pb, 'o');
+ put_byte(pb, 0x00);
+ put_swf_end_tag(s);
+ } else {
+ /* mark the character for update */
+ put_swf_tag(s, TAG_PLACEOBJECT2);
+ put_byte(pb, 0x11);
+ put_le16(pb, 1);
+ put_le16(pb, swf->video_frame_number );
+ put_swf_end_tag(s);
+ }
+
+ /* set video frame data */
+ put_swf_tag(s, TAG_VIDEOFRAME | TAG_LONG);
+ put_le16(pb, VIDEO_ID);
+ put_le16(pb, swf->video_frame_number++ );
+ put_buffer(pb, buf, size);
+ put_swf_end_tag(s);
+ } else if ( swf->video_type == CODEC_ID_MJPEG ) {
+ if (swf->swf_frame_number > 0) {
+ /* remove the shape */
+ put_swf_tag(s, TAG_REMOVEOBJECT);
+ put_le16(pb, SHAPE_ID); /* shape ID */
+ put_le16(pb, 1); /* depth */
+ put_swf_end_tag(s);
+
+ /* free the bitmap */
+ put_swf_tag(s, TAG_FREECHARACTER);
+ put_le16(pb, BITMAP_ID);
+ put_swf_end_tag(s);
+ }
+
+ put_swf_tag(s, TAG_JPEG2 | TAG_LONG);
+
+ put_le16(pb, BITMAP_ID); /* ID of the image */
+
+ /* a dummy jpeg header seems to be required */
+ put_byte(pb, 0xff);
+ put_byte(pb, 0xd8);
+ put_byte(pb, 0xff);
+ put_byte(pb, 0xd9);
+ /* write the jpeg image */
+ put_buffer(pb, buf, size);
+
+ put_swf_end_tag(s);
+
+ /* draw the shape */
+
+ put_swf_tag(s, TAG_PLACEOBJECT);
+ put_le16(pb, SHAPE_ID); /* shape ID */
+ put_le16(pb, 1); /* depth */
+ put_swf_matrix(pb, 20 << FRAC_BITS, 0, 0, 20 << FRAC_BITS, 0, 0);
+ put_swf_end_tag(s);
+ } else {
+ /* invalid codec */
+ }
+
+ swf->swf_frame_number ++;
+
+ swf->video_samples += swf->samples_per_frame;
+
+    /* streaming sound should always be placed just before showframe tags */
+ if ( outSize > 0 ) {
+ put_swf_tag(s, TAG_STREAMBLOCK | TAG_LONG);
+ put_le16(pb, outSamples);
+ put_le16(pb, 0);
+ for (c=0; c<outSize; c++) {
+ put_byte(pb,swf->audio_fifo[(swf->audio_in_pos+c) % AUDIO_FIFO_SIZE]);
+ }
+ put_swf_end_tag(s);
+
+ /* update FIFO */
+ swf->sound_samples += outSamples;
+ swf->audio_in_pos += outSize;
+ swf->audio_size -= outSize;
+ swf->audio_in_pos %= AUDIO_FIFO_SIZE;
+ }
+
+ /* output the frame */
+ put_swf_tag(s, TAG_SHOWFRAME);
+ put_swf_end_tag(s);
+
+ put_flush_packet(&s->pb);
+
+ return 0;
+}
+
+static int swf_write_audio(AVFormatContext *s,
+ AVCodecContext *enc, const uint8_t *buf, int size)
+{
+ SWFContext *swf = s->priv_data;
+ int c = 0;
+
+ /* Flash Player limit */
+ if ( swf->swf_frame_number == 16000 ) {
+ av_log(enc, AV_LOG_INFO, "warning: Flash Player limit of 16000 frames reached\n");
+ }
+
+ if (enc->codec_id == CODEC_ID_MP3 ) {
+ for (c=0; c<size; c++) {
+ swf->audio_fifo[(swf->audio_out_pos+c)%AUDIO_FIFO_SIZE] = buf[c];
+ }
+ swf->audio_size += size;
+ swf->audio_out_pos += size;
+ swf->audio_out_pos %= AUDIO_FIFO_SIZE;
+ }
+
+    /* if this is an audio-only stream, make sure we add swf frames */
+ if ( swf->video_type == 0 ) {
+ swf_write_video(s, enc, 0, 0);
+ }
+
+ return 0;
+}
+
+static int swf_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVCodecContext *codec = s->streams[pkt->stream_index]->codec;
+ if (codec->codec_type == CODEC_TYPE_AUDIO)
+ return swf_write_audio(s, codec, pkt->data, pkt->size);
+ else
+ return swf_write_video(s, codec, pkt->data, pkt->size);
+}
+
+static int swf_write_trailer(AVFormatContext *s)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVCodecContext *enc, *video_enc;
+ int file_size, i;
+
+ video_enc = NULL;
+ for(i=0;i<s->nb_streams;i++) {
+ enc = s->streams[i]->codec;
+ if (enc->codec_type == CODEC_TYPE_VIDEO)
+ video_enc = enc;
+ }
+
+ put_swf_tag(s, TAG_END);
+ put_swf_end_tag(s);
+
+ put_flush_packet(&s->pb);
+
+ /* patch file size and number of frames if not streamed */
+ if (!url_is_streamed(&s->pb) && video_enc) {
+ file_size = url_ftell(pb);
+ url_fseek(pb, 4, SEEK_SET);
+ put_le32(pb, file_size);
+ url_fseek(pb, swf->duration_pos, SEEK_SET);
+ put_le16(pb, video_enc->frame_number);
+ }
+
+ av_free(swf->audio_fifo);
+
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/*********************************************/
+/* Extract FLV-encoded frames and MP3 audio from SWF.
+   Note that the detection of the real frame
+   is inaccurate at this point, as it can be
+   quite tricky to determine; you will almost
+   certainly get bad audio/video sync */
+
+static int get_swf_tag(ByteIOContext *pb, int *len_ptr)
+{
+ int tag, len;
+
+ if (url_feof(pb))
+ return -1;
+
+ tag = get_le16(pb);
+ len = tag & 0x3f;
+ tag = tag >> 6;
+ if (len == 0x3f) {
+ len = get_le32(pb);
+ }
+// av_log(NULL, AV_LOG_DEBUG, "Tag: %d - Len: %d\n", tag, len);
+ *len_ptr = len;
+ return tag;
+}
+
+
+static int swf_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 16)
+ return 0;
+ if ((p->buf[0] == 'F' || p->buf[0] == 'C') && p->buf[1] == 'W' &&
+ p->buf[2] == 'S')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int swf_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ SWFContext *swf = 0;
+ ByteIOContext *pb = &s->pb;
+ int nbits, len, frame_rate, tag, v;
+ offset_t firstTagOff;
+ AVStream *ast = 0;
+ AVStream *vst = 0;
+
+ swf = av_malloc(sizeof(SWFContext));
+ if (!swf)
+ return -1;
+ s->priv_data = swf;
+
+ tag = get_be32(pb) & 0xffffff00;
+
+ if (tag == MKBETAG('C', 'W', 'S', 0))
+ {
+ av_log(s, AV_LOG_ERROR, "Compressed SWF format not supported\n");
+ return AVERROR_IO;
+ }
+ if (tag != MKBETAG('F', 'W', 'S', 0))
+ return AVERROR_IO;
+ get_le32(pb);
+ /* skip rectangle size */
+ nbits = get_byte(pb) >> 3;
+ len = (4 * nbits - 3 + 7) / 8;
+ url_fskip(pb, len);
+ frame_rate = get_le16(pb);
+ get_le16(pb); /* frame count */
+
+ /* The Flash Player converts 8.8 frame rates
+ to milliseconds internally. Do the same to get
+ a correct framerate */
+ swf->ms_per_frame = ( 1000 * 256 ) / frame_rate;
+ swf->samples_per_frame = 0;
+ swf->ch_id = -1;
+
+ firstTagOff = url_ftell(pb);
+ for(;;) {
+ tag = get_swf_tag(pb, &len);
+ if (tag < 0) {
+ if ( ast || vst ) {
+ if ( vst && ast ) {
+ vst->codec->time_base.den = ast->codec->sample_rate / swf->samples_per_frame;
+ vst->codec->time_base.num = 1;
+ }
+ break;
+ }
+ av_log(s, AV_LOG_ERROR, "No media found in SWF\n");
+ return AVERROR_IO;
+ }
+ if ( tag == TAG_VIDEOSTREAM && !vst) {
+ int codec_id;
+ swf->ch_id = get_le16(pb);
+ get_le16(pb);
+ get_le16(pb);
+ get_le16(pb);
+ get_byte(pb);
+ /* Check for FLV1 */
+ codec_id = codec_get_id(swf_codec_tags, get_byte(pb));
+ if ( codec_id ) {
+ vst = av_new_stream(s, 0);
+ av_set_pts_info(vst, 24, 1, 1000); /* 24 bit pts in ms */
+
+ vst->codec->codec_type = CODEC_TYPE_VIDEO;
+ vst->codec->codec_id = codec_id;
+ if ( swf->samples_per_frame ) {
+ vst->codec->time_base.den = 1000. / swf->ms_per_frame;
+ vst->codec->time_base.num = 1;
+ }
+ }
+ } else if ( ( tag == TAG_STREAMHEAD || tag == TAG_STREAMHEAD2 ) && !ast) {
+ /* streaming found */
+ get_byte(pb);
+ v = get_byte(pb);
+ swf->samples_per_frame = get_le16(pb);
+ if (len!=4)
+ url_fskip(pb,len-4);
+ /* if mp3 streaming found, OK */
+ if ((v & 0x20) != 0) {
+ if ( tag == TAG_STREAMHEAD2 ) {
+ get_le16(pb);
+ }
+ ast = av_new_stream(s, 1);
+ if (!ast)
+ return -ENOMEM;
+ av_set_pts_info(ast, 24, 1, 1000); /* 24 bit pts in ms */
+
+ if (v & 0x01)
+ ast->codec->channels = 2;
+ else
+ ast->codec->channels = 1;
+
+ switch((v>> 2) & 0x03) {
+ case 1:
+ ast->codec->sample_rate = 11025;
+ break;
+ case 2:
+ ast->codec->sample_rate = 22050;
+ break;
+ case 3:
+ ast->codec->sample_rate = 44100;
+ break;
+ default:
+ av_free(ast);
+ return AVERROR_IO;
+ }
+ ast->codec->codec_type = CODEC_TYPE_AUDIO;
+ ast->codec->codec_id = CODEC_ID_MP3;
+ ast->need_parsing = 1;
+ }
+ } else {
+ url_fskip(pb, len);
+ }
+ }
+ url_fseek(pb, firstTagOff, SEEK_SET);
+
+ return 0;
+}
+
+static int swf_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ SWFContext *swf = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st = 0;
+ int tag, len, i, frame;
+
+ for(;;) {
+ tag = get_swf_tag(pb, &len);
+ if (tag < 0)
+ return AVERROR_IO;
+ if (tag == TAG_VIDEOFRAME) {
+ for( i=0; i<s->nb_streams; i++ ) {
+ st = s->streams[i];
+ if (st->id == 0) {
+ if ( get_le16(pb) == swf->ch_id ) {
+ frame = get_le16(pb);
+ av_get_packet(pb, pkt, len-4);
+ pkt->pts = frame * swf->ms_per_frame;
+ pkt->stream_index = st->index;
+ return pkt->size;
+ } else {
+ url_fskip(pb, len-2);
+ continue;
+ }
+ }
+ }
+ url_fskip(pb, len);
+ } else if (tag == TAG_STREAMBLOCK) {
+ for( i=0; i<s->nb_streams; i++ ) {
+ st = s->streams[i];
+ if (st->id == 1) {
+ url_fskip(pb, 4);
+ av_get_packet(pb, pkt, len-4);
+ pkt->stream_index = st->index;
+ return pkt->size;
+ }
+ }
+ url_fskip(pb, len);
+ } else {
+ url_fskip(pb, len);
+ }
+ }
+ return 0;
+}
+
+static int swf_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+#ifdef CONFIG_SWF_DEMUXER
+AVInputFormat swf_demuxer = {
+ "swf",
+ "Flash format",
+ sizeof(SWFContext),
+ swf_probe,
+ swf_read_header,
+ swf_read_packet,
+ swf_read_close,
+};
+#endif
+#ifdef CONFIG_SWF_MUXER
+AVOutputFormat swf_muxer = {
+ "swf",
+ "Flash format",
+ "application/x-shockwave-flash",
+ "swf",
+ sizeof(SWFContext),
+ CODEC_ID_MP3,
+ CODEC_ID_FLV1,
+ swf_write_header,
+ swf_write_packet,
+ swf_write_trailer,
+};
+#endif
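Both the demuxer and muxer above rely on the SWF record header layout handled by get_swf_tag() and put_swf_end_tag(): a 16-bit little-endian word whose low 6 bits hold the body length (0x3f meaning a 32-bit length follows) and whose upper 10 bits hold the tag code. A standalone sketch parsing such a header from a buffer (names are illustrative, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns the number of header bytes consumed (2 or 6), or -1 if truncated. */
    static int parse_swf_tag_header(const uint8_t *buf, size_t len,
                                    int *tag, uint32_t *body_len)
    {
        uint16_t word;

        if (len < 2)
            return -1;
        word = buf[0] | (buf[1] << 8);
        *tag      = word >> 6;
        *body_len = word & 0x3f;
        if (*body_len != 0x3f)               /* short form: length fits in 6 bits */
            return 2;
        if (len < 6)
            return -1;
        *body_len = buf[2] | (buf[3] << 8) | /* long form: 32-bit length follows */
                    ((uint32_t)buf[4] << 16) | ((uint32_t)buf[5] << 24);
        return 6;
    }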
diff --git a/contrib/ffmpeg/libavformat/tcp.c b/contrib/ffmpeg/libavformat/tcp.c
new file mode 100644
index 000000000..93755c497
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/tcp.c
@@ -0,0 +1,232 @@
+/*
+ * TCP protocol
+ * Copyright (c) 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#if defined(__BEOS__) || defined(__INNOTEK_LIBC__)
+typedef int socklen_t;
+#endif
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+#include <sys/time.h>
+#include <fcntl.h>
+
+typedef struct TCPContext {
+ int fd;
+} TCPContext;
+
+/* resolve host, also accepting a numeric IP address */
+int resolve_host(struct in_addr *sin_addr, const char *hostname)
+{
+ struct hostent *hp;
+
+ if ((inet_aton(hostname, sin_addr)) == 0) {
+ hp = gethostbyname(hostname);
+ if (!hp)
+ return -1;
+ memcpy (sin_addr, hp->h_addr, sizeof(struct in_addr));
+ }
+ return 0;
+}
+
+/* return non-zero on error */
+static int tcp_open(URLContext *h, const char *uri, int flags)
+{
+ struct sockaddr_in dest_addr;
+ char hostname[1024], *q;
+ int port, fd = -1;
+ TCPContext *s = NULL;
+ fd_set wfds;
+ int fd_max, ret;
+ struct timeval tv;
+ socklen_t optlen;
+ char proto[1024],path[1024],tmp[1024]; // PETR: protocol and path strings
+
+ url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname),
+ &port, path, sizeof(path), uri); // PETR: use url_split
+ if (strcmp(proto,"tcp")) goto fail; // PETR: check protocol
+ if ((q = strchr(hostname,'@'))) { strcpy(tmp,q+1); strcpy(hostname,tmp); } // PETR: take only the part after '@' for tcp protocol
+
+ s = av_malloc(sizeof(TCPContext));
+ if (!s)
+ return -ENOMEM;
+ h->priv_data = s;
+
+ if (port <= 0 || port >= 65536)
+ goto fail;
+
+ dest_addr.sin_family = AF_INET;
+ dest_addr.sin_port = htons(port);
+ if (resolve_host(&dest_addr.sin_addr, hostname) < 0)
+ goto fail;
+
+ fd = socket(PF_INET, SOCK_STREAM, 0);
+ if (fd < 0)
+ goto fail;
+ fcntl(fd, F_SETFL, O_NONBLOCK);
+
+ redo:
+ ret = connect(fd, (struct sockaddr *)&dest_addr,
+ sizeof(dest_addr));
+ if (ret < 0) {
+ if (errno == EINTR)
+ goto redo;
+ if (errno != EINPROGRESS)
+ goto fail;
+
+ /* wait until we are connected or until abort */
+ for(;;) {
+ if (url_interrupt_cb()) {
+ ret = -EINTR;
+ goto fail1;
+ }
+ fd_max = fd;
+ FD_ZERO(&wfds);
+ FD_SET(fd, &wfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 100 * 1000;
+ ret = select(fd_max + 1, NULL, &wfds, NULL, &tv);
+ if (ret > 0 && FD_ISSET(fd, &wfds))
+ break;
+ }
+
+ /* test error */
+ optlen = sizeof(ret);
+ getsockopt (fd, SOL_SOCKET, SO_ERROR, &ret, &optlen);
+ if (ret != 0)
+ goto fail;
+ }
+ s->fd = fd;
+ return 0;
+
+ fail:
+ ret = AVERROR_IO;
+ fail1:
+ if (fd >= 0)
+ close(fd);
+ av_free(s);
+ return ret;
+}
+
+static int tcp_read(URLContext *h, uint8_t *buf, int size)
+{
+ TCPContext *s = h->priv_data;
+ int len, fd_max, ret;
+ fd_set rfds;
+ struct timeval tv;
+
+ for (;;) {
+ if (url_interrupt_cb())
+ return -EINTR;
+ fd_max = s->fd;
+ FD_ZERO(&rfds);
+ FD_SET(s->fd, &rfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 100 * 1000;
+ ret = select(fd_max + 1, &rfds, NULL, NULL, &tv);
+ if (ret > 0 && FD_ISSET(s->fd, &rfds)) {
+#ifdef __BEOS__
+ len = recv(s->fd, buf, size, 0);
+#else
+ len = read(s->fd, buf, size);
+#endif
+ if (len < 0) {
+ if (errno != EINTR && errno != EAGAIN)
+#ifdef __BEOS__
+ return errno;
+#else
+ return -errno;
+#endif
+ } else return len;
+ } else if (ret < 0) {
+ return -1;
+ }
+ }
+}
+
+static int tcp_write(URLContext *h, uint8_t *buf, int size)
+{
+ TCPContext *s = h->priv_data;
+ int ret, size1, fd_max, len;
+ fd_set wfds;
+ struct timeval tv;
+
+ size1 = size;
+ while (size > 0) {
+ if (url_interrupt_cb())
+ return -EINTR;
+ fd_max = s->fd;
+ FD_ZERO(&wfds);
+ FD_SET(s->fd, &wfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 100 * 1000;
+ ret = select(fd_max + 1, NULL, &wfds, NULL, &tv);
+ if (ret > 0 && FD_ISSET(s->fd, &wfds)) {
+#ifdef __BEOS__
+ len = send(s->fd, buf, size, 0);
+#else
+ len = write(s->fd, buf, size);
+#endif
+ if (len < 0) {
+ if (errno != EINTR && errno != EAGAIN) {
+#ifdef __BEOS__
+ return errno;
+#else
+ return -errno;
+#endif
+ }
+ continue;
+ }
+ size -= len;
+ buf += len;
+ } else if (ret < 0) {
+ return -1;
+ }
+ }
+ return size1 - size;
+}
+
+static int tcp_close(URLContext *h)
+{
+ TCPContext *s = h->priv_data;
+#ifdef CONFIG_BEOS_NETSERVER
+ closesocket(s->fd);
+#else
+ close(s->fd);
+#endif
+ av_free(s);
+ return 0;
+}
+
+URLProtocol tcp_protocol = {
+ "tcp",
+ tcp_open,
+ tcp_read,
+ tcp_write,
+ NULL, /* seek */
+ tcp_close,
+};
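For readers new to this layer: a URLProtocol table such as tcp_protocol above is driven entirely through the generic url_*() calls. A minimal usage sketch, assuming the register_protocol()/url_open()/url_read()/url_close() API of this libavformat version (the helper name, host and port are made up for illustration):

#include "avformat.h"                      /* brings in avio.h: URLContext, URL_RDONLY */

static int fetch_some_bytes(void)          /* hypothetical helper */
{
    URLContext *h;
    uint8_t buf[1024];
    int n;

    register_protocol(&tcp_protocol);      /* normally done for us by av_register_all() */
    if (url_open(&h, "tcp://example.com:1234", URL_RDONLY) < 0)
        return -1;
    n = url_read(h, buf, sizeof(buf));     /* blocks, polling select() in 100 ms slices */
    url_close(h);
    return n;
}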
diff --git a/contrib/ffmpeg/libavformat/tiertexseq.c b/contrib/ffmpeg/libavformat/tiertexseq.c
new file mode 100644
index 000000000..b1a39bf76
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/tiertexseq.c
@@ -0,0 +1,310 @@
+/*
+ * Tiertex Limited SEQ File Demuxer
+ * Copyright (c) 2006 Gregory Montoir (cyx@users.sourceforge.net)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file tiertexseq.c
+ * Tiertex Limited SEQ file demuxer
+ */
+
+#include "avformat.h"
+
+#define SEQ_FRAME_SIZE 6144
+#define SEQ_FRAME_W 256
+#define SEQ_FRAME_H 128
+#define SEQ_NUM_FRAME_BUFFERS 30
+#define SEQ_AUDIO_BUFFER_SIZE 882
+#define SEQ_SAMPLE_RATE 22050
+#define SEQ_FRAME_RATE 25
+
+
+typedef struct TiertexSeqFrameBuffer {
+ int fill_size;
+ int data_size;
+ unsigned char *data;
+} TiertexSeqFrameBuffer;
+
+typedef struct SeqDemuxContext {
+ int audio_stream_index;
+ int video_stream_index;
+ int current_frame_pts;
+ int current_frame_offs;
+ TiertexSeqFrameBuffer frame_buffers[SEQ_NUM_FRAME_BUFFERS];
+ int frame_buffers_count;
+ unsigned int current_audio_data_size;
+ unsigned int current_audio_data_offs;
+ unsigned int current_pal_data_size;
+ unsigned int current_pal_data_offs;
+ unsigned int current_video_data_size;
+ unsigned char *current_video_data_ptr;
+ int audio_buffer_full;
+} SeqDemuxContext;
+
+
+static int seq_probe(AVProbeData *p)
+{
+ int i;
+
+ if (p->buf_size < 256)
+ return 0;
+
+    /* there's no real header in a .seq file; the only thing these files */
+    /* have in common is that the first 256 bytes are always filled with 0 */
+ for (i = 0; i < 256; i++)
+ if (p->buf[i] != 0)
+ return 0;
+
+ /* only one fourth of the score since the previous check is too naive */
+ return AVPROBE_SCORE_MAX / 4;
+}
+
+static int seq_init_frame_buffers(SeqDemuxContext *seq, ByteIOContext *pb)
+{
+ int i, sz;
+ TiertexSeqFrameBuffer *seq_buffer;
+
+ url_fseek(pb, 256, SEEK_SET);
+
+ for (i = 0; i < SEQ_NUM_FRAME_BUFFERS; i++) {
+ sz = get_le16(pb);
+ if (sz == 0)
+ break;
+ else {
+ seq_buffer = &seq->frame_buffers[i];
+ seq_buffer->fill_size = 0;
+ seq_buffer->data_size = sz;
+ seq_buffer->data = av_malloc(sz);
+ if (!seq_buffer->data)
+ return AVERROR_NOMEM;
+ }
+ }
+ seq->frame_buffers_count = i;
+ return 0;
+}
+
+static int seq_fill_buffer(SeqDemuxContext *seq, ByteIOContext *pb, int buffer_num, unsigned int data_offs, int data_size)
+{
+ TiertexSeqFrameBuffer *seq_buffer;
+
+ if (buffer_num >= SEQ_NUM_FRAME_BUFFERS)
+ return AVERROR_INVALIDDATA;
+
+ seq_buffer = &seq->frame_buffers[buffer_num];
+ if (seq_buffer->fill_size + data_size > seq_buffer->data_size)
+ return AVERROR_INVALIDDATA;
+
+ url_fseek(pb, seq->current_frame_offs + data_offs, SEEK_SET);
+ if (get_buffer(pb, seq_buffer->data + seq_buffer->fill_size, data_size) != data_size)
+ return AVERROR_IO;
+
+ seq_buffer->fill_size += data_size;
+ return 0;
+}
+
+static int seq_parse_frame_data(SeqDemuxContext *seq, ByteIOContext *pb)
+{
+ unsigned int offset_table[4], buffer_num[4];
+ TiertexSeqFrameBuffer *seq_buffer;
+ int i, e, err;
+
+ seq->current_frame_offs += SEQ_FRAME_SIZE;
+ url_fseek(pb, seq->current_frame_offs, SEEK_SET);
+
+ /* sound data */
+ seq->current_audio_data_offs = get_le16(pb);
+ if (seq->current_audio_data_offs != 0) {
+ seq->current_audio_data_size = SEQ_AUDIO_BUFFER_SIZE * 2;
+ } else {
+ seq->current_audio_data_size = 0;
+ }
+
+ /* palette data */
+ seq->current_pal_data_offs = get_le16(pb);
+ if (seq->current_pal_data_offs != 0) {
+ seq->current_pal_data_size = 768;
+ } else {
+ seq->current_pal_data_size = 0;
+ }
+
+ /* video data */
+ for (i = 0; i < 4; i++)
+ buffer_num[i] = get_byte(pb);
+
+ for (i = 0; i < 4; i++)
+ offset_table[i] = get_le16(pb);
+
+ for (i = 0; i < 3; i++) {
+ if (offset_table[i] != 0) {
+ for (e = i + 1; e < 4 && offset_table[e] == 0; e++);
+ err = seq_fill_buffer(seq, pb, buffer_num[1 + i],
+ offset_table[i],
+ offset_table[e] - offset_table[i]);
+ if (err != 0)
+ return err;
+ }
+ }
+
+ if (buffer_num[0] != 255) {
+ if (buffer_num[0] >= SEQ_NUM_FRAME_BUFFERS)
+ return AVERROR_INVALIDDATA;
+
+ seq_buffer = &seq->frame_buffers[buffer_num[0]];
+ seq->current_video_data_size = seq_buffer->fill_size;
+ seq->current_video_data_ptr = seq_buffer->data;
+ seq_buffer->fill_size = 0;
+ } else {
+ seq->current_video_data_size = 0;
+ seq->current_video_data_ptr = 0;
+ }
+
+ return 0;
+}
+
+static int seq_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ int i, rc;
+ SeqDemuxContext *seq = (SeqDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+
+ /* init internal buffers */
+ rc = seq_init_frame_buffers(seq, pb);
+ if (rc)
+ return rc;
+
+ seq->current_frame_offs = 0;
+
+ /* preload (no audio data, just buffer operations related data) */
+ for (i = 1; i <= 100; i++) {
+ rc = seq_parse_frame_data(seq, pb);
+ if (rc)
+ return rc;
+ }
+
+ seq->current_frame_pts = 0;
+
+ seq->audio_buffer_full = 0;
+
+ /* initialize the video decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 32, 1, SEQ_FRAME_RATE);
+ seq->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_TIERTEXSEQVIDEO;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = SEQ_FRAME_W;
+ st->codec->height = SEQ_FRAME_H;
+
+ /* initialize the audio decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ av_set_pts_info(st, 32, 1, SEQ_SAMPLE_RATE);
+ seq->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S16BE;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = 1;
+ st->codec->sample_rate = SEQ_SAMPLE_RATE;
+ st->codec->bits_per_sample = 16;
+ st->codec->bit_rate = st->codec->sample_rate * st->codec->bits_per_sample * st->codec->channels;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ return 0;
+}
+
+static int seq_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int rc;
+ SeqDemuxContext *seq = (SeqDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+
+ if (!seq->audio_buffer_full) {
+ rc = seq_parse_frame_data(seq, pb);
+ if (rc)
+ return rc;
+
+ /* video packet */
+ if (seq->current_pal_data_size + seq->current_video_data_size != 0) {
+ if (av_new_packet(pkt, 1 + seq->current_pal_data_size + seq->current_video_data_size))
+ return AVERROR_NOMEM;
+
+ pkt->data[0] = 0;
+ if (seq->current_pal_data_size != 0) {
+ pkt->data[0] |= 1;
+ url_fseek(pb, seq->current_frame_offs + seq->current_pal_data_offs, SEEK_SET);
+ if (get_buffer(pb, &pkt->data[1], seq->current_pal_data_size) != seq->current_pal_data_size)
+ return AVERROR_IO;
+ }
+ if (seq->current_video_data_size != 0) {
+ pkt->data[0] |= 2;
+ memcpy(&pkt->data[1 + seq->current_pal_data_size],
+ seq->current_video_data_ptr,
+ seq->current_video_data_size);
+ }
+ pkt->stream_index = seq->video_stream_index;
+ pkt->pts = seq->current_frame_pts;
+
+ /* sound buffer will be processed on next read_packet() call */
+ seq->audio_buffer_full = 1;
+ return 0;
+ }
+ }
+
+ /* audio packet */
+ if (seq->current_audio_data_offs == 0) /* end of data reached */
+ return AVERROR_IO;
+
+ url_fseek(pb, seq->current_frame_offs + seq->current_audio_data_offs, SEEK_SET);
+ rc = av_get_packet(pb, pkt, seq->current_audio_data_size);
+ if (rc < 0)
+ return rc;
+
+ pkt->stream_index = seq->audio_stream_index;
+ pkt->pts = seq->current_frame_pts++;
+
+ seq->audio_buffer_full = 0;
+ return 0;
+}
+
+static int seq_read_close(AVFormatContext *s)
+{
+ int i;
+ SeqDemuxContext *seq = (SeqDemuxContext *)s->priv_data;
+
+ for (i = 0; i < SEQ_NUM_FRAME_BUFFERS; i++)
+ av_free(seq->frame_buffers[i].data);
+
+ return 0;
+}
+
+AVInputFormat tiertexseq_demuxer = {
+ "tiertexseq",
+ "Tiertex Limited SEQ format",
+ sizeof(SeqDemuxContext),
+ seq_probe,
+ seq_read_header,
+ seq_read_packet,
+ seq_read_close,
+};
diff --git a/contrib/ffmpeg/libavformat/tta.c b/contrib/ffmpeg/libavformat/tta.c
new file mode 100644
index 000000000..a513d9d38
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/tta.c
@@ -0,0 +1,152 @@
+/*
+ * TTA demuxer
+ * Copyright (c) 2006 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "bitstream.h"
+
+typedef struct {
+ int totalframes, currentframe;
+ uint32_t *seektable;
+} TTAContext;
+
+static int tta_probe(AVProbeData *p)
+{
+ const uint8_t *d = p->buf;
+ if (p->buf_size < 4)
+ return 0;
+ if (d[0] == 'T' && d[1] == 'T' && d[2] == 'A' && d[3] == '1')
+ return 80;
+ return 0;
+}
+
+static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ TTAContext *c = s->priv_data;
+ AVStream *st;
+ int i, channels, bps, samplerate, datalen, framelen, start;
+
+ start = url_ftell(&s->pb);
+
+ if (get_le32(&s->pb) != ff_get_fourcc("TTA1"))
+ return -1; // not tta file
+
+ url_fskip(&s->pb, 2); // FIXME: flags
+ channels = get_le16(&s->pb);
+ bps = get_le16(&s->pb);
+ samplerate = get_le32(&s->pb);
+ if(samplerate <= 0 || samplerate > 1000000){
+ av_log(s, AV_LOG_ERROR, "nonsense samplerate\n");
+ return -1;
+ }
+
+ datalen = get_le32(&s->pb);
+ if(datalen < 0){
+ av_log(s, AV_LOG_ERROR, "nonsense datalen\n");
+ return -1;
+ }
+
+ url_fskip(&s->pb, 4); // header crc
+
+ framelen = 1.04489795918367346939 * samplerate;
+ c->totalframes = datalen / framelen + ((datalen % framelen) ? 1 : 0);
+ c->currentframe = 0;
+
+ if(c->totalframes >= UINT_MAX/sizeof(uint32_t)){
+ av_log(s, AV_LOG_ERROR, "totalframes too large\n");
+ return -1;
+ }
+ c->seektable = av_mallocz(sizeof(uint32_t)*c->totalframes);
+ if (!c->seektable)
+ return AVERROR_NOMEM;
+
+ for (i = 0; i < c->totalframes; i++)
+ c->seektable[i] = get_le32(&s->pb);
+ url_fskip(&s->pb, 4); // seektable crc
+
+ st = av_new_stream(s, 0);
+// av_set_pts_info(st, 32, 1, 1000);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_TTA;
+ st->codec->channels = channels;
+ st->codec->sample_rate = samplerate;
+ st->codec->bits_per_sample = bps;
+
+ st->codec->extradata_size = url_ftell(&s->pb) - start;
+ if(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
+ //this check is redundant as get_buffer should fail
+ av_log(s, AV_LOG_ERROR, "extradata_size too large\n");
+ return -1;
+ }
+ st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
+ url_fseek(&s->pb, start, SEEK_SET); // or SEEK_CUR and -size ? :)
+ get_buffer(&s->pb, st->codec->extradata, st->codec->extradata_size);
+
+ return 0;
+}
+
+static int tta_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ TTAContext *c = s->priv_data;
+ int ret, size;
+
+ // FIXME!
+ if (c->currentframe > c->totalframes)
+ size = 0;
+ else
+ size = c->seektable[c->currentframe];
+
+ c->currentframe++;
+
+ if (av_new_packet(pkt, size) < 0)
+ return AVERROR_IO;
+
+ pkt->pos = url_ftell(&s->pb);
+ pkt->stream_index = 0;
+ ret = get_buffer(&s->pb, pkt->data, size);
+ if (ret <= 0) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ pkt->size = ret;
+// av_log(s, AV_LOG_INFO, "TTA packet #%d desired size: %d read size: %d at pos %d\n",
+// c->currentframe, size, ret, pkt->pos);
+ return 0; //ret;
+}
+
+static int tta_read_close(AVFormatContext *s)
+{
+ TTAContext *c = s->priv_data;
+ if (c->seektable)
+ av_free(c->seektable);
+ return 0;
+}
+
+AVInputFormat tta_demuxer = {
+ "tta",
+ "true-audio",
+ sizeof(TTAContext),
+ tta_probe,
+ tta_read_header,
+ tta_read_packet,
+ tta_read_close,
+ .extensions = "tta",
+};
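The AVInputFormat table above only becomes reachable once it is registered; a small sketch, assuming av_register_input_format() and av_find_input_format() as defined later in this patch (the helper name is made up, and the extern is redundant inside tta.c itself):

#include "avformat.h"

extern AVInputFormat tta_demuxer;           /* defined above in this file */

static AVInputFormat *get_tta_demuxer(void) /* hypothetical helper */
{
    av_register_input_format(&tta_demuxer); /* appends to the first_iformat list */
    return av_find_input_format("tta");     /* looked up by the short name "tta" */
}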
diff --git a/contrib/ffmpeg/libavformat/udp.c b/contrib/ffmpeg/libavformat/udp.c
new file mode 100644
index 000000000..96fa4e152
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/udp.c
@@ -0,0 +1,512 @@
+/*
+ * UDP prototype streaming system
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#ifndef __BEOS__
+# include <arpa/inet.h>
+#else
+# include "barpainet.h"
+#endif
+#include <netdb.h>
+
+#ifndef IPV6_ADD_MEMBERSHIP
+#define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
+#define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
+#endif
+
+typedef struct {
+ int udp_fd;
+ int ttl;
+ int is_multicast;
+ int local_port;
+ int reuse_socket;
+#ifndef CONFIG_IPV6
+ struct ip_mreq mreq;
+ struct sockaddr_in dest_addr;
+#else
+ struct sockaddr_storage dest_addr;
+ size_t dest_addr_len;
+#endif
+} UDPContext;
+
+#define UDP_TX_BUF_SIZE 32768
+
+#ifdef CONFIG_IPV6
+
+static int udp_ipv6_is_multicast_address(const struct sockaddr *addr) {
+ if (addr->sa_family == AF_INET)
+ return IN_MULTICAST(ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
+ if (addr->sa_family == AF_INET6)
+ return IN6_IS_ADDR_MULTICAST(&((struct sockaddr_in6 *)addr)->sin6_addr);
+ return -1;
+}
+
+static int udp_ipv6_set_multicast_ttl(int sockfd, int mcastTTL, struct sockaddr *addr) {
+ if (addr->sa_family == AF_INET) {
+ if (setsockopt(sockfd, IPPROTO_IP, IP_MULTICAST_TTL, &mcastTTL, sizeof(mcastTTL)) < 0) {
+ perror("setsockopt(IP_MULTICAST_TTL)");
+ return -1;
+ }
+ }
+ if (addr->sa_family == AF_INET6) {
+ if (setsockopt(sockfd, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &mcastTTL, sizeof(mcastTTL)) < 0) {
+ perror("setsockopt(IPV6_MULTICAST_HOPS)");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int udp_ipv6_join_multicast_group(int sockfd, struct sockaddr *addr) {
+ struct ip_mreq mreq;
+ struct ipv6_mreq mreq6;
+ if (addr->sa_family == AF_INET) {
+ mreq.imr_multiaddr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
+ mreq.imr_interface.s_addr= INADDR_ANY;
+ if (setsockopt(sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, (const void *)&mreq, sizeof(mreq)) < 0) {
+ perror("setsockopt(IP_ADD_MEMBERSHIP)");
+ return -1;
+ }
+ }
+ if (addr->sa_family == AF_INET6) {
+ memcpy(&mreq6.ipv6mr_multiaddr, &(((struct sockaddr_in6 *)addr)->sin6_addr), sizeof(struct in6_addr));
+ mreq6.ipv6mr_interface= 0;
+ if (setsockopt(sockfd, IPPROTO_IPV6, IPV6_ADD_MEMBERSHIP, &mreq6, sizeof(mreq6)) < 0) {
+ perror("setsockopt(IPV6_ADD_MEMBERSHIP)");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int udp_ipv6_leave_multicast_group(int sockfd, struct sockaddr *addr) {
+ struct ip_mreq mreq;
+ struct ipv6_mreq mreq6;
+ if (addr->sa_family == AF_INET) {
+ mreq.imr_multiaddr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
+ mreq.imr_interface.s_addr= INADDR_ANY;
+ if (setsockopt(sockfd, IPPROTO_IP, IP_DROP_MEMBERSHIP, (const void *)&mreq, sizeof(mreq)) < 0) {
+ perror("setsockopt(IP_DROP_MEMBERSHIP)");
+ return -1;
+ }
+ }
+ if (addr->sa_family == AF_INET6) {
+ memcpy(&mreq6.ipv6mr_multiaddr, &(((struct sockaddr_in6 *)addr)->sin6_addr), sizeof(struct in6_addr));
+ mreq6.ipv6mr_interface= 0;
+ if (setsockopt(sockfd, IPPROTO_IPV6, IPV6_DROP_MEMBERSHIP, &mreq6, sizeof(mreq6)) < 0) {
+ perror("setsockopt(IPV6_DROP_MEMBERSHIP)");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static struct addrinfo* udp_ipv6_resolve_host(const char *hostname, int port, int type, int family, int flags) {
+ struct addrinfo hints, *res = 0;
+ int error;
+ char sport[16];
+ const char *node = 0, *service = 0;
+
+ if (port > 0) {
+ snprintf(sport, sizeof(sport), "%d", port);
+ service = sport;
+ }
+ if ((hostname) && (hostname[0] != '\0') && (hostname[0] != '?')) {
+ node = hostname;
+ }
+ if ((node) || (service)) {
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_socktype = type;
+ hints.ai_family = family;
+ hints.ai_flags = flags;
+ if ((error = getaddrinfo(node, service, &hints, &res))) {
+ av_log(NULL, AV_LOG_ERROR, "udp_ipv6_resolve_host: %s\n", gai_strerror(error));
+ }
+ }
+ return res;
+}
+
+static int udp_ipv6_set_remote_url(URLContext *h, const char *uri) {
+ UDPContext *s = h->priv_data;
+ char hostname[256];
+ int port;
+ struct addrinfo *res0;
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);
+ res0 = udp_ipv6_resolve_host(hostname, port, SOCK_DGRAM, AF_UNSPEC, 0);
+ if (res0 == 0) return AVERROR_IO;
+ memcpy(&s->dest_addr, res0->ai_addr, res0->ai_addrlen);
+ s->dest_addr_len = res0->ai_addrlen;
+ freeaddrinfo(res0);
+ return 0;
+}
+
+static int udp_ipv6_set_local(URLContext *h) {
+ UDPContext *s = h->priv_data;
+ int udp_fd = -1;
+ struct sockaddr_storage clientaddr;
+ socklen_t addrlen;
+ char sbuf[NI_MAXSERV];
+ char hbuf[NI_MAXHOST];
+ struct addrinfo *res0 = NULL, *res = NULL;
+
+ if (s->local_port != 0) {
+ res0 = udp_ipv6_resolve_host(0, s->local_port, SOCK_DGRAM, AF_UNSPEC, AI_PASSIVE);
+ if (res0 == 0)
+ goto fail;
+ for (res = res0; res; res=res->ai_next) {
+ udp_fd = socket(res->ai_family, SOCK_DGRAM, 0);
+ if (udp_fd > 0) break;
+ perror("socket");
+ }
+ } else {
+ udp_fd = socket(s->dest_addr.ss_family, SOCK_DGRAM, 0);
+ if (udp_fd < 0)
+ perror("socket");
+ }
+
+ if (udp_fd < 0)
+ goto fail;
+
+ if (s->local_port != 0) {
+ if (bind(udp_fd, res0->ai_addr, res0->ai_addrlen) < 0) {
+ perror("bind");
+ goto fail;
+ }
+ freeaddrinfo(res0);
+ res0 = NULL;
+ }
+
+ addrlen = sizeof(clientaddr);
+ if (getsockname(udp_fd, (struct sockaddr *)&clientaddr, &addrlen) < 0) {
+ perror("getsockname");
+ goto fail;
+ }
+
+ if (getnameinfo((struct sockaddr *)&clientaddr, addrlen, hbuf, sizeof(hbuf), sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV) != 0) {
+ perror("getnameinfo");
+ goto fail;
+ }
+
+ s->local_port = strtol(sbuf, NULL, 10);
+
+ return udp_fd;
+
+ fail:
+ if (udp_fd >= 0)
+#ifdef CONFIG_BEOS_NETSERVER
+ closesocket(udp_fd);
+#else
+ close(udp_fd);
+#endif
+ if(res0)
+ freeaddrinfo(res0);
+ return -1;
+}
+
+#endif
+
+
+/**
+ * If no filename is given to av_open_input_file because you want to
+ * get the local port first, then you must call this function to set
+ * the remote server address.
+ *
+ * url syntax: udp://host:port[?option=val...]
+ * option: 'multicast=1' : enable multicast
+ * 'ttl=n' : set the ttl value (for multicast only)
+ * 'localport=n' : set the local port
+ * 'pkt_size=n' : set max packet size
+ * 'reuse=1' : enable reusing the socket
+ *
+ * @param h URL context
+ * @param uri URI of the remote server
+ * @return zero if no error.
+ */
+int udp_set_remote_url(URLContext *h, const char *uri)
+{
+#ifdef CONFIG_IPV6
+ return udp_ipv6_set_remote_url(h, uri);
+#else
+ UDPContext *s = h->priv_data;
+ char hostname[256];
+ int port;
+
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);
+
+ /* set the destination address */
+ if (resolve_host(&s->dest_addr.sin_addr, hostname) < 0)
+ return AVERROR_IO;
+ s->dest_addr.sin_family = AF_INET;
+ s->dest_addr.sin_port = htons(port);
+ return 0;
+#endif
+}
+
+/**
+ * Return the local port used by the UDP connection.
+ * @param h URL context of the UDP connection
+ * @return the local port number
+ */
+int udp_get_local_port(URLContext *h)
+{
+ UDPContext *s = h->priv_data;
+ return s->local_port;
+}
+
+/**
+ * Return the udp file handle for select() usage to wait for several RTP
+ * streams at the same time.
+ * @param h URL context
+ */
+int udp_get_file_handle(URLContext *h)
+{
+ UDPContext *s = h->priv_data;
+ return s->udp_fd;
+}
+
+/* put it in UDP context */
+/* return non-zero on error */
+static int udp_open(URLContext *h, const char *uri, int flags)
+{
+ char hostname[1024];
+ int port, udp_fd = -1, tmp;
+ UDPContext *s = NULL;
+ int is_output;
+ const char *p;
+ char buf[256];
+#ifndef CONFIG_IPV6
+ struct sockaddr_in my_addr, my_addr1;
+ int len;
+#endif
+
+ h->is_streamed = 1;
+ h->max_packet_size = 1472;
+
+ is_output = (flags & URL_WRONLY);
+
+ s = av_malloc(sizeof(UDPContext));
+ if (!s)
+ return -ENOMEM;
+
+ h->priv_data = s;
+ s->ttl = 16;
+ s->is_multicast = 0;
+ s->local_port = 0;
+ s->reuse_socket = 0;
+ p = strchr(uri, '?');
+ if (p) {
+ s->is_multicast = find_info_tag(buf, sizeof(buf), "multicast", p);
+ s->reuse_socket = find_info_tag(buf, sizeof(buf), "reuse", p);
+ if (find_info_tag(buf, sizeof(buf), "ttl", p)) {
+ s->ttl = strtol(buf, NULL, 10);
+ }
+ if (find_info_tag(buf, sizeof(buf), "localport", p)) {
+ s->local_port = strtol(buf, NULL, 10);
+ }
+ if (find_info_tag(buf, sizeof(buf), "pkt_size", p)) {
+ h->max_packet_size = strtol(buf, NULL, 10);
+ }
+ }
+
+ /* fill the dest addr */
+ url_split(NULL, 0, NULL, 0, hostname, sizeof(hostname), &port, NULL, 0, uri);
+
+ /* XXX: fix url_split */
+ if (hostname[0] == '\0' || hostname[0] == '?') {
+ /* only accepts null hostname if input */
+ if (s->is_multicast || (flags & URL_WRONLY))
+ goto fail;
+ } else {
+ udp_set_remote_url(h, uri);
+ }
+
+#ifndef CONFIG_IPV6
+ udp_fd = socket(PF_INET, SOCK_DGRAM, 0);
+ if (udp_fd < 0)
+ goto fail;
+
+ my_addr.sin_family = AF_INET;
+ my_addr.sin_addr.s_addr = htonl (INADDR_ANY);
+ if (s->is_multicast && !(h->flags & URL_WRONLY)) {
+ /* special case: the bind must be done on the multicast address port */
+ my_addr.sin_port = s->dest_addr.sin_port;
+ } else {
+ my_addr.sin_port = htons(s->local_port);
+ }
+
+ if (s->reuse_socket)
+ if (setsockopt (udp_fd, SOL_SOCKET, SO_REUSEADDR, &(s->reuse_socket), sizeof(s->reuse_socket)) != 0)
+ goto fail;
+
+ /* the bind is needed to give a port to the socket now */
+ if (bind(udp_fd,(struct sockaddr *)&my_addr, sizeof(my_addr)) < 0)
+ goto fail;
+
+ len = sizeof(my_addr1);
+ getsockname(udp_fd, (struct sockaddr *)&my_addr1, &len);
+ s->local_port = ntohs(my_addr1.sin_port);
+
+#ifndef CONFIG_BEOS_NETSERVER
+ if (s->is_multicast) {
+ if (h->flags & URL_WRONLY) {
+ /* output */
+ if (setsockopt(udp_fd, IPPROTO_IP, IP_MULTICAST_TTL,
+ &s->ttl, sizeof(s->ttl)) < 0) {
+ perror("IP_MULTICAST_TTL");
+ goto fail;
+ }
+ } else {
+ /* input */
+ memset(&s->mreq, 0, sizeof(s->mreq));
+ s->mreq.imr_multiaddr = s->dest_addr.sin_addr;
+ s->mreq.imr_interface.s_addr = htonl (INADDR_ANY);
+ if (setsockopt(udp_fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
+ &s->mreq, sizeof(s->mreq)) < 0) {
+ perror("rtp: IP_ADD_MEMBERSHIP");
+ goto fail;
+ }
+ }
+ }
+#endif
+#else
+ if (s->is_multicast && !(h->flags & URL_WRONLY))
+ s->local_port = port;
+ udp_fd = udp_ipv6_set_local(h);
+ if (udp_fd < 0)
+ goto fail;
+#ifndef CONFIG_BEOS_NETSERVER
+ if (s->is_multicast) {
+ if (h->flags & URL_WRONLY) {
+ if (udp_ipv6_set_multicast_ttl(udp_fd, s->ttl, (struct sockaddr *)&s->dest_addr) < 0)
+ goto fail;
+ } else {
+ if (udp_ipv6_join_multicast_group(udp_fd, (struct sockaddr *)&s->dest_addr) < 0)
+ goto fail;
+ }
+ }
+#endif
+#endif
+
+ if (is_output) {
+ /* limit the tx buf size to limit latency */
+ tmp = UDP_TX_BUF_SIZE;
+ if (setsockopt(udp_fd, SOL_SOCKET, SO_SNDBUF, &tmp, sizeof(tmp)) < 0) {
+ perror("setsockopt sndbuf");
+ goto fail;
+ }
+ }
+
+ s->udp_fd = udp_fd;
+ return 0;
+ fail:
+ if (udp_fd >= 0)
+#ifdef CONFIG_BEOS_NETSERVER
+ closesocket(udp_fd);
+#else
+ close(udp_fd);
+#endif
+ av_free(s);
+ return AVERROR_IO;
+}
+
+static int udp_read(URLContext *h, uint8_t *buf, int size)
+{
+ UDPContext *s = h->priv_data;
+#ifndef CONFIG_IPV6
+ struct sockaddr_in from;
+#else
+ struct sockaddr_storage from;
+#endif
+ socklen_t from_len;
+ int len;
+
+ for(;;) {
+ from_len = sizeof(from);
+ len = recvfrom (s->udp_fd, buf, size, 0,
+ (struct sockaddr *)&from, &from_len);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR)
+ return AVERROR_IO;
+ } else {
+ break;
+ }
+ }
+ return len;
+}
+
+static int udp_write(URLContext *h, uint8_t *buf, int size)
+{
+ UDPContext *s = h->priv_data;
+ int ret;
+
+ for(;;) {
+ ret = sendto (s->udp_fd, buf, size, 0,
+ (struct sockaddr *) &s->dest_addr,
+#ifndef CONFIG_IPV6
+ sizeof (s->dest_addr));
+#else
+ s->dest_addr_len);
+#endif
+ if (ret < 0) {
+ if (errno != EINTR && errno != EAGAIN)
+ return AVERROR_IO;
+ } else {
+ break;
+ }
+ }
+ return size;
+}
+
+static int udp_close(URLContext *h)
+{
+ UDPContext *s = h->priv_data;
+
+#ifndef CONFIG_BEOS_NETSERVER
+#ifndef CONFIG_IPV6
+ if (s->is_multicast && !(h->flags & URL_WRONLY)) {
+ if (setsockopt(s->udp_fd, IPPROTO_IP, IP_DROP_MEMBERSHIP,
+ &s->mreq, sizeof(s->mreq)) < 0) {
+ perror("IP_DROP_MEMBERSHIP");
+ }
+ }
+#else
+ if (s->is_multicast && !(h->flags & URL_WRONLY))
+ udp_ipv6_leave_multicast_group(s->udp_fd, (struct sockaddr *)&s->dest_addr);
+#endif
+ close(s->udp_fd);
+#else
+ closesocket(s->udp_fd);
+#endif
+ av_free(s);
+ return 0;
+}
+
+URLProtocol udp_protocol = {
+ "udp",
+ udp_open,
+ udp_read,
+ udp_write,
+ NULL, /* seek */
+ udp_close,
+};
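The option string documented in the udp_set_remote_url() comment is parsed with find_info_tag(), so several options can be chained with '&'. A minimal sketch, assuming url_open() from this avio version (helper name, group address, port and option values are examples only):

#include "avformat.h"

static int open_multicast_input(URLContext **h)   /* hypothetical helper */
{
    /* join 239.0.0.1:1234 as a multicast reader with ttl 16 and a fixed local port */
    return url_open(h, "udp://239.0.0.1:1234?multicast=1&ttl=16&localport=1234",
                    URL_RDONLY);
}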
diff --git a/contrib/ffmpeg/libavformat/utils.c b/contrib/ffmpeg/libavformat/utils.c
new file mode 100644
index 000000000..eaeeb7c16
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/utils.c
@@ -0,0 +1,3108 @@
+/*
+ * Various utilities for ffmpeg system
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+#include "opt.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+/**
+ * @file libavformat/utils.c
+ * Various utility functions for using the FFmpeg library.
+ */
+
+static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den);
+static void av_frac_add(AVFrac *f, int64_t incr);
+static void av_frac_set(AVFrac *f, int64_t val);
+
+/** head of registered input format linked list. */
+AVInputFormat *first_iformat = NULL;
+/** head of registered output format linked list. */
+AVOutputFormat *first_oformat = NULL;
+
+void av_register_input_format(AVInputFormat *format)
+{
+ AVInputFormat **p;
+ p = &first_iformat;
+ while (*p != NULL) p = &(*p)->next;
+ *p = format;
+ format->next = NULL;
+}
+
+void av_register_output_format(AVOutputFormat *format)
+{
+ AVOutputFormat **p;
+ p = &first_oformat;
+ while (*p != NULL) p = &(*p)->next;
+ *p = format;
+ format->next = NULL;
+}
+
+int match_ext(const char *filename, const char *extensions)
+{
+ const char *ext, *p;
+ char ext1[32], *q;
+
+ if(!filename)
+ return 0;
+
+ ext = strrchr(filename, '.');
+ if (ext) {
+ ext++;
+ p = extensions;
+ for(;;) {
+ q = ext1;
+ while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
+ *q++ = *p++;
+ *q = '\0';
+ if (!strcasecmp(ext1, ext))
+ return 1;
+ if (*p == '\0')
+ break;
+ p++;
+ }
+ }
+ return 0;
+}
+
+AVOutputFormat *guess_format(const char *short_name, const char *filename,
+ const char *mime_type)
+{
+ AVOutputFormat *fmt, *fmt_found;
+ int score_max, score;
+
+ /* specific test for image sequences */
+#ifdef CONFIG_IMAGE2_MUXER
+ if (!short_name && filename &&
+ av_filename_number_test(filename) &&
+ av_guess_image2_codec(filename) != CODEC_ID_NONE) {
+ return guess_format("image2", NULL, NULL);
+ }
+#endif
+ /* find the proper file type */
+ fmt_found = NULL;
+ score_max = 0;
+ fmt = first_oformat;
+ while (fmt != NULL) {
+ score = 0;
+ if (fmt->name && short_name && !strcmp(fmt->name, short_name))
+ score += 100;
+ if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
+ score += 10;
+ if (filename && fmt->extensions &&
+ match_ext(filename, fmt->extensions)) {
+ score += 5;
+ }
+ if (score > score_max) {
+ score_max = score;
+ fmt_found = fmt;
+ }
+ fmt = fmt->next;
+ }
+ return fmt_found;
+}
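Because of the scores used above (100 for the short name, 10 for the MIME type, 5 for an extension match), an explicit short name always outranks the filename. A sketch, assuming the muxers have been registered first (the helper name and file name are examples only):

#include "avformat.h"

static void pick_muxers(void)                       /* hypothetical helper */
{
    AVOutputFormat *by_ext, *by_name;

    av_register_all();
    by_ext  = guess_format(NULL,  "out.avi", NULL); /* extension match only, score 5 */
    by_name = guess_format("mp4", "out.avi", NULL); /* short-name match, score 100 */
    /* by_ext is the AVI muxer; by_name is the MP4 muxer despite the .avi extension */
    (void)by_ext;
    (void)by_name;
}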
+
+AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
+ const char *mime_type)
+{
+ AVOutputFormat *fmt = guess_format(short_name, filename, mime_type);
+
+ if (fmt) {
+ AVOutputFormat *stream_fmt;
+ char stream_format_name[64];
+
+ snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
+ stream_fmt = guess_format(stream_format_name, NULL, NULL);
+
+ if (stream_fmt)
+ fmt = stream_fmt;
+ }
+
+ return fmt;
+}
+
+/**
+ * Guesses the codec id based upon muxer and filename.
+ */
+enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
+ const char *filename, const char *mime_type, enum CodecType type){
+ if(type == CODEC_TYPE_VIDEO){
+ enum CodecID codec_id= CODEC_ID_NONE;
+
+#ifdef CONFIG_IMAGE2_MUXER
+ if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
+ codec_id= av_guess_image2_codec(filename);
+ }
+#endif
+ if(codec_id == CODEC_ID_NONE)
+ codec_id= fmt->video_codec;
+ return codec_id;
+ }else if(type == CODEC_TYPE_AUDIO)
+ return fmt->audio_codec;
+ else
+ return CODEC_ID_NONE;
+}
+
+/**
+ * finds AVInputFormat based on input format's short name.
+ */
+AVInputFormat *av_find_input_format(const char *short_name)
+{
+ AVInputFormat *fmt;
+ for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
+ if (!strcmp(fmt->name, short_name))
+ return fmt;
+ }
+ return NULL;
+}
+
+/* memory handling */
+
+/**
+ * Default packet destructor.
+ */
+void av_destruct_packet(AVPacket *pkt)
+{
+ av_free(pkt->data);
+ pkt->data = NULL; pkt->size = 0;
+}
+
+/**
+ * Allocate the payload of a packet and initialize its fields to default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK. AVERROR_xxx otherwise.
+ */
+int av_new_packet(AVPacket *pkt, int size)
+{
+ uint8_t *data;
+ if((unsigned)size > (unsigned)size + FF_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR_NOMEM;
+ data = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!data)
+ return AVERROR_NOMEM;
+ memset(data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+
+ av_init_packet(pkt);
+ pkt->data = data;
+ pkt->size = size;
+ pkt->destruct = av_destruct_packet;
+ return 0;
+}
+
+/**
+ * Allocate and read the payload of a packet and initialize its fields to default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return >0 (read size) if OK. AVERROR_xxx otherwise.
+ */
+int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
+{
+ int ret= av_new_packet(pkt, size);
+
+ if(ret<0)
+ return ret;
+
+ pkt->pos= url_ftell(s);
+
+ ret= get_buffer(s, pkt->data, size);
+ if(ret<=0)
+ av_free_packet(pkt);
+ else
+ pkt->size= ret;
+
+ return ret;
+}
+
+/* This is a hack - the packet memory allocation stuff is broken. The
+   packet data is copied into a freshly allocated buffer if it was not really allocated */
+int av_dup_packet(AVPacket *pkt)
+{
+ if (pkt->destruct != av_destruct_packet) {
+ uint8_t *data;
+ /* we duplicate the packet and don't forget to put the padding
+ again */
+ if((unsigned)pkt->size > (unsigned)pkt->size + FF_INPUT_BUFFER_PADDING_SIZE)
+ return AVERROR_NOMEM;
+ data = av_malloc(pkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!data) {
+ return AVERROR_NOMEM;
+ }
+ memcpy(data, pkt->data, pkt->size);
+ memset(data + pkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+ pkt->data = data;
+ pkt->destruct = av_destruct_packet;
+ }
+ return 0;
+}
+
+/**
+ * Check whether filename looks like a valid numbered sequence string.
+ *
+ * @param filename possible numbered sequence string
+ * @return 1 if a valid numbered sequence string, 0 otherwise.
+ */
+int av_filename_number_test(const char *filename)
+{
+ char buf[1024];
+ return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
+}
+
+/**
+ * Guess file format.
+ */
+AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened)
+{
+ AVInputFormat *fmt1, *fmt;
+ int score, score_max;
+
+ fmt = NULL;
+ score_max = 0;
+ for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
+ if (!is_opened && !(fmt1->flags & AVFMT_NOFILE))
+ continue;
+ score = 0;
+ if (fmt1->read_probe) {
+ score = fmt1->read_probe(pd);
+ } else if (fmt1->extensions) {
+ if (match_ext(pd->filename, fmt1->extensions)) {
+ score = 50;
+ }
+ }
+ if (score > score_max) {
+ score_max = score;
+ fmt = fmt1;
+ }
+ }
+ return fmt;
+}
+
+/************************************************************/
+/* input media file */
+
+/**
+ * Open a media file from an IO stream. 'fmt' must be specified.
+ */
+static const char* format_to_name(void* ptr)
+{
+ AVFormatContext* fc = (AVFormatContext*) ptr;
+ if(fc->iformat) return fc->iformat->name;
+ else if(fc->oformat) return fc->oformat->name;
+ else return "NULL";
+}
+
+#define OFFSET(x) offsetof(AVFormatContext,x)
+#define DEFAULT 0 //should be NAN but it doesn't work as it's not a constant in glibc as required by ANSI/ISO C
+//these names are too long to be readable
+#define E AV_OPT_FLAG_ENCODING_PARAM
+#define D AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[]={
+{"probesize", NULL, OFFSET(probesize), FF_OPT_TYPE_INT, 32000, 32, INT_MAX, D}, /* 32000 from mpegts.c: 1.0 second at 24Mbit/s */
+{"muxrate", "set mux rate", OFFSET(mux_rate), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
+{"packetsize", "set packet size", OFFSET(packet_size), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
+{"fflags", NULL, OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, D|E, "fflags"},
+{"ignidx", "ignore index", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_IGNIDX, INT_MIN, INT_MAX, D, "fflags"},
+{"genpts", "generate pts", 0, FF_OPT_TYPE_CONST, AVFMT_FLAG_GENPTS, INT_MIN, INT_MAX, D, "fflags"},
+{"track", "set the track number", OFFSET(track), FF_OPT_TYPE_INT, DEFAULT, 0, INT_MAX, E},
+{"year", "set the year", OFFSET(year), FF_OPT_TYPE_INT, DEFAULT, INT_MIN, INT_MAX, E},
+{NULL},
+};
+
+#undef E
+#undef D
+#undef DEFAULT
+
+static const AVClass av_format_context_class = { "AVFormatContext", format_to_name, options };
+
+#if LIBAVFORMAT_VERSION_INT >= ((51<<16)+(0<<8)+0)
+static
+#endif
+void avformat_get_context_defaults(AVFormatContext *s){
+ memset(s, 0, sizeof(AVFormatContext));
+
+ s->av_class = &av_format_context_class;
+
+ av_opt_set_defaults(s);
+}
+
+AVFormatContext *av_alloc_format_context(void)
+{
+ AVFormatContext *ic;
+ ic = av_malloc(sizeof(AVFormatContext));
+ if (!ic) return ic;
+ avformat_get_context_defaults(ic);
+ ic->av_class = &av_format_context_class;
+ return ic;
+}
+
+/**
+ * Allocates all the structures needed to read an input stream.
+ * This does not open the needed codecs for decoding the stream[s].
+ */
+int av_open_input_stream(AVFormatContext **ic_ptr,
+ ByteIOContext *pb, const char *filename,
+ AVInputFormat *fmt, AVFormatParameters *ap)
+{
+ int err;
+ AVFormatContext *ic;
+ AVFormatParameters default_ap;
+
+ if(!ap){
+ ap=&default_ap;
+ memset(ap, 0, sizeof(default_ap));
+ }
+
+ if(!ap->prealloced_context)
+ ic = av_alloc_format_context();
+ else
+ ic = *ic_ptr;
+ if (!ic) {
+ err = AVERROR_NOMEM;
+ goto fail;
+ }
+ ic->iformat = fmt;
+ if (pb)
+ ic->pb = *pb;
+ ic->duration = AV_NOPTS_VALUE;
+ ic->start_time = AV_NOPTS_VALUE;
+ pstrcpy(ic->filename, sizeof(ic->filename), filename);
+
+ /* allocate private data */
+ if (fmt->priv_data_size > 0) {
+ ic->priv_data = av_mallocz(fmt->priv_data_size);
+ if (!ic->priv_data) {
+ err = AVERROR_NOMEM;
+ goto fail;
+ }
+ } else {
+ ic->priv_data = NULL;
+ }
+
+ err = ic->iformat->read_header(ic, ap);
+ if (err < 0)
+ goto fail;
+
+ if (pb && !ic->data_offset)
+ ic->data_offset = url_ftell(&ic->pb);
+
+ *ic_ptr = ic;
+ return 0;
+ fail:
+ if (ic) {
+ av_freep(&ic->priv_data);
+ }
+ av_free(ic);
+ *ic_ptr = NULL;
+ return err;
+}
+
+/** Size of probe buffer, for guessing file type from file contents. */
+#define PROBE_BUF_MIN 2048
+#define PROBE_BUF_MAX (1<<20)
+
+/**
+ * Open a media file as input. The codecs are not opened. Only the file
+ * header (if present) is read.
+ *
+ * @param ic_ptr the opened media file handle is put here
+ * @param filename filename to open.
+ * @param fmt if non NULL, force the file format to use
+ * @param buf_size optional buffer size (zero if default is OK)
+ * @param ap additional parameters needed when opening the file (NULL if default)
+ * @return 0 if OK. AVERROR_xxx otherwise.
+ */
+int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
+ AVInputFormat *fmt,
+ int buf_size,
+ AVFormatParameters *ap)
+{
+ int err, must_open_file, file_opened, probe_size;
+ AVProbeData probe_data, *pd = &probe_data;
+ ByteIOContext pb1, *pb = &pb1;
+
+ file_opened = 0;
+ pd->filename = "";
+ if (filename)
+ pd->filename = filename;
+ pd->buf = NULL;
+ pd->buf_size = 0;
+
+ if (!fmt) {
+ /* guess format if no file can be opened */
+ fmt = av_probe_input_format(pd, 0);
+ }
+
+ /* do not open file if the format does not need it. XXX: specific
+ hack needed to handle RTSP/TCP */
+ must_open_file = 1;
+ if (fmt && (fmt->flags & AVFMT_NOFILE)) {
+ must_open_file = 0;
+        pb= NULL; //FIXME this or memset(pb, 0, sizeof(ByteIOContext)); otherwise it's uninitialized
+ }
+
+ if (!fmt || must_open_file) {
+ /* if no file needed do not try to open one */
+ if (url_fopen(pb, filename, URL_RDONLY) < 0) {
+ err = AVERROR_IO;
+ goto fail;
+ }
+ file_opened = 1;
+ if (buf_size > 0) {
+ url_setbufsize(pb, buf_size);
+ }
+
+ for(probe_size= PROBE_BUF_MIN; probe_size<=PROBE_BUF_MAX && !fmt; probe_size<<=1){
+ /* read probe data */
+ pd->buf= av_realloc(pd->buf, probe_size);
+ pd->buf_size = get_buffer(pb, pd->buf, probe_size);
+ if (url_fseek(pb, 0, SEEK_SET) == (offset_t)-EPIPE) {
+ url_fclose(pb);
+ if (url_fopen(pb, filename, URL_RDONLY) < 0) {
+ file_opened = 0;
+ err = AVERROR_IO;
+ goto fail;
+ }
+ }
+ /* guess file format */
+ fmt = av_probe_input_format(pd, 1);
+ }
+ av_freep(&pd->buf);
+ }
+
+ /* if still no format found, error */
+ if (!fmt) {
+ err = AVERROR_NOFMT;
+ goto fail;
+ }
+
+ /* XXX: suppress this hack for redirectors */
+#ifdef CONFIG_NETWORK
+ if (fmt == &redir_demuxer) {
+ err = redir_open(ic_ptr, pb);
+ url_fclose(pb);
+ return err;
+ }
+#endif
+
+    /* check filename in case an image number is expected */
+ if (fmt->flags & AVFMT_NEEDNUMBER) {
+ if (!av_filename_number_test(filename)) {
+ err = AVERROR_NUMEXPECTED;
+ goto fail;
+ }
+ }
+ err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);
+ if (err)
+ goto fail;
+ return 0;
+ fail:
+ av_freep(&pd->buf);
+ if (file_opened)
+ url_fclose(pb);
+ *ic_ptr = NULL;
+ return err;
+
+}
+
+/*******************************************************/
+
+/**
+ * Read a transport packet from a media file.
+ *
+ * This function is obsolete and should never be used.
+ * Use av_read_frame() instead.
+ *
+ * @param s media file handle
+ * @param pkt is filled
+ * @return 0 if OK. AVERROR_xxx if error.
+ */
+int av_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ return s->iformat->read_packet(s, pkt);
+}
+
+/**********************************************************/
+
+/**
+ * Get the number of samples of an audio frame. Return (-1) if error.
+ */
+static int get_audio_frame_size(AVCodecContext *enc, int size)
+{
+ int frame_size;
+
+ if (enc->frame_size <= 1) {
+ int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
+
+ if (bits_per_sample) {
+ if (enc->channels == 0)
+ return -1;
+ frame_size = (size << 3) / (bits_per_sample * enc->channels);
+ } else {
+ /* used for example by ADPCM codecs */
+ if (enc->bit_rate == 0)
+ return -1;
+ frame_size = (size * 8 * enc->sample_rate) / enc->bit_rate;
+ }
+ } else {
+ frame_size = enc->frame_size;
+ }
+ return frame_size;
+}
+
+
+/**
+ * Compute the frame duration as a fraction of a second (*pnum / *pden); both are set to 0 if not available.
+ */
+static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
+ AVCodecParserContext *pc, AVPacket *pkt)
+{
+ int frame_size;
+
+ *pnum = 0;
+ *pden = 0;
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ if(st->time_base.num*1000LL > st->time_base.den){
+ *pnum = st->time_base.num;
+ *pden = st->time_base.den;
+ }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
+ *pnum = st->codec->time_base.num;
+ *pden = st->codec->time_base.den;
+ if (pc && pc->repeat_pict) {
+ *pden *= 2;
+ *pnum = (*pnum) * (2 + pc->repeat_pict);
+ }
+ }
+ break;
+ case CODEC_TYPE_AUDIO:
+ frame_size = get_audio_frame_size(st->codec, pkt->size);
+ if (frame_size < 0)
+ break;
+ *pnum = frame_size;
+ *pden = st->codec->sample_rate;
+ break;
+ default:
+ break;
+ }
+}
+
+static int is_intra_only(AVCodecContext *enc){
+ if(enc->codec_type == CODEC_TYPE_AUDIO){
+ return 1;
+ }else if(enc->codec_type == CODEC_TYPE_VIDEO){
+ switch(enc->codec_id){
+ case CODEC_ID_MJPEG:
+ case CODEC_ID_MJPEGB:
+ case CODEC_ID_LJPEG:
+ case CODEC_ID_RAWVIDEO:
+ case CODEC_ID_DVVIDEO:
+ case CODEC_ID_HUFFYUV:
+ case CODEC_ID_FFVHUFF:
+ case CODEC_ID_ASV1:
+ case CODEC_ID_ASV2:
+ case CODEC_ID_VCR1:
+ return 1;
+ default: break;
+ }
+ }
+ return 0;
+}
+
+static int64_t lsb2full(int64_t lsb, int64_t last_ts, int lsb_bits){
+ int64_t mask = lsb_bits < 64 ? (1LL<<lsb_bits)-1 : -1LL;
+ int64_t delta= last_ts - mask/2;
+ return ((lsb - delta)&mask) + delta;
+}
+
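A worked example of the unwrapping done by lsb2full(): with 33-bit timestamps (a typical st->pts_wrap_bits value), a counter that wrapped back to 50 is mapped into the window centred on last_ts. A sketch only; the helper name is made up and assert.h is already included at the top of this file:

static void lsb2full_example(void)          /* hypothetical helper */
{
    int64_t last_ts = (1LL << 33) - 100;    /* last timestamp seen, just before the wrap */
    int64_t wrapped = 50;                   /* lsb value read after the counter wrapped */

    /* mask = 2^33-1, delta = last_ts - mask/2, so 50 unwraps to 2^33 + 50 */
    assert(lsb2full(wrapped, last_ts, 33) == (1LL << 33) + 50);
}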
+static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
+ AVCodecParserContext *pc, AVPacket *pkt)
+{
+ int num, den, presentation_delayed;
+ /* handle wrapping */
+ if(st->cur_dts != AV_NOPTS_VALUE){
+ if(pkt->pts != AV_NOPTS_VALUE)
+ pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
+ if(pkt->dts != AV_NOPTS_VALUE)
+ pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
+ }
+
+ if (pkt->duration == 0) {
+ compute_frame_duration(&num, &den, st, pc, pkt);
+ if (den && num) {
+ pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
+ }
+ }
+
+ if(is_intra_only(st->codec))
+ pkt->flags |= PKT_FLAG_KEY;
+
+ /* do we have a video B frame ? */
+ presentation_delayed = 0;
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ /* XXX: need has_b_frame, but cannot get it if the codec is
+ not initialized */
+ if (( st->codec->codec_id == CODEC_ID_H264
+ || st->codec->has_b_frames) &&
+ pc && pc->pict_type != FF_B_TYPE)
+ presentation_delayed = 1;
+        /* this may be redundant, but it shouldn't hurt */
+ if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
+ presentation_delayed = 1;
+ }
+
+ if(st->cur_dts == AV_NOPTS_VALUE){
+ if(presentation_delayed) st->cur_dts = -pkt->duration;
+ else st->cur_dts = 0;
+ }
+
+// av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
+ /* interpolate PTS and DTS if they are not present */
+ if (presentation_delayed) {
+ /* DTS = decompression time stamp */
+ /* PTS = presentation time stamp */
+ if (pkt->dts == AV_NOPTS_VALUE) {
+ /* if we know the last pts, use it */
+ if(st->last_IP_pts != AV_NOPTS_VALUE)
+ st->cur_dts = pkt->dts = st->last_IP_pts;
+ else
+ pkt->dts = st->cur_dts;
+ } else {
+ st->cur_dts = pkt->dts;
+ }
+ /* this is tricky: the dts must be incremented by the duration
+ of the frame we are displaying, i.e. the last I or P frame */
+ if (st->last_IP_duration == 0)
+ st->cur_dts += pkt->duration;
+ else
+ st->cur_dts += st->last_IP_duration;
+ st->last_IP_duration = pkt->duration;
+ st->last_IP_pts= pkt->pts;
+        /* cannot compute PTS if not present (we can compute it only
+           by knowing the future) */
+ } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
+ if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
+ int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
+ int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
+ if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
+ pkt->pts += pkt->duration;
+// av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
+ }
+ }
+
+ /* presentation is not delayed : PTS and DTS are the same */
+ if (pkt->pts == AV_NOPTS_VALUE) {
+ if (pkt->dts == AV_NOPTS_VALUE) {
+ pkt->pts = st->cur_dts;
+ pkt->dts = st->cur_dts;
+ }
+ else {
+ st->cur_dts = pkt->dts;
+ pkt->pts = pkt->dts;
+ }
+ } else {
+ st->cur_dts = pkt->pts;
+ pkt->dts = pkt->pts;
+ }
+ st->cur_dts += pkt->duration;
+ }
+// av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
+
+ /* update flags */
+ if (pc) {
+ pkt->flags = 0;
+ /* key frame computation */
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ if (pc->pict_type == FF_I_TYPE)
+ pkt->flags |= PKT_FLAG_KEY;
+ break;
+ case CODEC_TYPE_AUDIO:
+ pkt->flags |= PKT_FLAG_KEY;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void av_destruct_packet_nofree(AVPacket *pkt)
+{
+ pkt->data = NULL; pkt->size = 0;
+}
+
+static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
+{
+ AVStream *st;
+ int len, ret, i;
+
+ for(;;) {
+ /* select current input stream component */
+ st = s->cur_st;
+ if (st) {
+ if (!st->need_parsing || !st->parser) {
+ /* no parsing needed: we just output the packet as is */
+ /* raw data support */
+ *pkt = s->cur_pkt;
+ compute_pkt_fields(s, st, NULL, pkt);
+ s->cur_st = NULL;
+ break;
+ } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
+ len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
+ s->cur_ptr, s->cur_len,
+ s->cur_pkt.pts, s->cur_pkt.dts);
+ s->cur_pkt.pts = AV_NOPTS_VALUE;
+ s->cur_pkt.dts = AV_NOPTS_VALUE;
+ /* increment read pointer */
+ s->cur_ptr += len;
+ s->cur_len -= len;
+
+ /* return packet if any */
+ if (pkt->size) {
+ got_packet:
+ pkt->duration = 0;
+ pkt->stream_index = st->index;
+ pkt->pts = st->parser->pts;
+ pkt->dts = st->parser->dts;
+ pkt->destruct = av_destruct_packet_nofree;
+ compute_pkt_fields(s, st, st->parser, pkt);
+ break;
+ }
+ } else {
+ /* free packet */
+ av_free_packet(&s->cur_pkt);
+ s->cur_st = NULL;
+ }
+ } else {
+ /* read next packet */
+ ret = av_read_packet(s, &s->cur_pkt);
+ if (ret < 0) {
+ if (ret == -EAGAIN)
+ return ret;
+ /* return the last frames, if any */
+ for(i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ if (st->parser && st->need_parsing) {
+ av_parser_parse(st->parser, st->codec,
+ &pkt->data, &pkt->size,
+ NULL, 0,
+ AV_NOPTS_VALUE, AV_NOPTS_VALUE);
+ if (pkt->size)
+ goto got_packet;
+ }
+ }
+ /* no more packets: really terminates parsing */
+ return ret;
+ }
+
+ st = s->streams[s->cur_pkt.stream_index];
+ if(st->codec->debug & FF_DEBUG_PTS)
+ av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
+ s->cur_pkt.stream_index,
+ s->cur_pkt.pts,
+ s->cur_pkt.dts,
+ s->cur_pkt.size);
+
+ s->cur_st = st;
+ s->cur_ptr = s->cur_pkt.data;
+ s->cur_len = s->cur_pkt.size;
+ if (st->need_parsing && !st->parser) {
+ st->parser = av_parser_init(st->codec->codec_id);
+ if (!st->parser) {
+ /* no parser available : just output the raw packets */
+ st->need_parsing = 0;
+ }else if(st->need_parsing == 2){
+ st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
+ }
+ }
+ }
+ }
+ if(st->codec->debug & FF_DEBUG_PTS)
+ av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
+ pkt->stream_index,
+ pkt->pts,
+ pkt->dts,
+ pkt->size);
+
+ return 0;
+}
+
+/**
+ * Return the next frame of a stream.
+ *
+ * The returned packet is valid
+ * until the next av_read_frame() or until av_close_input_file() and
+ * must be freed with av_free_packet. For video, the packet contains
+ * exactly one frame. For audio, it contains an integer number of
+ * frames if each frame has a known fixed size (e.g. PCM or ADPCM
+ * data). If the audio frames have a variable size (e.g. MPEG audio),
+ * then it contains one frame.
+ *
+ * pkt->pts, pkt->dts and pkt->duration are always set to correct
+ * values in AV_TIME_BASE unit (and guessed if the format cannot
+ * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
+ * has B frames, so it is better to rely on pkt->dts if you do not
+ * decompress the payload.
+ *
+ * @return 0 if OK, < 0 if error or end of file.
+ */
+int av_read_frame(AVFormatContext *s, AVPacket *pkt)
+{
+ AVPacketList *pktl;
+ int eof=0;
+ const int genpts= s->flags & AVFMT_FLAG_GENPTS;
+
+ for(;;){
+ pktl = s->packet_buffer;
+ if (pktl) {
+ AVPacket *next_pkt= &pktl->pkt;
+
+ if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
+ while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
+ if( pktl->pkt.stream_index == next_pkt->stream_index
+ && next_pkt->dts < pktl->pkt.dts
+ && pktl->pkt.pts != pktl->pkt.dts //not b frame
+ /*&& pktl->pkt.dts != AV_NOPTS_VALUE*/){
+ next_pkt->pts= pktl->pkt.dts;
+ }
+ pktl= pktl->next;
+ }
+ pktl = s->packet_buffer;
+ }
+
+ if( next_pkt->pts != AV_NOPTS_VALUE
+ || next_pkt->dts == AV_NOPTS_VALUE
+ || !genpts || eof){
+ /* read packet from packet buffer, if there is data */
+ *pkt = *next_pkt;
+ s->packet_buffer = pktl->next;
+ av_free(pktl);
+ return 0;
+ }
+ }
+ if(genpts){
+ AVPacketList **plast_pktl= &s->packet_buffer;
+ int ret= av_read_frame_internal(s, pkt);
+ if(ret<0){
+ if(pktl && ret != -EAGAIN){
+ eof=1;
+ continue;
+ }else
+ return ret;
+ }
+
+ /* duplicate the packet */
+ if (av_dup_packet(pkt) < 0)
+ return AVERROR_NOMEM;
+
+ while(*plast_pktl) plast_pktl= &(*plast_pktl)->next; //FIXME maybe maintain pointer to the last?
+
+ pktl = av_mallocz(sizeof(AVPacketList));
+ if (!pktl)
+ return AVERROR_NOMEM;
+
+ /* add the packet in the buffered packet list */
+ *plast_pktl = pktl;
+ pktl->pkt= *pkt;
+ }else{
+ assert(!s->packet_buffer);
+ return av_read_frame_internal(s, pkt);
+ }
+ }
+}
+
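The contract described above reduces to the usual open / read_frame / free loop. A minimal sketch, assuming av_register_all() and av_close_input_file() from the same library version (the helper name and log message are illustrative only):

#include "avformat.h"

static int dump_packet_sizes(const char *filename)  /* hypothetical helper */
{
    AVFormatContext *ic;
    AVPacket pkt;

    av_register_all();
    if (av_open_input_file(&ic, filename, NULL, 0, NULL) < 0)
        return -1;
    while (av_read_frame(ic, &pkt) >= 0) {
        av_log(ic, AV_LOG_INFO, "stream %d: %d bytes\n", pkt.stream_index, pkt.size);
        av_free_packet(&pkt);               /* the packet is ours until we free it */
    }
    av_close_input_file(ic);
    return 0;
}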
+/* XXX: suppress the packet queue */
+static void flush_packet_queue(AVFormatContext *s)
+{
+ AVPacketList *pktl;
+
+ for(;;) {
+ pktl = s->packet_buffer;
+ if (!pktl)
+ break;
+ s->packet_buffer = pktl->next;
+ av_free_packet(&pktl->pkt);
+ av_free(pktl);
+ }
+}
+
+/*******************************************************/
+/* seek support */
+
+int av_find_default_stream_index(AVFormatContext *s)
+{
+ int i;
+ AVStream *st;
+
+ if (s->nb_streams <= 0)
+ return -1;
+ for(i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ return i;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Flush the frame reader.
+ */
+static void av_read_frame_flush(AVFormatContext *s)
+{
+ AVStream *st;
+ int i;
+
+ flush_packet_queue(s);
+
+ /* free previous packet */
+ if (s->cur_st) {
+ if (s->cur_st->parser)
+ av_free_packet(&s->cur_pkt);
+ s->cur_st = NULL;
+ }
+ /* fail safe */
+ s->cur_ptr = NULL;
+ s->cur_len = 0;
+
+ /* for each stream, reset read state */
+ for(i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+
+ if (st->parser) {
+ av_parser_close(st->parser);
+ st->parser = NULL;
+ }
+ st->last_IP_pts = AV_NOPTS_VALUE;
+ st->cur_dts = 0; /* we set the current DTS to an unspecified origin */
+ }
+}
+
+/**
+ * Updates cur_dts of all streams based on given timestamp and AVStream.
+ *
+ * Stream ref_st is left unchanged; the other streams get cur_dts set in their native time base.
+ * Only needed for timestamp wrapping or if (dts not set and pts != dts).
+ * @param timestamp new dts expressed in time_base of param ref_st
+ * @param ref_st reference stream giving time_base of param timestamp
+ */
+void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
+ int i;
+
+ for(i = 0; i < s->nb_streams; i++) {
+ AVStream *st = s->streams[i];
+
+ st->cur_dts = av_rescale(timestamp,
+ st->time_base.den * (int64_t)ref_st->time_base.num,
+ st->time_base.num * (int64_t)ref_st->time_base.den);
+ }
+}
+
+/**
+ * Add an index entry into a sorted list, updating it if it is already there.
+ *
+ * @param timestamp timestamp in the timebase of the given stream
+ */
+int av_add_index_entry(AVStream *st,
+ int64_t pos, int64_t timestamp, int size, int distance, int flags)
+{
+ AVIndexEntry *entries, *ie;
+ int index;
+
+ if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
+ return -1;
+
+ entries = av_fast_realloc(st->index_entries,
+ &st->index_entries_allocated_size,
+ (st->nb_index_entries + 1) *
+ sizeof(AVIndexEntry));
+ if(!entries)
+ return -1;
+
+ st->index_entries= entries;
+
+ index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
+
+ if(index<0){
+ index= st->nb_index_entries++;
+ ie= &entries[index];
+ assert(index==0 || ie[-1].timestamp < timestamp);
+ }else{
+ ie= &entries[index];
+ if(ie->timestamp != timestamp){
+ if(ie->timestamp <= timestamp)
+ return -1;
+ memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
+ st->nb_index_entries++;
+        }else if(ie->pos == pos && distance < ie->min_distance) //don't reduce the distance
+ distance= ie->min_distance;
+ }
+
+ ie->pos = pos;
+ ie->timestamp = timestamp;
+ ie->min_distance= distance;
+ ie->size= size;
+ ie->flags = flags;
+
+ return index;
+}
+
+/**
+ * build an index for raw streams using a parser.
+ */
+static void av_build_index_raw(AVFormatContext *s)
+{
+ AVPacket pkt1, *pkt = &pkt1;
+ int ret;
+ AVStream *st;
+
+ st = s->streams[0];
+ av_read_frame_flush(s);
+ url_fseek(&s->pb, s->data_offset, SEEK_SET);
+
+ for(;;) {
+ ret = av_read_frame(s, pkt);
+ if (ret < 0)
+ break;
+ if (pkt->stream_index == 0 && st->parser &&
+ (pkt->flags & PKT_FLAG_KEY)) {
+ av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
+ 0, 0, AVINDEX_KEYFRAME);
+ }
+ av_free_packet(pkt);
+ }
+}
+
+/**
+ * Returns TRUE if we deal with a raw stream.
+ *
+ * A raw stream carries raw codec data and needs parsing.
+ */
+static int is_raw_stream(AVFormatContext *s)
+{
+ AVStream *st;
+
+ if (s->nb_streams != 1)
+ return 0;
+ st = s->streams[0];
+ if (!st->need_parsing)
+ return 0;
+ return 1;
+}
+
+/**
+ * Gets the index for a specific timestamp.
+ * @param flags if AVSEEK_FLAG_BACKWARD is set, the returned index will correspond to
+ *              the timestamp which is <= the requested one; otherwise it will
+ *              correspond to the timestamp which is >= the requested one.
+ *              If AVSEEK_FLAG_ANY is set, any frame qualifies; otherwise only keyframes do.
+ * @return < 0 if no such timestamp could be found
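+ *
+ * For example, a demuxer looking for the last keyframe at or before a
+ * target timestamp could do the following (sketch; 'st' is assumed to
+ * belong to the demuxer context 's' and the index is assumed to have
+ * been filled with av_add_index_entry()):
+ * @code
+ * int index = av_index_search_timestamp(st, target_ts, AVSEEK_FLAG_BACKWARD);
+ * if (index >= 0)
+ *     url_fseek(&s->pb, st->index_entries[index].pos, SEEK_SET);
+ * @endcode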
+ */
+int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
+ int flags)
+{
+ AVIndexEntry *entries= st->index_entries;
+ int nb_entries= st->nb_index_entries;
+ int a, b, m;
+ int64_t timestamp;
+
+ a = - 1;
+ b = nb_entries;
+
+ while (b - a > 1) {
+ m = (a + b) >> 1;
+ timestamp = entries[m].timestamp;
+ if(timestamp >= wanted_timestamp)
+ b = m;
+ if(timestamp <= wanted_timestamp)
+ a = m;
+ }
+ m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
+
+ if(!(flags & AVSEEK_FLAG_ANY)){
+ while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
+ m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
+ }
+ }
+
+ if(m == nb_entries)
+ return -1;
+ return m;
+}
+
+#define DEBUG_SEEK
+
+/**
+ * Does a binary search using av_index_search_timestamp() and AVInputFormat.read_timestamp().
+ * This is not supposed to be called directly by a user application, but by demuxers.
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
+int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
+ AVInputFormat *avif= s->iformat;
+ int64_t pos_min, pos_max, pos, pos_limit;
+ int64_t ts_min, ts_max, ts;
+ int index;
+ AVStream *st;
+
+ if (stream_index < 0)
+ return -1;
+
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
+#endif
+
+ ts_max=
+ ts_min= AV_NOPTS_VALUE;
+    pos_limit= -1; //gcc falsely says it may be uninitialized
+
+ st= s->streams[stream_index];
+ if(st->index_entries){
+ AVIndexEntry *e;
+
+ index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non keyframe entries in index case, especially read_timestamp()
+ index= FFMAX(index, 0);
+ e= &st->index_entries[index];
+
+ if(e->timestamp <= target_ts || e->pos == e->min_distance){
+ pos_min= e->pos;
+ ts_min= e->timestamp;
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
+ pos_min,ts_min);
+#endif
+ }else{
+ assert(index==0);
+ }
+
+ index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
+ assert(index < st->nb_index_entries);
+ if(index >= 0){
+ e= &st->index_entries[index];
+ assert(e->timestamp >= target_ts);
+ pos_max= e->pos;
+ ts_max= e->timestamp;
+ pos_limit= pos_max - e->min_distance;
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
+ pos_max,pos_limit, ts_max);
+#endif
+ }
+ }
+
+ pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
+ if(pos<0)
+ return -1;
+
+ /* do the seek */
+ url_fseek(&s->pb, pos, SEEK_SET);
+
+ av_update_cur_dts(s, st, ts);
+
+ return 0;
+}
+
+/**
+ * Does a binary search using read_timestamp().
+ * This is not supposed to be called directly by a user application, but by demuxers.
+ * @param target_ts target timestamp in the time base of the given stream
+ * @param stream_index stream number
+ */
+int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
+ int64_t pos, ts;
+ int64_t start_pos, filesize;
+ int no_change;
+
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
+#endif
+
+ if(ts_min == AV_NOPTS_VALUE){
+ pos_min = s->data_offset;
+ ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
+ if (ts_min == AV_NOPTS_VALUE)
+ return -1;
+ }
+
+ if(ts_max == AV_NOPTS_VALUE){
+ int step= 1024;
+ filesize = url_fsize(&s->pb);
+ pos_max = filesize - 1;
+ do{
+ pos_max -= step;
+ ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
+ step += step;
+ }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
+ if (ts_max == AV_NOPTS_VALUE)
+ return -1;
+
+ for(;;){
+ int64_t tmp_pos= pos_max + 1;
+ int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
+ if(tmp_ts == AV_NOPTS_VALUE)
+ break;
+ ts_max= tmp_ts;
+ pos_max= tmp_pos;
+ if(tmp_pos >= filesize)
+ break;
+ }
+ pos_limit= pos_max;
+ }
+
+ if(ts_min > ts_max){
+ return -1;
+ }else if(ts_min == ts_max){
+ pos_limit= pos_min;
+ }
+
+ no_change=0;
+ while (pos_min < pos_limit) {
+#ifdef DEBUG_SEEK
+ av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
+ pos_min, pos_max,
+ ts_min, ts_max);
+#endif
+ assert(pos_limit <= pos_max);
+
+ if(no_change==0){
+ int64_t approximate_keyframe_distance= pos_max - pos_limit;
+ // interpolate position (better than dichotomy)
+ pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
+ + pos_min - approximate_keyframe_distance;
+ }else if(no_change==1){
+ // bisection, if interpolation failed to change min or max pos last time
+ pos = (pos_min + pos_limit)>>1;
+ }else{
+            // linear search if bisection failed, can only happen if there are very few or no keyframes between min/max
+ pos=pos_min;
+ }
+ if(pos <= pos_min)
+ pos= pos_min + 1;
+ else if(pos > pos_limit)
+ pos= pos_limit;
+ start_pos= pos;
+
+ ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
+ if(pos == pos_max)
+ no_change++;
+ else
+ no_change=0;
+#ifdef DEBUG_SEEK
+av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit, start_pos, no_change);
+#endif
+ assert(ts != AV_NOPTS_VALUE);
+ if (target_ts <= ts) {
+ pos_limit = start_pos - 1;
+ pos_max = pos;
+ ts_max = ts;
+ }
+ if (target_ts >= ts) {
+ pos_min = pos;
+ ts_min = ts;
+ }
+ }
+
+ pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
+ ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
+#ifdef DEBUG_SEEK
+ pos_min = pos;
+ ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
+ pos_min++;
+ ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
+ av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
+ pos, ts_min, target_ts, ts_max);
+#endif
+ *ts_ret= ts;
+ return pos;
+}
+
+static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
+ int64_t pos_min, pos_max;
+#if 0
+ AVStream *st;
+
+ if (stream_index < 0)
+ return -1;
+
+ st= s->streams[stream_index];
+#endif
+
+ pos_min = s->data_offset;
+ pos_max = url_fsize(&s->pb) - 1;
+
+ if (pos < pos_min) pos= pos_min;
+ else if(pos > pos_max) pos= pos_max;
+
+ url_fseek(&s->pb, pos, SEEK_SET);
+
+#if 0
+ av_update_cur_dts(s, st, ts);
+#endif
+ return 0;
+}
+
+static int av_seek_frame_generic(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ int index;
+ AVStream *st;
+ AVIndexEntry *ie;
+
+ if (!s->index_built) {
+ if (is_raw_stream(s)) {
+ av_build_index_raw(s);
+ } else {
+ return -1;
+ }
+ s->index_built = 1;
+ }
+
+ st = s->streams[stream_index];
+ index = av_index_search_timestamp(st, timestamp, flags);
+ if (index < 0)
+ return -1;
+
+ /* now we have found the index, we can seek */
+ ie = &st->index_entries[index];
+ av_read_frame_flush(s);
+ url_fseek(&s->pb, ie->pos, SEEK_SET);
+
+ av_update_cur_dts(s, st, ie->timestamp);
+
+ return 0;
+}
+
+/**
+ * Seek to the key frame at timestamp.
+ * 'timestamp' in 'stream_index'.
+ * @param stream_index If stream_index is (-1), a default
+ * stream is selected, and timestamp is automatically converted
+ * from AV_TIME_BASE units to the stream specific time_base.
+ * @param timestamp timestamp in AVStream.time_base units
+ * or if there is no stream specified then in AV_TIME_BASE units
+ * @param flags flags which select direction and seeking mode
+ * @return >= 0 on success
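+ *
+ * For example, to seek to the key frame at or before the 60 second
+ * mark on the default stream (sketch; 'ic' is assumed to be an opened
+ * input context):
+ * @code
+ * if (av_seek_frame(ic, -1, 60 * (int64_t)AV_TIME_BASE, AVSEEK_FLAG_BACKWARD) < 0)
+ *     av_log(ic, AV_LOG_ERROR, "could not seek\n");
+ * @endcode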
+ */
+int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+ int ret;
+ AVStream *st;
+
+ av_read_frame_flush(s);
+
+ if(flags & AVSEEK_FLAG_BYTE)
+ return av_seek_frame_byte(s, stream_index, timestamp, flags);
+
+ if(stream_index < 0){
+ stream_index= av_find_default_stream_index(s);
+ if(stream_index < 0)
+ return -1;
+
+ st= s->streams[stream_index];
+ /* timestamp for default must be expressed in AV_TIME_BASE units */
+ timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
+ }
+ st= s->streams[stream_index];
+
+ /* first, we try the format specific seek */
+ if (s->iformat->read_seek)
+ ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
+ else
+ ret = -1;
+ if (ret >= 0) {
+ return 0;
+ }
+
+ if(s->iformat->read_timestamp)
+ return av_seek_frame_binary(s, stream_index, timestamp, flags);
+ else
+ return av_seek_frame_generic(s, stream_index, timestamp, flags);
+}
+
+/*******************************************************/
+
+/**
+ * Returns TRUE if at least one stream has accurate timings.
+ *
+ * @return TRUE if the format has accurate timings for at least one component.
+ */
+static int av_has_timings(AVFormatContext *ic)
+{
+ int i;
+ AVStream *st;
+
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->start_time != AV_NOPTS_VALUE &&
+ st->duration != AV_NOPTS_VALUE)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Estimate the overall stream timings from the timings of each component.
+ *
+ * Also computes the global bitrate if possible.
+ */
+static void av_update_stream_timings(AVFormatContext *ic)
+{
+ int64_t start_time, start_time1, end_time, end_time1;
+ int i;
+ AVStream *st;
+
+ start_time = MAXINT64;
+ end_time = MININT64;
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->start_time != AV_NOPTS_VALUE) {
+ start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
+ if (start_time1 < start_time)
+ start_time = start_time1;
+ if (st->duration != AV_NOPTS_VALUE) {
+ end_time1 = start_time1
+ + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
+ if (end_time1 > end_time)
+ end_time = end_time1;
+ }
+ }
+ }
+ if (start_time != MAXINT64) {
+ ic->start_time = start_time;
+ if (end_time != MININT64) {
+ ic->duration = end_time - start_time;
+ if (ic->file_size > 0) {
+ /* compute the bit rate */
+ ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
+ (double)ic->duration;
+ }
+ }
+ }
+
+}
+
+static void fill_all_stream_timings(AVFormatContext *ic)
+{
+ int i;
+ AVStream *st;
+
+ av_update_stream_timings(ic);
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->start_time == AV_NOPTS_VALUE) {
+ if(ic->start_time != AV_NOPTS_VALUE)
+ st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
+ if(ic->duration != AV_NOPTS_VALUE)
+ st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
+ }
+ }
+}
+
+static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
+{
+ int64_t filesize, duration;
+ int bit_rate, i;
+ AVStream *st;
+
+ /* if bit_rate is already set, we believe it */
+ if (ic->bit_rate == 0) {
+ bit_rate = 0;
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ bit_rate += st->codec->bit_rate;
+ }
+ ic->bit_rate = bit_rate;
+ }
+
+ /* if duration is already set, we believe it */
+ if (ic->duration == AV_NOPTS_VALUE &&
+ ic->bit_rate != 0 &&
+ ic->file_size != 0) {
+ filesize = ic->file_size;
+ if (filesize > 0) {
+ for(i = 0; i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
+ if (st->start_time == AV_NOPTS_VALUE ||
+ st->duration == AV_NOPTS_VALUE) {
+ st->start_time = 0;
+ st->duration = duration;
+ }
+ }
+ }
+ }
+}
+
+#define DURATION_MAX_READ_SIZE 250000
+
+/* only usable for MPEG-PS streams */
+static void av_estimate_timings_from_pts(AVFormatContext *ic)
+{
+ AVPacket pkt1, *pkt = &pkt1;
+ AVStream *st;
+ int read_size, i, ret;
+ int64_t end_time;
+ int64_t filesize, offset, duration;
+
+ /* free previous packet */
+ if (ic->cur_st && ic->cur_st->parser)
+ av_free_packet(&ic->cur_pkt);
+ ic->cur_st = NULL;
+
+ /* flush packet queue */
+ flush_packet_queue(ic);
+
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (st->parser) {
+ av_parser_close(st->parser);
+ st->parser= NULL;
+ }
+ }
+
+ /* we read the first packets to get the first PTS (not fully
+       accurate, but it is enough for now) */
+ url_fseek(&ic->pb, 0, SEEK_SET);
+ read_size = 0;
+ for(;;) {
+ if (read_size >= DURATION_MAX_READ_SIZE)
+ break;
+ /* if all info is available, we can stop */
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->start_time == AV_NOPTS_VALUE)
+ break;
+ }
+ if (i == ic->nb_streams)
+ break;
+
+ ret = av_read_packet(ic, pkt);
+ if (ret != 0)
+ break;
+ read_size += pkt->size;
+ st = ic->streams[pkt->stream_index];
+ if (pkt->pts != AV_NOPTS_VALUE) {
+ if (st->start_time == AV_NOPTS_VALUE)
+ st->start_time = pkt->pts;
+ }
+ av_free_packet(pkt);
+ }
+
+ /* estimate the end time (duration) */
+ /* XXX: may need to support wrapping */
+ filesize = ic->file_size;
+ offset = filesize - DURATION_MAX_READ_SIZE;
+ if (offset < 0)
+ offset = 0;
+
+ url_fseek(&ic->pb, offset, SEEK_SET);
+ read_size = 0;
+ for(;;) {
+ if (read_size >= DURATION_MAX_READ_SIZE)
+ break;
+ /* if all info is available, we can stop */
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ if (st->duration == AV_NOPTS_VALUE)
+ break;
+ }
+ if (i == ic->nb_streams)
+ break;
+
+ ret = av_read_packet(ic, pkt);
+ if (ret != 0)
+ break;
+ read_size += pkt->size;
+ st = ic->streams[pkt->stream_index];
+ if (pkt->pts != AV_NOPTS_VALUE) {
+ end_time = pkt->pts;
+ duration = end_time - st->start_time;
+ if (duration > 0) {
+ if (st->duration == AV_NOPTS_VALUE ||
+ st->duration < duration)
+ st->duration = duration;
+ }
+ }
+ av_free_packet(pkt);
+ }
+
+ fill_all_stream_timings(ic);
+
+ url_fseek(&ic->pb, 0, SEEK_SET);
+}
+
+static void av_estimate_timings(AVFormatContext *ic)
+{
+ int64_t file_size;
+
+ /* get the file size, if possible */
+ if (ic->iformat->flags & AVFMT_NOFILE) {
+ file_size = 0;
+ } else {
+ file_size = url_fsize(&ic->pb);
+ if (file_size < 0)
+ file_size = 0;
+ }
+ ic->file_size = file_size;
+
+ if ((!strcmp(ic->iformat->name, "mpeg") ||
+ !strcmp(ic->iformat->name, "mpegts")) &&
+ file_size && !ic->pb.is_streamed) {
+ /* get accurate estimate from the PTSes */
+ av_estimate_timings_from_pts(ic);
+ } else if (av_has_timings(ic)) {
+        /* at least one component has timings - we use them for all
+ the components */
+ fill_all_stream_timings(ic);
+ } else {
+ /* less precise: use bit rate info */
+ av_estimate_timings_from_bit_rate(ic);
+ }
+ av_update_stream_timings(ic);
+
+#if 0
+ {
+ int i;
+ AVStream *st;
+ for(i = 0;i < ic->nb_streams; i++) {
+ st = ic->streams[i];
+ printf("%d: start_time: %0.3f duration: %0.3f\n",
+ i, (double)st->start_time / AV_TIME_BASE,
+ (double)st->duration / AV_TIME_BASE);
+ }
+ printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
+ (double)ic->start_time / AV_TIME_BASE,
+ (double)ic->duration / AV_TIME_BASE,
+ ic->bit_rate / 1000);
+ }
+#endif
+}
+
+static int has_codec_parameters(AVCodecContext *enc)
+{
+ int val;
+ switch(enc->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ val = enc->sample_rate;
+ break;
+ case CODEC_TYPE_VIDEO:
+ val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
+ break;
+ default:
+ val = 1;
+ break;
+ }
+ return (val != 0);
+}
+
+static int try_decode_frame(AVStream *st, const uint8_t *data, int size)
+{
+ int16_t *samples;
+ AVCodec *codec;
+ int got_picture, ret=0;
+ AVFrame picture;
+
+ if(!st->codec->codec){
+ codec = avcodec_find_decoder(st->codec->codec_id);
+ if (!codec)
+ return -1;
+ ret = avcodec_open(st->codec, codec);
+ if (ret < 0)
+ return ret;
+ }
+
+ if(!has_codec_parameters(st->codec)){
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_VIDEO:
+ ret = avcodec_decode_video(st->codec, &picture,
+ &got_picture, (uint8_t *)data, size);
+ break;
+ case CODEC_TYPE_AUDIO:
+ samples = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
+ if (!samples)
+ goto fail;
+ ret = avcodec_decode_audio(st->codec, samples,
+ &got_picture, (uint8_t *)data, size);
+ av_free(samples);
+ break;
+ default:
+ break;
+ }
+ }
+ fail:
+ return ret;
+}
+
+/* absolute maximum size we read until we abort */
+#define MAX_READ_SIZE 5000000
+
+/* maximum duration until we stop analysing the stream */
+#define MAX_STREAM_DURATION ((int)(AV_TIME_BASE * 3.0))
+
+/**
+ * Read the beginning of a media file to get stream information. This
+ * is useful for file formats with no headers such as MPEG. This
+ * function also computes the real frame rate in case of MPEG-2 repeat
+ * frame mode.
+ *
+ * @param ic media file handle
+ * @return >=0 if OK. AVERROR_xxx if error.
+ * @todo Let the user decide somehow what information is needed so we do not waste time getting stuff the user does not need
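+ *
+ * Typical use right after av_open_input_file() might look like this
+ * sketch ('ic' is the context returned by that call; the file name
+ * and stream selection are illustrative):
+ * @code
+ * int i, video_index = -1;
+ * if (av_find_stream_info(ic) < 0)
+ *     return -1;
+ * dump_format(ic, 0, "input.mpg", 0);
+ * for (i = 0; i < ic->nb_streams; i++)
+ *     if (ic->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
+ *         video_index = i;
+ * @endcode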
+ */
+int av_find_stream_info(AVFormatContext *ic)
+{
+ int i, count, ret, read_size, j;
+ AVStream *st;
+ AVPacket pkt1, *pkt;
+ AVPacketList *pktl=NULL, **ppktl;
+ int64_t last_dts[MAX_STREAMS];
+ int64_t duration_sum[MAX_STREAMS];
+ int duration_count[MAX_STREAMS]={0};
+
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO){
+/* if(!st->time_base.num)
+ st->time_base= */
+ if(!st->codec->time_base.num)
+ st->codec->time_base= st->time_base;
+ }
+ //only for the split stuff
+ if (!st->parser) {
+ st->parser = av_parser_init(st->codec->codec_id);
+ if(st->need_parsing == 2 && st->parser){
+ st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
+ }
+ }
+ }
+
+ for(i=0;i<MAX_STREAMS;i++){
+ last_dts[i]= AV_NOPTS_VALUE;
+ duration_sum[i]= INT64_MAX;
+ }
+
+ count = 0;
+ read_size = 0;
+ ppktl = &ic->packet_buffer;
+ for(;;) {
+ /* check if one codec still needs to be handled */
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (!has_codec_parameters(st->codec))
+ break;
+ /* variable fps and no guess at the real fps */
+ if( st->codec->time_base.den >= 101LL*st->codec->time_base.num
+ && duration_count[i]<20 && st->codec->codec_type == CODEC_TYPE_VIDEO)
+ break;
+ if(st->parser && st->parser->parser->split && !st->codec->extradata)
+ break;
+ }
+ if (i == ic->nb_streams) {
+ /* NOTE: if the format has no header, then we need to read
+ some packets to get most of the streams, so we cannot
+ stop here */
+ if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
+ /* if we found the info for all the codecs, we can stop */
+ ret = count;
+ break;
+ }
+ }
+ /* we did not get all the codec info, but we read too much data */
+ if (read_size >= MAX_READ_SIZE) {
+ ret = count;
+ break;
+ }
+
+        /* NOTE: a new stream can be added here if the file has no header
+ (AVFMTCTX_NOHEADER) */
+ ret = av_read_frame_internal(ic, &pkt1);
+ if (ret < 0) {
+ /* EOF or error */
+ ret = -1; /* we could not have all the codec parameters before EOF */
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (!has_codec_parameters(st->codec)){
+ char buf[256];
+ avcodec_string(buf, sizeof(buf), st->codec, 0);
+ av_log(ic, AV_LOG_INFO, "Could not find codec parameters (%s)\n", buf);
+ } else {
+ ret = 0;
+ }
+ }
+ break;
+ }
+
+ pktl = av_mallocz(sizeof(AVPacketList));
+ if (!pktl) {
+ ret = AVERROR_NOMEM;
+ break;
+ }
+
+ /* add the packet in the buffered packet list */
+ *ppktl = pktl;
+ ppktl = &pktl->next;
+
+ pkt = &pktl->pkt;
+ *pkt = pkt1;
+
+ /* duplicate the packet */
+ if (av_dup_packet(pkt) < 0) {
+ ret = AVERROR_NOMEM;
+ break;
+ }
+
+ read_size += pkt->size;
+
+ st = ic->streams[pkt->stream_index];
+ st->codec_info_duration += pkt->duration;
+ if (pkt->duration != 0)
+ st->codec_info_nb_frames++;
+
+ {
+ int index= pkt->stream_index;
+ int64_t last= last_dts[index];
+ int64_t duration= pkt->dts - last;
+
+ if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
+ if(duration*duration_count[index]*10/9 < duration_sum[index]){
+ duration_sum[index]= duration;
+ duration_count[index]=1;
+ }else{
+ int factor= av_rescale(2*duration, duration_count[index], duration_sum[index]);
+ if(factor==3)
+ duration_count[index] *= 2;
+ factor= av_rescale(duration, duration_count[index], duration_sum[index]);
+ duration_sum[index] += duration;
+ duration_count[index]+= factor;
+ }
+ if(st->codec_info_nb_frames == 0 && 0)
+ st->codec_info_duration += duration;
+ }
+ last_dts[pkt->stream_index]= pkt->dts;
+ }
+ if(st->parser && st->parser->parser->split && !st->codec->extradata){
+ int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
+ if(i){
+ st->codec->extradata_size= i;
+ st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
+ memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+ }
+ }
+
+ /* if still no information, we try to open the codec and to
+ decompress the frame. We try to avoid that in most cases as
+           it takes longer and uses more memory. For MPEG-4, we need to
+           decompress for QuickTime. */
+ if (!has_codec_parameters(st->codec) /*&&
+ (st->codec->codec_id == CODEC_ID_FLV1 ||
+ st->codec->codec_id == CODEC_ID_H264 ||
+ st->codec->codec_id == CODEC_ID_H263 ||
+ st->codec->codec_id == CODEC_ID_H261 ||
+ st->codec->codec_id == CODEC_ID_VORBIS ||
+ st->codec->codec_id == CODEC_ID_MJPEG ||
+ st->codec->codec_id == CODEC_ID_PNG ||
+ st->codec->codec_id == CODEC_ID_PAM ||
+ st->codec->codec_id == CODEC_ID_PGM ||
+ st->codec->codec_id == CODEC_ID_PGMYUV ||
+ st->codec->codec_id == CODEC_ID_PBM ||
+ st->codec->codec_id == CODEC_ID_PPM ||
+ st->codec->codec_id == CODEC_ID_SHORTEN ||
+ (st->codec->codec_id == CODEC_ID_MPEG4 && !st->need_parsing))*/)
+ try_decode_frame(st, pkt->data, pkt->size);
+
+ if (av_rescale_q(st->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= MAX_STREAM_DURATION) {
+ break;
+ }
+ count++;
+ }
+
+    // close codecs which were opened in try_decode_frame()
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if(st->codec->codec)
+ avcodec_close(st->codec);
+ }
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_sample)
+ st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
+
+ if(duration_count[i]
+ && (st->codec->time_base.num*101LL <= st->codec->time_base.den || st->codec->codec_id == CODEC_ID_MPEG2VIDEO) &&
+ //FIXME we should not special case mpeg2, but this needs testing with non mpeg2 ...
+ st->time_base.num*duration_sum[i]/duration_count[i]*101LL > st->time_base.den){
+ int64_t num, den, error, best_error;
+
+ num= st->time_base.den*duration_count[i];
+ den= st->time_base.num*duration_sum[i];
+
+ best_error= INT64_MAX;
+ for(j=1; j<60*12; j++){
+ error= FFABS(1001*12*num - 1001*j*den);
+ if(error < best_error){
+ best_error= error;
+ av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, j, 12, INT_MAX);
+ }
+ }
+ for(j=0; j<3; j++){
+ static const int ticks[]= {24,30,60};
+ error= FFABS(1001*12*num - 1000*12*den * ticks[j]);
+ if(error < best_error){
+ best_error= error;
+ av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, ticks[j]*1000, 1001, INT_MAX);
+ }
+ }
+ }
+
+ if (!st->r_frame_rate.num){
+ if( st->codec->time_base.den * (int64_t)st->time_base.num
+ <= st->codec->time_base.num * (int64_t)st->time_base.den){
+ st->r_frame_rate.num = st->codec->time_base.den;
+ st->r_frame_rate.den = st->codec->time_base.num;
+ }else{
+ st->r_frame_rate.num = st->time_base.den;
+ st->r_frame_rate.den = st->time_base.num;
+ }
+ }
+ }
+ }
+
+ av_estimate_timings(ic);
+#if 0
+ /* correct DTS for b frame streams with no timestamps */
+ for(i=0;i<ic->nb_streams;i++) {
+ st = ic->streams[i];
+ if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
+ if(b-frames){
+ ppktl = &ic->packet_buffer;
+ while(ppkt1){
+ if(ppkt1->stream_index != i)
+ continue;
+ if(ppkt1->pkt->dts < 0)
+ break;
+ if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
+ break;
+ ppkt1->pkt->dts -= delta;
+ ppkt1= ppkt1->next;
+ }
+ if(ppkt1)
+ continue;
+ st->cur_dts -= delta;
+ }
+ }
+ }
+#endif
+ return ret;
+}
+
+/*******************************************************/
+
+/**
+ * Start playing a network-based stream (e.g. an RTSP stream) at the
+ * current position.
+ */
+int av_read_play(AVFormatContext *s)
+{
+ if (!s->iformat->read_play)
+ return AVERROR_NOTSUPP;
+ return s->iformat->read_play(s);
+}
+
+/**
+ * Pause a network-based stream (e.g. an RTSP stream).
+ *
+ * Use av_read_play() to resume it.
+ */
+int av_read_pause(AVFormatContext *s)
+{
+ if (!s->iformat->read_pause)
+ return AVERROR_NOTSUPP;
+ return s->iformat->read_pause(s);
+}
+
+/**
+ * Close a media file (but not its codecs).
+ *
+ * @param s media file handle
+ */
+void av_close_input_file(AVFormatContext *s)
+{
+ int i, must_open_file;
+ AVStream *st;
+
+ /* free previous packet */
+ if (s->cur_st && s->cur_st->parser)
+ av_free_packet(&s->cur_pkt);
+
+ if (s->iformat->read_close)
+ s->iformat->read_close(s);
+ for(i=0;i<s->nb_streams;i++) {
+ /* free all data in a stream component */
+ st = s->streams[i];
+ if (st->parser) {
+ av_parser_close(st->parser);
+ }
+ av_free(st->index_entries);
+ av_free(st->codec->extradata);
+ av_free(st->codec);
+ av_free(st);
+ }
+ flush_packet_queue(s);
+ must_open_file = 1;
+ if (s->iformat->flags & AVFMT_NOFILE) {
+ must_open_file = 0;
+ }
+ if (must_open_file) {
+ url_fclose(&s->pb);
+ }
+ av_freep(&s->priv_data);
+ av_free(s);
+}
+
+/**
+ * Add a new stream to a media file.
+ *
+ * Can only be called in the read_header() function. If the flag
+ * AVFMTCTX_NOHEADER is in the format context, then new streams
+ * can be added in read_packet too.
+ *
+ * @param s media file handle
+ * @param id file format dependent stream id
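+ *
+ * A demuxer's read_header() could create a stream like this (sketch;
+ * the codec parameters are illustrative values only):
+ * @code
+ * AVStream *st = av_new_stream(s, 0);
+ * if (!st)
+ *     return AVERROR_NOMEM;
+ * st->codec->codec_type  = CODEC_TYPE_AUDIO;
+ * st->codec->codec_id    = CODEC_ID_PCM_S16LE;
+ * st->codec->sample_rate = 44100;
+ * st->codec->channels    = 2;
+ * av_set_pts_info(st, 64, 1, 44100);
+ * @endcode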
+ */
+AVStream *av_new_stream(AVFormatContext *s, int id)
+{
+ AVStream *st;
+ int i;
+
+ if (s->nb_streams >= MAX_STREAMS)
+ return NULL;
+
+ st = av_mallocz(sizeof(AVStream));
+ if (!st)
+ return NULL;
+
+ st->codec= avcodec_alloc_context();
+ if (s->iformat) {
+ /* no default bitrate if decoding */
+ st->codec->bit_rate = 0;
+ }
+ st->index = s->nb_streams;
+ st->id = id;
+ st->start_time = AV_NOPTS_VALUE;
+ st->duration = AV_NOPTS_VALUE;
+ st->cur_dts = AV_NOPTS_VALUE;
+
+    /* default pts setting is MPEG-like */
+ av_set_pts_info(st, 33, 1, 90000);
+ st->last_IP_pts = AV_NOPTS_VALUE;
+ for(i=0; i<MAX_REORDER_DELAY+1; i++)
+ st->pts_buffer[i]= AV_NOPTS_VALUE;
+
+ s->streams[s->nb_streams++] = st;
+ return st;
+}
+
+/************************************************************/
+/* output media file */
+
+int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
+{
+ int ret;
+
+ if (s->oformat->priv_data_size > 0) {
+ s->priv_data = av_mallocz(s->oformat->priv_data_size);
+ if (!s->priv_data)
+ return AVERROR_NOMEM;
+ } else
+ s->priv_data = NULL;
+
+ if (s->oformat->set_parameters) {
+ ret = s->oformat->set_parameters(s, ap);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * Allocate the stream private data and write the stream header to an
+ * output media file.
+ *
+ * @param s media file handle
+ * @return 0 if OK. AVERROR_xxx if error.
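+ *
+ * A hedged sketch of the minimum setup expected by the sanity checks
+ * below ('oc' and its oformat are assumed to be prepared already, for
+ * instance via guess_format() and av_set_parameters()):
+ * @code
+ * AVStream *st = av_new_stream(oc, 0);
+ * st->codec->codec_type    = CODEC_TYPE_VIDEO;
+ * st->codec->width         = 320;
+ * st->codec->height        = 240;
+ * st->codec->time_base.num = 1;
+ * st->codec->time_base.den = 25;
+ * if (av_write_header(oc) < 0)
+ *     return -1;
+ * @endcode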
+ */
+int av_write_header(AVFormatContext *s)
+{
+ int ret, i;
+ AVStream *st;
+
+ // some sanity checks
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+
+ switch (st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ if(st->codec->sample_rate<=0){
+ av_log(s, AV_LOG_ERROR, "sample rate not set\n");
+ return -1;
+ }
+ break;
+ case CODEC_TYPE_VIDEO:
+ if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
+ av_log(s, AV_LOG_ERROR, "time base not set\n");
+ return -1;
+ }
+ if(st->codec->width<=0 || st->codec->height<=0){
+ av_log(s, AV_LOG_ERROR, "dimensions not set\n");
+ return -1;
+ }
+ break;
+ }
+ }
+
+ if(s->oformat->write_header){
+ ret = s->oformat->write_header(s);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* init PTS generation */
+ for(i=0;i<s->nb_streams;i++) {
+ int64_t den = AV_NOPTS_VALUE;
+ st = s->streams[i];
+
+ switch (st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ den = (int64_t)st->time_base.num * st->codec->sample_rate;
+ break;
+ case CODEC_TYPE_VIDEO:
+ den = (int64_t)st->time_base.num * st->codec->time_base.den;
+ break;
+ default:
+ break;
+ }
+ if (den != AV_NOPTS_VALUE) {
+ if (den <= 0)
+ return AVERROR_INVALIDDATA;
+ av_frac_init(&st->pts, 0, 0, den);
+ }
+ }
+ return 0;
+}
+
+//FIXME merge with compute_pkt_fields
+static int compute_pkt_fields2(AVStream *st, AVPacket *pkt){
+ int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
+ int num, den, frame_size, i;
+
+// av_log(st->codec, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
+
+/* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
+ return -1;*/
+
+ /* duration field */
+ if (pkt->duration == 0) {
+ compute_frame_duration(&num, &den, st, NULL, pkt);
+ if (den && num) {
+ pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num);
+ }
+ }
+
+ //XXX/FIXME this is a temporary hack until all encoders output pts
+ if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
+ pkt->dts=
+// pkt->pts= st->cur_dts;
+ pkt->pts= st->pts.val;
+ }
+
+ //calculate dts from pts
+ if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
+ st->pts_buffer[0]= pkt->pts;
+ for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
+ st->pts_buffer[i]= (i-delay-1) * pkt->duration;
+ for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
+ FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
+
+ pkt->dts= st->pts_buffer[0];
+ }
+
+ if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
+ av_log(NULL, AV_LOG_ERROR, "error, non monotone timestamps %"PRId64" >= %"PRId64"\n", st->cur_dts, pkt->dts);
+ return -1;
+ }
+ if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
+ av_log(NULL, AV_LOG_ERROR, "error, pts < dts\n");
+ return -1;
+ }
+
+// av_log(NULL, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
+ st->cur_dts= pkt->dts;
+ st->pts.val= pkt->dts;
+
+ /* update pts */
+ switch (st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ frame_size = get_audio_frame_size(st->codec, pkt->size);
+
+ /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
+ but it would be better if we had the real timestamps from the encoder */
+ if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
+ av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
+ }
+ break;
+ case CODEC_TYPE_VIDEO:
+ av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void truncate_ts(AVStream *st, AVPacket *pkt){
+ int64_t pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
+
+// if(pkt->dts < 0)
+//        pkt->dts= 0; //this happens for low_delay=0 and b frames, FIXME, needs further investigation about what we should do here
+
+ pkt->pts &= pts_mask;
+ pkt->dts &= pts_mask;
+}
+
+/**
+ * Write a packet to an output media file.
+ *
+ * The packet shall contain one audio or video frame.
+ *
+ * @param s media file handle
+ * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
+ * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
+ */
+int av_write_frame(AVFormatContext *s, AVPacket *pkt)
+{
+ int ret;
+
+ ret=compute_pkt_fields2(s->streams[pkt->stream_index], pkt);
+ if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+ return ret;
+
+ truncate_ts(s->streams[pkt->stream_index], pkt);
+
+ ret= s->oformat->write_packet(s, pkt);
+ if(!ret)
+ ret= url_ferror(&s->pb);
+ return ret;
+}
+
+/**
+ * Interleave a packet per DTS in an output media file.
+ *
+ * Packets with pkt->destruct == av_destruct_packet will be freed inside this function,
+ * so they cannot be used after it; note that calling av_free_packet() on them is still safe.
+ *
+ * @param s media file handle
+ * @param out the interleaved packet will be output here
+ * @param pkt the input packet
+ * @param flush 1 if no further packets are available as input and all
+ * remaining packets should be output
+ * @return 1 if a packet was output, 0 if no packet could be output,
+ *         < 0 if an error occurred
+ */
+int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
+ AVPacketList *pktl, **next_point, *this_pktl;
+ int stream_count=0;
+ int streams[MAX_STREAMS];
+
+ if(pkt){
+ AVStream *st= s->streams[ pkt->stream_index];
+
+// assert(pkt->destruct != av_destruct_packet); //FIXME
+
+ this_pktl = av_mallocz(sizeof(AVPacketList));
+ this_pktl->pkt= *pkt;
+ if(pkt->destruct == av_destruct_packet)
+ pkt->destruct= NULL; // non shared -> must keep original from being freed
+ else
+ av_dup_packet(&this_pktl->pkt); //shared -> must dup
+
+ next_point = &s->packet_buffer;
+ while(*next_point){
+ AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
+ int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
+ int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
+ if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
+ break;
+ next_point= &(*next_point)->next;
+ }
+ this_pktl->next= *next_point;
+ *next_point= this_pktl;
+ }
+
+ memset(streams, 0, sizeof(streams));
+ pktl= s->packet_buffer;
+ while(pktl){
+//av_log(s, AV_LOG_DEBUG, "show st:%d dts:%"PRId64"\n", pktl->pkt.stream_index, pktl->pkt.dts);
+ if(streams[ pktl->pkt.stream_index ] == 0)
+ stream_count++;
+ streams[ pktl->pkt.stream_index ]++;
+ pktl= pktl->next;
+ }
+
+ if(s->nb_streams == stream_count || (flush && stream_count)){
+ pktl= s->packet_buffer;
+ *out= pktl->pkt;
+
+ s->packet_buffer= pktl->next;
+ av_freep(&pktl);
+ return 1;
+ }else{
+ av_init_packet(out);
+ return 0;
+ }
+}
+
+/**
+ * Interleaves an AVPacket correctly so it can be muxed.
+ * @param out the interleaved packet will be output here
+ * @param in the input packet
+ * @param flush 1 if no further packets are available as input and all
+ * remaining packets should be output
+ * @return 1 if a packet was output, 0 if no packet could be output,
+ *         < 0 if an error occurred
+ */
+static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
+ if(s->oformat->interleave_packet)
+ return s->oformat->interleave_packet(s, out, in, flush);
+ else
+ return av_interleave_packet_per_dts(s, out, in, flush);
+}
+
+/**
+ * Writes a packet to an output media file ensuring correct interleaving.
+ *
+ * The packet must contain one audio or video frame.
+ * If the packets are already correctly interleaved, the application should
+ * call av_write_frame() instead as it is slightly faster. It is also important
+ * to keep in mind that completely non-interleaved input will need huge amounts
+ * of memory to interleave with this, so it is preferable to interleave at the
+ * demuxer level.
+ *
+ * @param s media file handle
+ * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
+ * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
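+ *
+ * A sketch of writing one encoded video frame ('oc', 'st', 'outbuf',
+ * 'out_size' and the codec context 'enc' are assumed to come from the
+ * muxer and encoder setup; values are illustrative):
+ * @code
+ * AVPacket pkt;
+ * av_init_packet(&pkt);
+ * pkt.stream_index = st->index;
+ * pkt.data         = outbuf;
+ * pkt.size         = out_size;
+ * pkt.pts          = av_rescale_q(enc->coded_frame->pts, enc->time_base, st->time_base);
+ * if (enc->coded_frame->key_frame)
+ *     pkt.flags |= PKT_FLAG_KEY;
+ * if (av_interleaved_write_frame(oc, &pkt) < 0)
+ *     return -1;
+ * @endcode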
+ */
+int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
+ AVStream *st= s->streams[ pkt->stream_index];
+
+ //FIXME/XXX/HACK drop zero sized packets
+ if(st->codec->codec_type == CODEC_TYPE_AUDIO && pkt->size==0)
+ return 0;
+
+//av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
+ if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+ return -1;
+
+ if(pkt->dts == AV_NOPTS_VALUE)
+ return -1;
+
+ for(;;){
+ AVPacket opkt;
+ int ret= av_interleave_packet(s, &opkt, pkt, 0);
+ if(ret<=0) //FIXME cleanup needed for ret<0 ?
+ return ret;
+
+ truncate_ts(s->streams[opkt.stream_index], &opkt);
+ ret= s->oformat->write_packet(s, &opkt);
+
+ av_free_packet(&opkt);
+ pkt= NULL;
+
+ if(ret<0)
+ return ret;
+ if(url_ferror(&s->pb))
+ return url_ferror(&s->pb);
+ }
+}
+
+/**
+ * @brief Write the stream trailer to an output media file and
+ * free the file private data.
+ *
+ * @param s media file handle
+ * @return 0 if OK. AVERROR_xxx if error.
+ */
+int av_write_trailer(AVFormatContext *s)
+{
+ int ret, i;
+
+ for(;;){
+ AVPacket pkt;
+ ret= av_interleave_packet(s, &pkt, NULL, 1);
+ if(ret<0) //FIXME cleanup needed for ret<0 ?
+ goto fail;
+ if(!ret)
+ break;
+
+ truncate_ts(s->streams[pkt.stream_index], &pkt);
+ ret= s->oformat->write_packet(s, &pkt);
+
+ av_free_packet(&pkt);
+
+ if(ret<0)
+ goto fail;
+ if(url_ferror(&s->pb))
+ goto fail;
+ }
+
+ if(s->oformat->write_trailer)
+ ret = s->oformat->write_trailer(s);
+fail:
+ if(ret == 0)
+ ret=url_ferror(&s->pb);
+ for(i=0;i<s->nb_streams;i++)
+ av_freep(&s->streams[i]->priv_data);
+ av_freep(&s->priv_data);
+ return ret;
+}
+
+/* "user interface" functions */
+
+void dump_format(AVFormatContext *ic,
+ int index,
+ const char *url,
+ int is_output)
+{
+ int i, flags;
+ char buf[256];
+
+ av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
+ is_output ? "Output" : "Input",
+ index,
+ is_output ? ic->oformat->name : ic->iformat->name,
+ is_output ? "to" : "from", url);
+ if (!is_output) {
+ av_log(NULL, AV_LOG_INFO, " Duration: ");
+ if (ic->duration != AV_NOPTS_VALUE) {
+ int hours, mins, secs, us;
+ secs = ic->duration / AV_TIME_BASE;
+ us = ic->duration % AV_TIME_BASE;
+ mins = secs / 60;
+ secs %= 60;
+ hours = mins / 60;
+ mins %= 60;
+ av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%01d", hours, mins, secs,
+ (10 * us) / AV_TIME_BASE);
+ } else {
+ av_log(NULL, AV_LOG_INFO, "N/A");
+ }
+ if (ic->start_time != AV_NOPTS_VALUE) {
+ int secs, us;
+ av_log(NULL, AV_LOG_INFO, ", start: ");
+ secs = ic->start_time / AV_TIME_BASE;
+ us = ic->start_time % AV_TIME_BASE;
+ av_log(NULL, AV_LOG_INFO, "%d.%06d",
+ secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
+ }
+ av_log(NULL, AV_LOG_INFO, ", bitrate: ");
+ if (ic->bit_rate) {
+ av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
+ } else {
+ av_log(NULL, AV_LOG_INFO, "N/A");
+ }
+ av_log(NULL, AV_LOG_INFO, "\n");
+ }
+ for(i=0;i<ic->nb_streams;i++) {
+ AVStream *st = ic->streams[i];
+ int g= ff_gcd(st->time_base.num, st->time_base.den);
+ avcodec_string(buf, sizeof(buf), st->codec, is_output);
+ av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i);
+        /* the PID is important information, so we display it */
+ /* XXX: add a generic system */
+ if (is_output)
+ flags = ic->oformat->flags;
+ else
+ flags = ic->iformat->flags;
+ if (flags & AVFMT_SHOW_IDS) {
+ av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
+ }
+ if (strlen(st->language) > 0) {
+ av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
+ }
+ av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
+ av_log(NULL, AV_LOG_INFO, ": %s", buf);
+ if(st->codec->codec_type == CODEC_TYPE_VIDEO){
+ if(st->r_frame_rate.den && st->r_frame_rate.num)
+ av_log(NULL, AV_LOG_INFO, ", %5.2f fps(r)", av_q2d(st->r_frame_rate));
+/* else if(st->time_base.den && st->time_base.num)
+ av_log(NULL, AV_LOG_INFO, ", %5.2f fps(m)", 1/av_q2d(st->time_base));*/
+ else
+ av_log(NULL, AV_LOG_INFO, ", %5.2f fps(c)", 1/av_q2d(st->codec->time_base));
+ }
+ av_log(NULL, AV_LOG_INFO, "\n");
+ }
+}
+
+typedef struct {
+ const char *abv;
+ int width, height;
+ int frame_rate, frame_rate_base;
+} AbvEntry;
+
+static AbvEntry frame_abvs[] = {
+ { "ntsc", 720, 480, 30000, 1001 },
+ { "pal", 720, 576, 25, 1 },
+ { "qntsc", 352, 240, 30000, 1001 }, /* VCD compliant ntsc */
+ { "qpal", 352, 288, 25, 1 }, /* VCD compliant pal */
+ { "sntsc", 640, 480, 30000, 1001 }, /* square pixel ntsc */
+ { "spal", 768, 576, 25, 1 }, /* square pixel pal */
+ { "film", 352, 240, 24, 1 },
+ { "ntsc-film", 352, 240, 24000, 1001 },
+ { "sqcif", 128, 96, 0, 0 },
+ { "qcif", 176, 144, 0, 0 },
+ { "cif", 352, 288, 0, 0 },
+ { "4cif", 704, 576, 0, 0 },
+};
+
+/**
+ * Parses the width and height out of string str.
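+ *
+ * For example (values from the abbreviation table above and a plain
+ * WIDTHxHEIGHT string):
+ * @code
+ * int w, h;
+ * parse_image_size(&w, &h, "qcif");    // w = 176, h = 144
+ * parse_image_size(&w, &h, "640x480"); // w = 640, h = 480
+ * @endcode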
+ */
+int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
+{
+ int i;
+ int n = sizeof(frame_abvs) / sizeof(AbvEntry);
+ const char *p;
+ int frame_width = 0, frame_height = 0;
+
+ for(i=0;i<n;i++) {
+ if (!strcmp(frame_abvs[i].abv, str)) {
+ frame_width = frame_abvs[i].width;
+ frame_height = frame_abvs[i].height;
+ break;
+ }
+ }
+ if (i == n) {
+ p = str;
+ frame_width = strtol(p, (char **)&p, 10);
+ if (*p)
+ p++;
+ frame_height = strtol(p, (char **)&p, 10);
+ }
+ if (frame_width <= 0 || frame_height <= 0)
+ return -1;
+ *width_ptr = frame_width;
+ *height_ptr = frame_height;
+ return 0;
+}
+
+/**
+ * Converts frame rate from string to a fraction.
+ *
+ * First we try to get an exact integer or fractional frame rate.
+ * If this fails we convert the frame rate to a double and return
+ * an approximate fraction using the DEFAULT_FRAME_RATE_BASE.
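+ *
+ * For example:
+ * @code
+ * int rate, base;
+ * parse_frame_rate(&rate, &base, "ntsc");       // rate = 30000, base = 1001
+ * parse_frame_rate(&rate, &base, "30000/1001"); // rate = 30000, base = 1001
+ * @endcode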
+ */
+int parse_frame_rate(int *frame_rate, int *frame_rate_base, const char *arg)
+{
+ int i;
+ char* cp;
+
+ /* First, we check our abbreviation table */
+ for (i = 0; i < sizeof(frame_abvs)/sizeof(*frame_abvs); ++i)
+ if (!strcmp(frame_abvs[i].abv, arg)) {
+ *frame_rate = frame_abvs[i].frame_rate;
+ *frame_rate_base = frame_abvs[i].frame_rate_base;
+ return 0;
+ }
+
+ /* Then, we try to parse it as fraction */
+ cp = strchr(arg, '/');
+ if (!cp)
+ cp = strchr(arg, ':');
+ if (cp) {
+ char* cpp;
+ *frame_rate = strtol(arg, &cpp, 10);
+ if (cpp != arg || cpp == cp)
+ *frame_rate_base = strtol(cp+1, &cpp, 10);
+ else
+ *frame_rate = 0;
+ }
+ else {
+ /* Finally we give up and parse it as double */
+ AVRational time_base = av_d2q(strtod(arg, 0), DEFAULT_FRAME_RATE_BASE);
+ *frame_rate_base = time_base.den;
+ *frame_rate = time_base.num;
+ }
+ if (!*frame_rate || !*frame_rate_base)
+ return -1;
+ else
+ return 0;
+}
+
+/**
+ * Converts a date string to the number of microseconds since Jan 1st, 1970.
+ *
+ * @code
+ * Syntax:
+ * - If not a duration:
+ * [{YYYY-MM-DD|YYYYMMDD}]{T| }{HH[:MM[:SS[.m...]]][Z]|HH[MM[SS[.m...]]][Z]}
+ * Time is local time unless Z is suffixed to the end, in which case it is GMT.
+ * Returns the date in microseconds since 1970.
+ *
+ * - If a duration:
+ * HH[:MM[:SS[.m...]]]
+ * S+[.m...]
+ * @endcode
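+ *
+ * For example (the absolute date is illustrative and its result
+ * depends on the local time zone):
+ * @code
+ * parse_date("2006-12-25T12:00:00", 0); // absolute date, local time
+ * parse_date("00:01:30.5", 1);          // duration: 90500000 microseconds
+ * @endcode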
+ */
+#ifndef CONFIG_WINCE
+int64_t parse_date(const char *datestr, int duration)
+{
+ const char *p;
+ int64_t t;
+ struct tm dt;
+ int i;
+ static const char *date_fmt[] = {
+ "%Y-%m-%d",
+ "%Y%m%d",
+ };
+ static const char *time_fmt[] = {
+ "%H:%M:%S",
+ "%H%M%S",
+ };
+ const char *q;
+ int is_utc, len;
+ char lastch;
+ int negative = 0;
+
+#undef time
+ time_t now = time(0);
+
+ len = strlen(datestr);
+ if (len > 0)
+ lastch = datestr[len - 1];
+ else
+ lastch = '\0';
+ is_utc = (lastch == 'z' || lastch == 'Z');
+
+ memset(&dt, 0, sizeof(dt));
+
+ p = datestr;
+ q = NULL;
+ if (!duration) {
+ for (i = 0; i < sizeof(date_fmt) / sizeof(date_fmt[0]); i++) {
+ q = small_strptime(p, date_fmt[i], &dt);
+ if (q) {
+ break;
+ }
+ }
+
+ if (!q) {
+ if (is_utc) {
+ dt = *gmtime(&now);
+ } else {
+ dt = *localtime(&now);
+ }
+ dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
+ } else {
+ p = q;
+ }
+
+ if (*p == 'T' || *p == 't' || *p == ' ')
+ p++;
+
+ for (i = 0; i < sizeof(time_fmt) / sizeof(time_fmt[0]); i++) {
+ q = small_strptime(p, time_fmt[i], &dt);
+ if (q) {
+ break;
+ }
+ }
+ } else {
+ if (p[0] == '-') {
+ negative = 1;
+ ++p;
+ }
+ q = small_strptime(p, time_fmt[0], &dt);
+ if (!q) {
+ dt.tm_sec = strtol(p, (char **)&q, 10);
+ dt.tm_min = 0;
+ dt.tm_hour = 0;
+ }
+ }
+
+ /* Now we have all the fields that we can get */
+ if (!q) {
+ if (duration)
+ return 0;
+ else
+ return now * int64_t_C(1000000);
+ }
+
+ if (duration) {
+ t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
+ } else {
+ dt.tm_isdst = -1; /* unknown */
+ if (is_utc) {
+ t = mktimegm(&dt);
+ } else {
+ t = mktime(&dt);
+ }
+ }
+
+ t *= 1000000;
+
+ if (*q == '.') {
+ int val, n;
+ q++;
+ for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
+ if (!isdigit(*q))
+ break;
+ val += n * (*q - '0');
+ }
+ t += val;
+ }
+ return negative ? -t : t;
+}
+#endif /* CONFIG_WINCE */
+
+/**
+ * Attempts to find a specific tag in a URL.
+ *
+ * Syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.
+ * Returns 1 if the tag was found.
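+ *
+ * For example:
+ * @code
+ * char buf[64];
+ * if (find_info_tag(buf, sizeof(buf), "ttl", "?multicast=1&ttl=3")) {
+ *     // buf now contains "3"
+ * }
+ * @endcode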
+ */
+int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
+{
+ const char *p;
+ char tag[128], *q;
+
+ p = info;
+ if (*p == '?')
+ p++;
+ for(;;) {
+ q = tag;
+ while (*p != '\0' && *p != '=' && *p != '&') {
+ if ((q - tag) < sizeof(tag) - 1)
+ *q++ = *p;
+ p++;
+ }
+ *q = '\0';
+ q = arg;
+ if (*p == '=') {
+ p++;
+ while (*p != '&' && *p != '\0') {
+ if ((q - arg) < arg_size - 1) {
+ if (*p == '+')
+ *q++ = ' ';
+ else
+ *q++ = *p;
+ }
+ p++;
+ }
+ *q = '\0';
+ }
+ if (!strcmp(tag, tag1))
+ return 1;
+ if (*p != '&')
+ break;
+ p++;
+ }
+ return 0;
+}
+
+/**
+ * Returns in 'buf' the path with '%d' replaced by number.
+ *
+ * Also handles the '%0nd' format where 'n' is the total number
+ * of digits and '%%'.
+ *
+ * @param buf destination buffer
+ * @param buf_size destination buffer size
+ * @param path numbered sequence string
+ * @param number frame number
+ * @return 0 if OK, -1 if format error.
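+ *
+ * For example:
+ * @code
+ * char name[64];
+ * if (av_get_frame_filename(name, sizeof(name), "img%03d.jpg", 7) == 0) {
+ *     // name now contains "img007.jpg"
+ * }
+ * @endcode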
+ */
+int av_get_frame_filename(char *buf, int buf_size,
+ const char *path, int number)
+{
+ const char *p;
+ char *q, buf1[20], c;
+ int nd, len, percentd_found;
+
+ q = buf;
+ p = path;
+ percentd_found = 0;
+ for(;;) {
+ c = *p++;
+ if (c == '\0')
+ break;
+ if (c == '%') {
+ do {
+ nd = 0;
+ while (isdigit(*p)) {
+ nd = nd * 10 + *p++ - '0';
+ }
+ c = *p++;
+ } while (isdigit(c));
+
+ switch(c) {
+ case '%':
+ goto addchar;
+ case 'd':
+ if (percentd_found)
+ goto fail;
+ percentd_found = 1;
+ snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
+ len = strlen(buf1);
+ if ((q - buf + len) > buf_size - 1)
+ goto fail;
+ memcpy(q, buf1, len);
+ q += len;
+ break;
+ default:
+ goto fail;
+ }
+ } else {
+ addchar:
+ if ((q - buf) < buf_size - 1)
+ *q++ = c;
+ }
+ }
+ if (!percentd_found)
+ goto fail;
+ *q = '\0';
+ return 0;
+ fail:
+ *q = '\0';
+ return -1;
+}
+
+/**
+ * Print a nice hex dump of a buffer.
+ * @param f stream for output
+ * @param buf buffer
+ * @param size buffer size
+ */
+void av_hex_dump(FILE *f, uint8_t *buf, int size)
+{
+ int len, i, j, c;
+
+ for(i=0;i<size;i+=16) {
+ len = size - i;
+ if (len > 16)
+ len = 16;
+ fprintf(f, "%08x ", i);
+ for(j=0;j<16;j++) {
+ if (j < len)
+ fprintf(f, " %02x", buf[i+j]);
+ else
+ fprintf(f, " ");
+ }
+ fprintf(f, " ");
+ for(j=0;j<len;j++) {
+ c = buf[i+j];
+ if (c < ' ' || c > '~')
+ c = '.';
+ fprintf(f, "%c", c);
+ }
+ fprintf(f, "\n");
+ }
+}
+
+/**
+ * Print on 'f' a nice dump of a packet
+ * @param f stream for output
+ * @param pkt packet to dump
+ * @param dump_payload true if the payload must be displayed too
+ */
+ //FIXME needs to know the time_base
+void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
+{
+ fprintf(f, "stream #%d:\n", pkt->stream_index);
+ fprintf(f, " keyframe=%d\n", ((pkt->flags & PKT_FLAG_KEY) != 0));
+ fprintf(f, " duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
+ /* DTS is _always_ valid after av_read_frame() */
+ fprintf(f, " dts=");
+ if (pkt->dts == AV_NOPTS_VALUE)
+ fprintf(f, "N/A");
+ else
+ fprintf(f, "%0.3f", (double)pkt->dts / AV_TIME_BASE);
+    /* PTS may not be known if B-frames are present */
+ fprintf(f, " pts=");
+ if (pkt->pts == AV_NOPTS_VALUE)
+ fprintf(f, "N/A");
+ else
+ fprintf(f, "%0.3f", (double)pkt->pts / AV_TIME_BASE);
+ fprintf(f, "\n");
+ fprintf(f, " size=%d\n", pkt->size);
+ if (dump_payload)
+ av_hex_dump(f, pkt->data, pkt->size);
+}
+
+void url_split(char *proto, int proto_size,
+ char *authorization, int authorization_size,
+ char *hostname, int hostname_size,
+ int *port_ptr,
+ char *path, int path_size,
+ const char *url)
+{
+ const char *p;
+ char *q;
+ int port;
+
+ port = -1;
+
+ p = url;
+ q = proto;
+ while (*p != ':' && *p != '\0') {
+ if ((q - proto) < proto_size - 1)
+ *q++ = *p;
+ p++;
+ }
+ if (proto_size > 0)
+ *q = '\0';
+ if (authorization_size > 0)
+ authorization[0] = '\0';
+ if (*p == '\0') {
+ if (proto_size > 0)
+ proto[0] = '\0';
+ if (hostname_size > 0)
+ hostname[0] = '\0';
+ p = url;
+ } else {
+ char *at,*slash; // PETR: position of '@' character and '/' character
+
+ p++;
+ if (*p == '/')
+ p++;
+ if (*p == '/')
+ p++;
+ at = strchr(p,'@'); // PETR: get the position of '@'
+ slash = strchr(p,'/'); // PETR: get position of '/' - end of hostname
+ if (at && slash && at > slash) at = NULL; // PETR: not interested in '@' behind '/'
+
+ q = at ? authorization : hostname; // PETR: if '@' exists starting with auth.
+
+ while ((at || *p != ':') && *p != '/' && *p != '?' && *p != '\0') { // PETR:
+ if (*p == '@') { // PETR: passed '@'
+ if (authorization_size > 0)
+ *q = '\0';
+ q = hostname;
+ at = NULL;
+ } else if (!at) { // PETR: hostname
+ if ((q - hostname) < hostname_size - 1)
+ *q++ = *p;
+ } else {
+ if ((q - authorization) < authorization_size - 1)
+ *q++ = *p;
+ }
+ p++;
+ }
+ if (hostname_size > 0)
+ *q = '\0';
+ if (*p == ':') {
+ p++;
+ port = strtoul(p, (char **)&p, 10);
+ }
+ }
+ if (port_ptr)
+ *port_ptr = port;
+ pstrcpy(path, path_size, p);
+}
+
+/**
+ * Set the pts info (time base and wrap bits) for a given stream.
+ *
+ * @param s stream
+ * @param pts_wrap_bits number of bits effectively used by the pts
+ * (used for wrap control, 33 is the value for MPEG)
+ * @param pts_num numerator to convert to seconds (MPEG: 1)
+ * @param pts_den denominator to convert to seconds (MPEG: 90000)
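+ *
+ * For example, a demuxer using the MPEG 90 kHz clock would call
+ * (values taken from the parameter notes above):
+ * @code
+ * av_set_pts_info(st, 33, 1, 90000);
+ * @endcode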
+ */
+void av_set_pts_info(AVStream *s, int pts_wrap_bits,
+ int pts_num, int pts_den)
+{
+ s->pts_wrap_bits = pts_wrap_bits;
+ s->time_base.num = pts_num;
+ s->time_base.den = pts_den;
+}
+
+/* fraction handling */
+
+/**
+ * f = val + (num / den) + 0.5.
+ *
+ * 'num' is normalized so that 0 <= num < den.
+ *
+ * @param f fractional number
+ * @param val integer value
+ * @param num must be >= 0
+ * @param den must be >= 1
+ */
+static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
+{
+ num += (den >> 1);
+ if (num >= den) {
+ val += num / den;
+ num = num % den;
+ }
+ f->val = val;
+ f->num = num;
+ f->den = den;
+}
+
+/**
+ * Set f to (val + 0.5).
+ */
+static void av_frac_set(AVFrac *f, int64_t val)
+{
+ f->val = val;
+ f->num = f->den >> 1;
+}
+
+/**
+ * Fractional addition to f: f = f + (incr / f->den).
+ *
+ * @param f fractional number
+ * @param incr increment, can be positive or negative
+ */
+static void av_frac_add(AVFrac *f, int64_t incr)
+{
+ int64_t num, den;
+
+ num = f->num + incr;
+ den = f->den;
+ if (num < 0) {
+ f->val += num / den;
+ num = num % den;
+ if (num < 0) {
+ num += den;
+ f->val--;
+ }
+ } else if (num >= den) {
+ f->val += num / den;
+ num = num % den;
+ }
+ f->num = num;
+}
diff --git a/contrib/ffmpeg/libavformat/v4l2.c b/contrib/ffmpeg/libavformat/v4l2.c
new file mode 100644
index 000000000..00adccaa8
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/v4l2.c
@@ -0,0 +1,541 @@
+/*
+ * Video4Linux2 grab interface
+ * Copyright (c) 2000,2001 Fabrice Bellard.
+ * Copyright (c) 2006 Luca Abeni.
+ *
+ * Part of this file is based on the V4L2 video capture example
+ * (http://v4l2spec.bytesex.org/v4l2spec/capture.c)
+ *
+ * Thanks to Michael Niedermayer for providing the mapping between
+ * V4L2_PIX_FMT_* and PIX_FMT_*
+ *
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <asm/types.h>
+#include <linux/videodev2.h>
+#include <time.h>
+
+static const int desired_video_buffers = 256;
+
+enum io_method {
+ io_read,
+ io_mmap,
+ io_userptr
+};
+
+struct video_data {
+ int fd;
+ int frame_format; /* V4L2_PIX_FMT_* */
+ enum io_method io_method;
+ int width, height;
+ int frame_rate;
+ int frame_rate_base;
+ int frame_size;
+ int top_field_first;
+
+ int buffers;
+ void **buf_start;
+ unsigned int *buf_len;
+};
+
+struct fmt_map {
+ enum PixelFormat ff_fmt;
+ int32_t v4l2_fmt;
+};
+
+static struct fmt_map fmt_conversion_table[] = {
+ {
+ .ff_fmt = PIX_FMT_YUV420P,
+ .v4l2_fmt = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .ff_fmt = PIX_FMT_YUV422P,
+ .v4l2_fmt = V4L2_PIX_FMT_YUV422P,
+ },
+ {
+ .ff_fmt = PIX_FMT_YUV422,
+ .v4l2_fmt = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .ff_fmt = PIX_FMT_UYVY422,
+ .v4l2_fmt = V4L2_PIX_FMT_UYVY,
+ },
+ {
+ .ff_fmt = PIX_FMT_YUV411P,
+ .v4l2_fmt = V4L2_PIX_FMT_YUV411P,
+ },
+ {
+ .ff_fmt = PIX_FMT_YUV410P,
+ .v4l2_fmt = V4L2_PIX_FMT_YUV410,
+ },
+ {
+ .ff_fmt = PIX_FMT_BGR24,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR24,
+ },
+ {
+ .ff_fmt = PIX_FMT_RGB24,
+ .v4l2_fmt = V4L2_PIX_FMT_RGB24,
+ },
+ /*
+ {
+ .ff_fmt = PIX_FMT_RGBA32,
+ .v4l2_fmt = V4L2_PIX_FMT_BGR32,
+ },
+ */
+ {
+ .ff_fmt = PIX_FMT_GRAY8,
+ .v4l2_fmt = V4L2_PIX_FMT_GREY,
+ },
+};
+
+static int device_open(const char *devname, uint32_t *capabilities)
+{
+ struct v4l2_capability cap;
+ int fd;
+ int res;
+
+ fd = open(devname, O_RDWR /*| O_NONBLOCK*/, 0);
+ if (fd < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open video device %s : %s\n",
+ devname, strerror(errno));
+
+ return -1;
+ }
+
+ res = ioctl(fd, VIDIOC_QUERYCAP, &cap);
+ // the ENOIOCTLCMD definition is only available when __KERNEL__ is defined
+ if (res < 0 && errno == 515)
+ {
+ av_log(NULL, AV_LOG_ERROR, "QUERYCAP not implemented, probably V4L device but not supporting V4L2\n");
+ close(fd);
+
+ return -1;
+ }
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYCAP): %s\n",
+ strerror(errno));
+ close(fd);
+
+ return -1;
+ }
+ if ((cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
+ av_log(NULL, AV_LOG_ERROR, "Not a video capture device\n");
+ close(fd);
+
+ return -1;
+ }
+ *capabilities = cap.capabilities;
+
+ return fd;
+}
+
+static int device_init(int fd, int *width, int *height, int pix_fmt)
+{
+ struct v4l2_format fmt;
+ int res;
+
+ memset(&fmt, 0, sizeof(struct v4l2_format));
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ fmt.fmt.pix.width = *width;
+ fmt.fmt.pix.height = *height;
+ fmt.fmt.pix.pixelformat = pix_fmt;
+ fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
+ res = ioctl(fd, VIDIOC_S_FMT, &fmt);
+ if ((*width != fmt.fmt.pix.width) || (*height != fmt.fmt.pix.height)) {
+ av_log(NULL, AV_LOG_INFO, "The V4L2 driver changed the video from %dx%d to %dx%d\n", *width, *height, fmt.fmt.pix.width, fmt.fmt.pix.height);
+ *width = fmt.fmt.pix.width;
+ *height = fmt.fmt.pix.height;
+ }
+
+ return res;
+}
+
+static int first_field(int fd)
+{
+ int res;
+ v4l2_std_id std;
+
+ res = ioctl(fd, VIDIOC_G_STD, &std);
+ if (res < 0) {
+ return 0;
+ }
+ if (std & V4L2_STD_NTSC) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static uint32_t fmt_ff2v4l(enum PixelFormat pix_fmt)
+{
+ int i;
+
+ for (i = 0; i < sizeof(fmt_conversion_table) / sizeof(struct fmt_map); i++) {
+ if (fmt_conversion_table[i].ff_fmt == pix_fmt) {
+ return fmt_conversion_table[i].v4l2_fmt;
+ }
+ }
+
+ return 0;
+}
+
+static enum PixelFormat fmt_v4l2ff(uint32_t pix_fmt)
+{
+ int i;
+
+ for (i = 0; i < sizeof(fmt_conversion_table) / sizeof(struct fmt_map); i++) {
+ if (fmt_conversion_table[i].v4l2_fmt == pix_fmt) {
+ return fmt_conversion_table[i].ff_fmt;
+ }
+ }
+
+ return -1;
+}
+
+static int mmap_init(struct video_data *s)
+{
+ struct v4l2_requestbuffers req;
+ int i, res;
+
+ memset(&req, 0, sizeof(struct v4l2_requestbuffers));
+ req.count = desired_video_buffers;
+ req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ req.memory = V4L2_MEMORY_MMAP;
+ res = ioctl (s->fd, VIDIOC_REQBUFS, &req);
+ if (res < 0) {
+ if (errno == EINVAL) {
+ av_log(NULL, AV_LOG_ERROR, "Device does not support mmap\n");
+ } else {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_REQBUFS)\n");
+ }
+
+ return -1;
+ }
+
+ if (req.count < 2) {
+ av_log(NULL, AV_LOG_ERROR, "Insufficient buffer memory\n");
+
+ return -1;
+ }
+ s->buffers = req.count;
+ s->buf_start = av_malloc(sizeof(void *) * s->buffers);
+ if (s->buf_start == NULL) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot allocate buffer pointers\n");
+
+ return -1;
+ }
+ s->buf_len = av_malloc(sizeof(unsigned int) * s->buffers);
+ if (s->buf_len == NULL) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot allocate buffer sizes\n");
+ av_free(s->buf_start);
+
+ return -1;
+ }
+
+ for (i = 0; i < req.count; i++) {
+ struct v4l2_buffer buf;
+
+ memset(&buf, 0, sizeof(struct v4l2_buffer));
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = i;
+ res = ioctl (s->fd, VIDIOC_QUERYBUF, &buf);
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QUERYBUF)\n");
+
+ return -1;
+ }
+
+ s->buf_len[i] = buf.length;
+ if (s->buf_len[i] < s->frame_size) {
+ av_log(NULL, AV_LOG_ERROR, "Buffer len [%d] = %d != %d\n", i, s->buf_len[i], s->frame_size);
+
+ return -1;
+ }
+ s->buf_start[i] = mmap (NULL, buf.length,
+ PROT_READ | PROT_WRITE, MAP_SHARED, s->fd, buf.m.offset);
+ if (s->buf_start[i] == MAP_FAILED) {
+ av_log(NULL, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int read_init(struct video_data *s)
+{
+ return -1;
+}
+
+static int mmap_read_frame(struct video_data *s, void *frame, int64_t *ts)
+{
+ struct v4l2_buffer buf;
+ int res;
+
+ memset(&buf, 0, sizeof(struct v4l2_buffer));
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+
+ /* FIXME: Some special treatment might be needed in case of loss of signal... */
+ while ((res = ioctl(s->fd, VIDIOC_DQBUF, &buf)) < 0 &&
+ ((errno == EAGAIN) || (errno == EINTR)));
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_DQBUF): %s\n", strerror(errno));
+
+ return -1;
+ }
+ assert (buf.index < s->buffers);
+ if (buf.bytesused != s->frame_size) {
+ av_log(NULL, AV_LOG_ERROR, "The v4l2 frame is %d bytes, but %d bytes are expected\n", buf.bytesused, s->frame_size);
+
+ return -1;
+ }
+
+ /* Image is at s->buf_start[buf.index] */
+ memcpy(frame, s->buf_start[buf.index], buf.bytesused);
+ *ts = buf.timestamp.tv_sec * int64_t_C(1000000) + buf.timestamp.tv_usec;
+
+ res = ioctl (s->fd, VIDIOC_QBUF, &buf);
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF)\n");
+
+ return -1;
+ }
+
+ return s->buf_len[buf.index];
+}
+
+static int read_frame(struct video_data *s, void *frame, int64_t *ts)
+{
+ return -1;
+}
+
+static int mmap_start(struct video_data *s)
+{
+ enum v4l2_buf_type type;
+ int i, res;
+
+ for (i = 0; i < s->buffers; i++) {
+ struct v4l2_buffer buf;
+
+ memset(&buf, 0, sizeof(struct v4l2_buffer));
+ buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.index = i;
+
+ res = ioctl (s->fd, VIDIOC_QBUF, &buf);
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_QBUF): %s\n", strerror(errno));
+
+ return -1;
+ }
+ }
+
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ res = ioctl (s->fd, VIDIOC_STREAMON, &type);
+ if (res < 0) {
+ av_log(NULL, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n", strerror(errno));
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static void mmap_close(struct video_data *s)
+{
+ enum v4l2_buf_type type;
+ int i;
+
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ /* We do not check the result because there is nothing
+ * we could do about a failure anyway.
+ */
+ ioctl(s->fd, VIDIOC_STREAMOFF, &type);
+ for (i = 0; i < s->buffers; i++) {
+ munmap(s->buf_start[i], s->buf_len[i]);
+ }
+ av_free(s->buf_start);
+ av_free(s->buf_len);
+}
+
+static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
+{
+ struct video_data *s = s1->priv_data;
+ AVStream *st;
+ int width, height;
+ int res, frame_rate, frame_rate_base;
+ uint32_t desired_format, capabilities;
+ const char *video_device;
+
+ if (ap->width <= 0 || ap->height <= 0 || ap->time_base.den <= 0) {
+ av_log(s1, AV_LOG_ERROR, "Missing/Wrong parameters\n");
+
+ return -1;
+ }
+
+ width = ap->width;
+ height = ap->height;
+ frame_rate = ap->time_base.den;
+ frame_rate_base = ap->time_base.num;
+
+ if((unsigned)width > 32767 || (unsigned)height > 32767) {
+ av_log(s1, AV_LOG_ERROR, "Wrong size %dx%d\n", width, height);
+
+ return -1;
+ }
+
+ st = av_new_stream(s1, 0);
+ if (!st) {
+ return -ENOMEM;
+ }
+ av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
+
+ s->width = width;
+ s->height = height;
+ s->frame_rate = frame_rate;
+ s->frame_rate_base = frame_rate_base;
+
+ video_device = ap->device;
+ if (!video_device) {
+ video_device = "/dev/video";
+ }
+ capabilities = 0;
+ s->fd = device_open(video_device, &capabilities);
+ if (s->fd < 0) {
+ av_free(st);
+
+ return AVERROR_IO;
+ }
+ av_log(s1, AV_LOG_INFO, "[%d]Capabilities: %x\n", s->fd, capabilities);
+
+ desired_format = fmt_ff2v4l(ap->pix_fmt);
+ if (desired_format == 0 || (device_init(s->fd, &width, &height, desired_format) < 0)) {
+ int i, done;
+
+ done = 0; i = 0;
+ while (!done) {
+ desired_format = fmt_conversion_table[i].v4l2_fmt;
+ if (device_init(s->fd, &width, &height, desired_format) < 0) {
+ desired_format = 0;
+ i++;
+ } else {
+ done = 1;
+ }
+ if (i == sizeof(fmt_conversion_table) / sizeof(struct fmt_map)) {
+ done = 1;
+ }
+ }
+ }
+ if (desired_format == 0) {
+ av_log(s1, AV_LOG_ERROR, "Cannot find a proper format.\n");
+ close(s->fd);
+ av_free(st);
+
+ return AVERROR_IO;
+ }
+ s->frame_format = desired_format;
+
+ st->codec->pix_fmt = fmt_v4l2ff(desired_format);
+ s->frame_size = avpicture_get_size(st->codec->pix_fmt, width, height);
+ if (capabilities & V4L2_CAP_STREAMING) {
+ s->io_method = io_mmap;
+ res = mmap_init(s);
+ if (res == 0) {
+ res = mmap_start(s);
+ }
+ } else {
+ s->io_method = io_read;
+ res = read_init(s);
+ }
+ if (res < 0) {
+ close(s->fd);
+ av_free(st);
+
+ return AVERROR_IO;
+ }
+ s->top_field_first = first_field(s->fd);
+
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->width = width;
+ st->codec->height = height;
+ st->codec->time_base.den = frame_rate;
+ st->codec->time_base.num = frame_rate_base;
+ st->codec->bit_rate = s->frame_size * 1/av_q2d(st->codec->time_base) * 8;
+
+ return 0;
+}
+
+static int v4l2_read_packet(AVFormatContext *s1, AVPacket *pkt)
+{
+ struct video_data *s = s1->priv_data;
+ int res;
+
+ if (av_new_packet(pkt, s->frame_size) < 0)
+ return AVERROR_IO;
+
+ if (s->io_method == io_mmap) {
+ res = mmap_read_frame(s, pkt->data, &pkt->pts);
+ } else if (s->io_method == io_read) {
+ res = read_frame(s, pkt->data, &pkt->pts);
+ } else {
+ return AVERROR_IO;
+ }
+ if (res < 0) {
+ return AVERROR_IO;
+ }
+
+ if (s1->streams[0]->codec->coded_frame) {
+ s1->streams[0]->codec->coded_frame->interlaced_frame = 1;
+ s1->streams[0]->codec->coded_frame->top_field_first = s->top_field_first;
+ }
+
+ return s->frame_size;
+}
+
+static int v4l2_read_close(AVFormatContext *s1)
+{
+ struct video_data *s = s1->priv_data;
+
+ if (s->io_method == io_mmap) {
+ mmap_close(s);
+ }
+
+ close(s->fd);
+ return 0;
+}
+
+AVInputFormat v4l2_demuxer = {
+ "video4linux2",
+ "video grab",
+ sizeof(struct video_data),
+ NULL,
+ v4l2_read_header,
+ v4l2_read_packet,
+ v4l2_read_close,
+ .flags = AVFMT_NOFILE,
+};
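The mmap_init()/mmap_start()/mmap_read_frame() trio above is the standard V4L2 streaming-I/O cycle: request buffers, map them, queue them all, start the stream, then dequeue and requeue one buffer per frame. A stripped-down standalone sketch of that cycle follows; the device path, resolution and pixel format are arbitrary assumptions, and most error handling is omitted:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
    int fd = open("/dev/video0", O_RDWR);   /* blocking fd, as in device_open() */
    if (fd < 0)
        return 1;

    struct v4l2_format fmt;
    memset(&fmt, 0, sizeof(fmt));
    fmt.type                = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width       = 640;
    fmt.fmt.pix.height      = 480;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;
    ioctl(fd, VIDIOC_S_FMT, &fmt);   /* the driver may adjust the size, as device_init() notes */

    struct v4l2_requestbuffers req;
    memset(&req, 0, sizeof(req));
    req.count  = 4;
    req.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0 || req.count < 2)
        return 1;
    if (req.count > 4)
        req.count = 4;

    void *map[4];
    size_t len[4];
    for (unsigned i = 0; i < req.count; i++) {
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(buf));
        buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index  = i;
        ioctl(fd, VIDIOC_QUERYBUF, &buf);
        len[i] = buf.length;
        map[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
                      fd, buf.m.offset);
        ioctl(fd, VIDIOC_QBUF, &buf);          /* hand every buffer to the driver */
    }

    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    ioctl(fd, VIDIOC_STREAMON, &type);

    for (int frame = 0; frame < 10; frame++) {
        struct v4l2_buffer buf;
        memset(&buf, 0, sizeof(buf));
        buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        ioctl(fd, VIDIOC_DQBUF, &buf);         /* blocks until a frame is filled */
        printf("frame %d: %u bytes\n", frame, buf.bytesused);
        ioctl(fd, VIDIOC_QBUF, &buf);          /* give the buffer back */
    }

    ioctl(fd, VIDIOC_STREAMOFF, &type);
    for (unsigned i = 0; i < req.count; i++)
        munmap(map[i], len[i]);
    close(fd);
    return 0;
}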
diff --git a/contrib/ffmpeg/libavformat/voc.c b/contrib/ffmpeg/libavformat/voc.c
new file mode 100644
index 000000000..329f07739
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/voc.c
@@ -0,0 +1,36 @@
+/*
+ * Creative Voice File common data.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "voc.h"
+
+const unsigned char voc_magic[21] = "Creative Voice File\x1A";
+
+const CodecTag voc_codec_tags[] = {
+ {CODEC_ID_PCM_U8, 0x00},
+ {CODEC_ID_ADPCM_SBPRO_4, 0x01},
+ {CODEC_ID_ADPCM_SBPRO_3, 0x02},
+ {CODEC_ID_ADPCM_SBPRO_2, 0x03},
+ {CODEC_ID_PCM_S16LE, 0x04},
+ {CODEC_ID_PCM_ALAW, 0x06},
+ {CODEC_ID_PCM_MULAW, 0x07},
+ {CODEC_ID_ADPCM_CT, 0x0200},
+ {0, 0},
+};
diff --git a/contrib/ffmpeg/libavformat/voc.h b/contrib/ffmpeg/libavformat/voc.h
new file mode 100644
index 000000000..16adb0078
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/voc.h
@@ -0,0 +1,51 @@
+/*
+ * Creative Voice File demuxer.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef VOC_H
+#define VOC_H
+
+#include "avformat.h"
+#include "riff.h" /* for CodecTag */
+
+typedef struct voc_dec_context {
+ int remaining_size;
+} voc_dec_context_t;
+
+typedef enum voc_type {
+ VOC_TYPE_EOF = 0x00,
+ VOC_TYPE_VOICE_DATA = 0x01,
+ VOC_TYPE_VOICE_DATA_CONT = 0x02,
+ VOC_TYPE_SILENCE = 0x03,
+ VOC_TYPE_MARKER = 0x04,
+ VOC_TYPE_ASCII = 0x05,
+ VOC_TYPE_REPETITION_START = 0x06,
+ VOC_TYPE_REPETITION_END = 0x07,
+ VOC_TYPE_EXTENDED = 0x08,
+ VOC_TYPE_NEW_VOICE_DATA = 0x09,
+} voc_type_t;
+
+extern const unsigned char voc_magic[21];
+extern const CodecTag voc_codec_tags[];
+
+int voc_get_packet(AVFormatContext *s, AVPacket *pkt,
+ AVStream *st, int max_size);
+
+#endif
diff --git a/contrib/ffmpeg/libavformat/vocdec.c b/contrib/ffmpeg/libavformat/vocdec.c
new file mode 100644
index 000000000..6a7869227
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/vocdec.c
@@ -0,0 +1,155 @@
+/*
+ * Creative Voice File demuxer.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "voc.h"
+
+
+static const int voc_max_pkt_size = 2048;
+
+
+static int voc_probe(AVProbeData *p)
+{
+ int version, check;
+
+ if (p->buf_size < 26)
+ return 0;
+ if (memcmp(p->buf, voc_magic, sizeof(voc_magic) - 1))
+ return 0;
+ version = p->buf[22] | (p->buf[23] << 8);
+ check = p->buf[24] | (p->buf[25] << 8);
+ if (~version + 0x1234 != check)
+ return 10;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int voc_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ voc_dec_context_t *voc = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int header_size;
+ AVStream *st;
+
+ url_fskip(pb, 20);
+ header_size = get_le16(pb) - 22;
+ if (header_size != 4) {
+ av_log(s, AV_LOG_ERROR, "unkown header size: %d\n", header_size);
+ return AVERROR_NOTSUPP;
+ }
+ url_fskip(pb, header_size);
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+
+ voc->remaining_size = 0;
+ return 0;
+}
+
+int
+voc_get_packet(AVFormatContext *s, AVPacket *pkt, AVStream *st, int max_size)
+{
+ voc_dec_context_t *voc = s->priv_data;
+ AVCodecContext *dec = st->codec;
+ ByteIOContext *pb = &s->pb;
+ voc_type_t type;
+ int size;
+ int sample_rate = 0;
+ int channels = 1;
+
+ while (!voc->remaining_size) {
+ type = get_byte(pb);
+ if (type == VOC_TYPE_EOF)
+ return AVERROR_IO;
+ voc->remaining_size = get_le24(pb);
+ max_size -= 4;
+
+ switch (type) {
+ case VOC_TYPE_VOICE_DATA:
+ dec->sample_rate = 1000000 / (256 - get_byte(pb));
+ if (sample_rate)
+ dec->sample_rate = sample_rate;
+ dec->channels = channels;
+ dec->codec_id = codec_get_id(voc_codec_tags, get_byte(pb));
+ dec->bits_per_sample = av_get_bits_per_sample(dec->codec_id);
+ voc->remaining_size -= 2;
+ max_size -= 2;
+ channels = 1;
+ break;
+
+ case VOC_TYPE_VOICE_DATA_CONT:
+ break;
+
+ case VOC_TYPE_EXTENDED:
+ sample_rate = get_le16(pb);
+ get_byte(pb);
+ channels = get_byte(pb) + 1;
+ sample_rate = 256000000 / (channels * (65536 - sample_rate));
+ voc->remaining_size = 0;
+ max_size -= 4;
+ break;
+
+ case VOC_TYPE_NEW_VOICE_DATA:
+ dec->sample_rate = get_le32(pb);
+ dec->bits_per_sample = get_byte(pb);
+ dec->channels = get_byte(pb);
+ dec->codec_id = codec_get_id(voc_codec_tags, get_le16(pb));
+ url_fskip(pb, 4);
+ voc->remaining_size -= 12;
+ max_size -= 12;
+ break;
+
+ default:
+ url_fskip(pb, voc->remaining_size);
+ max_size -= voc->remaining_size;
+ voc->remaining_size = 0;
+ break;
+ }
+ }
+
+ dec->bit_rate = dec->sample_rate * dec->bits_per_sample;
+
+ if (max_size <= 0)
+ max_size = voc_max_pkt_size;
+ size = FFMIN(voc->remaining_size, max_size);
+ voc->remaining_size -= size;
+ return av_get_packet(pb, pkt, size);
+}
+
+static int voc_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ return voc_get_packet(s, pkt, s->streams[0], 0);
+}
+
+static int voc_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVInputFormat voc_demuxer = {
+ "voc",
+ "Creative Voice File format",
+ sizeof(voc_dec_context_t),
+ voc_probe,
+ voc_read_header,
+ voc_read_packet,
+ voc_read_close,
+};
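voc_probe() accepts a file when the 16-bit little-endian version word at offset 22 and the check word at offset 24 satisfy check == ~version + 0x1234, which is the same value voc_write_header() in vocenc.c below emits. A minimal standalone sketch of that validation; voc_header_ok() is a hypothetical helper name, not part of the demuxer:

#include <stdint.h>
#include <string.h>

/* returns 1 if buf starts with a plausible 26-byte VOC header,
 * using the same version/checksum relation voc_probe() tests above
 * (masked to 16 bits here for clarity) */
static int voc_header_ok(const uint8_t *buf, int size)
{
    int version, check;

    if (size < 26 || memcmp(buf, "Creative Voice File\x1A", 20))
        return 0;
    version = buf[22] | (buf[23] << 8);
    check   = buf[24] | (buf[25] << 8);
    return ((~version + 0x1234) & 0xFFFF) == check;
}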
diff --git a/contrib/ffmpeg/libavformat/vocenc.c b/contrib/ffmpeg/libavformat/vocenc.c
new file mode 100644
index 000000000..ed304883d
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/vocenc.c
@@ -0,0 +1,104 @@
+/*
+ * Creative Voice File muxer.
+ * Copyright (c) 2006 Aurelien Jacobs <aurel@gnuage.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "voc.h"
+
+
+typedef struct voc_enc_context {
+ int param_written;
+} voc_enc_context_t;
+
+static int voc_write_header(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ const int header_size = 26;
+ const int version = 0x0114;
+
+ if (s->nb_streams != 1
+ || s->streams[0]->codec->codec_type != CODEC_TYPE_AUDIO)
+ return AVERROR_NOTSUPP;
+
+ put_buffer(pb, voc_magic, sizeof(voc_magic) - 1);
+ put_le16(pb, header_size);
+ put_le16(pb, version);
+ put_le16(pb, ~version + 0x1234);
+
+ return 0;
+}
+
+static int voc_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ voc_enc_context_t *voc = s->priv_data;
+ AVCodecContext *enc = s->streams[0]->codec;
+ ByteIOContext *pb = &s->pb;
+
+ if (!voc->param_written) {
+ int format = codec_get_tag(voc_codec_tags, enc->codec_id);
+
+ if (format > 0xFF) {
+ put_byte(pb, VOC_TYPE_NEW_VOICE_DATA);
+ put_le24(pb, pkt->size + 12);
+ put_le32(pb, enc->sample_rate);
+ put_byte(pb, enc->bits_per_sample);
+ put_byte(pb, enc->channels);
+ put_le16(pb, format);
+ put_le32(pb, 0);
+ } else {
+ if (s->streams[0]->codec->channels > 1) {
+ put_byte(pb, VOC_TYPE_EXTENDED);
+ put_le24(pb, 4);
+ put_le16(pb, 65536-256000000/(enc->sample_rate*enc->channels));
+ put_byte(pb, format);
+ put_byte(pb, enc->channels - 1);
+ }
+ put_byte(pb, VOC_TYPE_VOICE_DATA);
+ put_le24(pb, pkt->size + 2);
+ put_byte(pb, 256 - 1000000 / enc->sample_rate);
+ put_byte(pb, format);
+ }
+ voc->param_written = 1;
+ } else {
+ put_byte(pb, VOC_TYPE_VOICE_DATA_CONT);
+ put_le24(pb, pkt->size);
+ }
+
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+static int voc_write_trailer(AVFormatContext *s)
+{
+ put_byte(&s->pb, 0);
+ return 0;
+}
+
+AVOutputFormat voc_muxer = {
+ "voc",
+ "Creative Voice File format",
+ "audio/x-voc",
+ "voc",
+ sizeof(voc_enc_context_t),
+ CODEC_ID_PCM_U8,
+ CODEC_ID_NONE,
+ voc_write_header,
+ voc_write_packet,
+ voc_write_trailer,
+};
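The muxer above encodes the sample rate two ways: plain VOICE_DATA blocks store the 8-bit time constant 256 - 1000000/rate, while EXTENDED blocks store the 16-bit value 65536 - 256000000/(rate*channels); voc_get_packet() in vocdec.c inverts both formulas. A quick round-trip sketch showing why the extended form recovers the rate more accurately (22050 Hz mono is just an example input):

#include <stdio.h>

int main(void)
{
    int rate = 22050, channels = 1;

    /* 8-bit time constant used by VOC_TYPE_VOICE_DATA blocks */
    int tc8   = 256 - 1000000 / rate;                      /* 256 - 45 = 211 */
    int back8 = 1000000 / (256 - tc8);                     /* 22222 -- lossy */

    /* 16-bit time constant used by VOC_TYPE_EXTENDED blocks */
    int tc16   = 65536 - 256000000 / (rate * channels);    /* 53927 */
    int back16 = 256000000 / (channels * (65536 - tc16));  /* 22051 -- much closer */

    printf("%d Hz -> tc8=%d  -> %d Hz\n",  rate, tc8,  back8);
    printf("%d Hz -> tc16=%d -> %d Hz\n", rate, tc16, back16);
    return 0;
}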
diff --git a/contrib/ffmpeg/libavformat/wav.c b/contrib/ffmpeg/libavformat/wav.c
new file mode 100644
index 000000000..7fb982349
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/wav.c
@@ -0,0 +1,253 @@
+/*
+ * WAV muxer and demuxer
+ * Copyright (c) 2001, 2002 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+#include "allformats.h"
+#include "riff.h"
+
+typedef struct {
+ offset_t data;
+ offset_t data_end;
+} WAVContext;
+
+#ifdef CONFIG_MUXERS
+static int wav_write_header(AVFormatContext *s)
+{
+ WAVContext *wav = s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ offset_t fmt;
+
+ put_tag(pb, "RIFF");
+ put_le32(pb, 0); /* file length */
+ put_tag(pb, "WAVE");
+
+ /* format header */
+ fmt = start_tag(pb, "fmt ");
+ if (put_wav_header(pb, s->streams[0]->codec) < 0) {
+ av_free(wav);
+ return -1;
+ }
+ end_tag(pb, fmt);
+
+ av_set_pts_info(s->streams[0], 64, 1, s->streams[0]->codec->sample_rate);
+
+ /* data header */
+ wav->data = start_tag(pb, "data");
+
+ put_flush_packet(pb);
+
+ return 0;
+}
+
+static int wav_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ ByteIOContext *pb = &s->pb;
+ put_buffer(pb, pkt->data, pkt->size);
+ return 0;
+}
+
+static int wav_write_trailer(AVFormatContext *s)
+{
+ ByteIOContext *pb = &s->pb;
+ WAVContext *wav = s->priv_data;
+ offset_t file_size;
+
+ if (!url_is_streamed(&s->pb)) {
+ end_tag(pb, wav->data);
+
+ /* update file size */
+ file_size = url_ftell(pb);
+ url_fseek(pb, 4, SEEK_SET);
+ put_le32(pb, (uint32_t)(file_size - 8));
+ url_fseek(pb, file_size, SEEK_SET);
+
+ put_flush_packet(pb);
+ }
+ return 0;
+}
+#endif //CONFIG_MUXERS
+
+/* return the size of the found tag */
+/* XXX: > 2GB ? */
+static int find_tag(ByteIOContext *pb, uint32_t tag1)
+{
+ unsigned int tag;
+ int size;
+
+ for(;;) {
+ if (url_feof(pb))
+ return -1;
+ tag = get_le32(pb);
+ size = get_le32(pb);
+ if (tag == tag1)
+ break;
+ url_fseek(pb, size, SEEK_CUR);
+ }
+ if (size < 0)
+ size = 0x7fffffff;
+ return size;
+}
+
+static int wav_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'R' && p->buf[1] == 'I' &&
+ p->buf[2] == 'F' && p->buf[3] == 'F' &&
+ p->buf[8] == 'W' && p->buf[9] == 'A' &&
+ p->buf[10] == 'V' && p->buf[11] == 'E')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+/* wav input */
+static int wav_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ int size;
+ unsigned int tag;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ WAVContext *wav = s->priv_data;
+
+ /* check RIFF header */
+ tag = get_le32(pb);
+
+ if (tag != MKTAG('R', 'I', 'F', 'F'))
+ return -1;
+ get_le32(pb); /* file size */
+ tag = get_le32(pb);
+ if (tag != MKTAG('W', 'A', 'V', 'E'))
+ return -1;
+
+ /* parse fmt header */
+ size = find_tag(pb, MKTAG('f', 'm', 't', ' '));
+ if (size < 0)
+ return -1;
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+
+ get_wav_header(pb, st->codec, size);
+ st->need_parsing = 1;
+
+ av_set_pts_info(st, 64, 1, st->codec->sample_rate);
+
+ size = find_tag(pb, MKTAG('d', 'a', 't', 'a'));
+ if (size < 0)
+ return -1;
+ wav->data_end= url_ftell(pb) + size;
+ return 0;
+}
+
+#define MAX_SIZE 4096
+
+static int wav_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ int ret, size, left;
+ AVStream *st;
+ WAVContext *wav = s->priv_data;
+
+ if (url_feof(&s->pb))
+ return AVERROR_IO;
+ st = s->streams[0];
+
+ left= wav->data_end - url_ftell(&s->pb);
+ if(left <= 0){
+ left = find_tag(&(s->pb), MKTAG('d', 'a', 't', 'a'));
+ if (left < 0) {
+ return AVERROR_IO;
+ }
+ wav->data_end= url_ftell(&s->pb) + left;
+ }
+
+ size = MAX_SIZE;
+ if (st->codec->block_align > 1) {
+ if (size < st->codec->block_align)
+ size = st->codec->block_align;
+ size = (size / st->codec->block_align) * st->codec->block_align;
+ }
+ size= FFMIN(size, left);
+ if (av_new_packet(pkt, size))
+ return AVERROR_IO;
+ pkt->stream_index = 0;
+
+ ret = get_buffer(&s->pb, pkt->data, pkt->size);
+ if (ret < 0)
+ av_free_packet(pkt);
+ /* note: we need to modify the packet size here to handle the last
+ packet */
+ pkt->size = ret;
+ return ret;
+}
+
+static int wav_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int wav_read_seek(AVFormatContext *s,
+ int stream_index, int64_t timestamp, int flags)
+{
+ AVStream *st;
+
+ st = s->streams[0];
+ switch(st->codec->codec_id) {
+ case CODEC_ID_MP2:
+ case CODEC_ID_MP3:
+ case CODEC_ID_AC3:
+ case CODEC_ID_DTS:
+ /* use generic seeking with dynamically generated indexes */
+ return -1;
+ default:
+ break;
+ }
+ return pcm_read_seek(s, stream_index, timestamp, flags);
+}
+
+#ifdef CONFIG_WAV_DEMUXER
+AVInputFormat wav_demuxer = {
+ "wav",
+ "wav format",
+ sizeof(WAVContext),
+ wav_probe,
+ wav_read_header,
+ wav_read_packet,
+ wav_read_close,
+ wav_read_seek,
+};
+#endif
+#ifdef CONFIG_WAV_MUXER
+AVOutputFormat wav_muxer = {
+ "wav",
+ "wav format",
+ "audio/x-wav",
+ "wav",
+ sizeof(WAVContext),
+ CODEC_ID_PCM_S16LE,
+ CODEC_ID_NONE,
+ wav_write_header,
+ wav_write_packet,
+ wav_write_trailer,
+};
+#endif
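find_tag() above relies on the RIFF layout: after the 12-byte RIFF/WAVE preamble, the file is a sequence of (4-byte tag, 32-bit little-endian size, payload) chunks. A standalone sketch that walks a .wav file with plain stdio and lists its chunks; error handling for truncated files is omitted:

#include <stdio.h>
#include <stdint.h>

static uint32_t rd_le32(FILE *f)
{
    uint32_t v = 0;
    for (int i = 0; i < 4; i++)
        v |= (uint32_t)fgetc(f) << (8 * i);
    return v;
}

int main(int argc, char **argv)
{
    if (argc < 2)
        return 1;
    FILE *f = fopen(argv[1], "rb");
    if (!f)
        return 1;

    char riff[4], wave[4];
    fread(riff, 1, 4, f);       /* "RIFF" */
    rd_le32(f);                 /* overall RIFF size */
    fread(wave, 1, 4, f);       /* "WAVE" */

    /* walk (tag, size, payload) chunks, like find_tag() above */
    char tag[5] = {0};
    while (fread(tag, 1, 4, f) == 4) {
        uint32_t size = rd_le32(f);
        printf("chunk '%s', %u bytes\n", tag, size);
        fseek(f, (long)(size + (size & 1)), SEEK_CUR);  /* chunks are word aligned */
    }
    fclose(f);
    return 0;
}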
diff --git a/contrib/ffmpeg/libavformat/wc3movie.c b/contrib/ffmpeg/libavformat/wc3movie.c
new file mode 100644
index 000000000..6b3242797
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/wc3movie.c
@@ -0,0 +1,394 @@
+/*
+ * Wing Commander III Movie (.mve) File Demuxer
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file wc3movie.c
+ * Wing Commander III Movie file demuxer
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the WC3 .mve file format, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ */
+
+#include "avformat.h"
+
+#define WC3_PREAMBLE_SIZE 8
+
+#define FORM_TAG MKTAG('F', 'O', 'R', 'M')
+#define MOVE_TAG MKTAG('M', 'O', 'V', 'E')
+#define _PC__TAG MKTAG('_', 'P', 'C', '_')
+#define SOND_TAG MKTAG('S', 'O', 'N', 'D')
+#define BNAM_TAG MKTAG('B', 'N', 'A', 'M')
+#define SIZE_TAG MKTAG('S', 'I', 'Z', 'E')
+#define PALT_TAG MKTAG('P', 'A', 'L', 'T')
+#define INDX_TAG MKTAG('I', 'N', 'D', 'X')
+#define BRCH_TAG MKTAG('B', 'R', 'C', 'H')
+#define SHOT_TAG MKTAG('S', 'H', 'O', 'T')
+#define VGA__TAG MKTAG('V', 'G', 'A', ' ')
+#define TEXT_TAG MKTAG('T', 'E', 'X', 'T')
+#define AUDI_TAG MKTAG('A', 'U', 'D', 'I')
+
+/* video resolution unless otherwise specified */
+#define WC3_DEFAULT_WIDTH 320
+#define WC3_DEFAULT_HEIGHT 165
+
+/* always use the same PCM audio parameters */
+#define WC3_SAMPLE_RATE 22050
+#define WC3_AUDIO_CHANNELS 1
+#define WC3_AUDIO_BITS 16
+
+/* nice, constant framerate */
+#define WC3_FRAME_PTS_INC (90000 / 15)
+
+#define PALETTE_SIZE (256 * 3)
+#define PALETTE_COUNT 256
+
+typedef struct Wc3DemuxContext {
+ int width;
+ int height;
+ unsigned char *palettes;
+ int palette_count;
+ int64_t pts;
+ int video_stream_index;
+ int audio_stream_index;
+
+ AVPaletteControl palette_control;
+
+} Wc3DemuxContext;
+
+/* bizarre palette lookup table */
+static const unsigned char wc3_pal_lookup[] = {
+ 0x00, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0E,
+ 0x10, 0x12, 0x13, 0x15, 0x16, 0x18, 0x19, 0x1A,
+ 0x1C, 0x1D, 0x1F, 0x20, 0x21, 0x23, 0x24, 0x25,
+ 0x27, 0x28, 0x29, 0x2A, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x38, 0x39,
+ 0x3A, 0x3B, 0x3C, 0x3D, 0x3F, 0x40, 0x41, 0x42,
+ 0x43, 0x44, 0x45, 0x46, 0x48, 0x49, 0x4A, 0x4B,
+ 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53,
+ 0x54, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C,
+ 0x5D, 0x5E, 0x5F, 0x60, 0x61, 0x62, 0x63, 0x64,
+ 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B, 0x6C,
+ 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74,
+ 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x7B, 0x7C,
+ 0x7D, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
+ 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B,
+ 0x8C, 0x8D, 0x8D, 0x8E, 0x8F, 0x90, 0x91, 0x92,
+ 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x99,
+ 0x9A, 0x9B, 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1,
+ 0xA2, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8,
+ 0xA9, 0xAA, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
+ 0xB0, 0xB1, 0xB2, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6,
+ 0xB7, 0xB8, 0xB9, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD,
+ 0xBE, 0xBF, 0xBF, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4,
+ 0xC5, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB,
+ 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xD0, 0xD0, 0xD1,
+ 0xD2, 0xD3, 0xD4, 0xD5, 0xD5, 0xD6, 0xD7, 0xD8,
+ 0xD9, 0xDA, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
+ 0xDF, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE4, 0xE5,
+ 0xE6, 0xE7, 0xE8, 0xE9, 0xE9, 0xEA, 0xEB, 0xEC,
+ 0xED, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF1, 0xF2,
+ 0xF3, 0xF4, 0xF5, 0xF6, 0xF6, 0xF7, 0xF8, 0xF9,
+ 0xFA, 0xFA, 0xFB, 0xFC, 0xFD, 0xFD, 0xFD, 0xFD
+};
+
+
+static int wc3_probe(AVProbeData *p)
+{
+ if (p->buf_size < 12)
+ return 0;
+
+ if ((LE_32(&p->buf[0]) != FORM_TAG) ||
+ (LE_32(&p->buf[8]) != MOVE_TAG))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int wc3_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ Wc3DemuxContext *wc3 = (Wc3DemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned int fourcc_tag;
+ unsigned int size;
+ AVStream *st;
+ unsigned char preamble[WC3_PREAMBLE_SIZE];
+ int ret = 0;
+ int current_palette = 0;
+ int bytes_to_read;
+ int i;
+ unsigned char rotate;
+
+ /* default context members */
+ wc3->width = WC3_DEFAULT_WIDTH;
+ wc3->height = WC3_DEFAULT_HEIGHT;
+ wc3->palettes = NULL;
+ wc3->palette_count = 0;
+ wc3->pts = 0;
+ wc3->video_stream_index = wc3->audio_stream_index = 0;
+
+ /* skip the first 3 32-bit numbers */
+ url_fseek(pb, 12, SEEK_CUR);
+
+ /* traverse the chunks and load the header information that appears
+ * before the first BRCH tag */
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ WC3_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ fourcc_tag = LE_32(&preamble[0]);
+ size = (BE_32(&preamble[4]) + 1) & (~1);
+
+ do {
+ switch (fourcc_tag) {
+
+ case SOND_TAG:
+ case INDX_TAG:
+ /* SOND unknown, INDX unnecessary; ignore both */
+ url_fseek(pb, size, SEEK_CUR);
+ break;
+
+ case _PC__TAG:
+ /* need the number of palettes */
+ url_fseek(pb, 8, SEEK_CUR);
+ if ((ret = get_buffer(pb, preamble, 4)) != 4)
+ return AVERROR_IO;
+ wc3->palette_count = LE_32(&preamble[0]);
+ if((unsigned)wc3->palette_count >= UINT_MAX / PALETTE_SIZE){
+ wc3->palette_count= 0;
+ return -1;
+ }
+ wc3->palettes = av_malloc(wc3->palette_count * PALETTE_SIZE);
+ break;
+
+ case BNAM_TAG:
+ /* load up the name */
+ if ((unsigned)size < 512)
+ bytes_to_read = size;
+ else
+ bytes_to_read = 512;
+ if ((ret = get_buffer(pb, s->title, bytes_to_read)) != bytes_to_read)
+ return AVERROR_IO;
+ break;
+
+ case SIZE_TAG:
+ /* video resolution override */
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ WC3_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ wc3->width = LE_32(&preamble[0]);
+ wc3->height = LE_32(&preamble[4]);
+ break;
+
+ case PALT_TAG:
+ /* one of several palettes */
+ if ((unsigned)current_palette >= wc3->palette_count)
+ return AVERROR_INVALIDDATA;
+ if ((ret = get_buffer(pb,
+ &wc3->palettes[current_palette * PALETTE_SIZE],
+ PALETTE_SIZE)) != PALETTE_SIZE)
+ return AVERROR_IO;
+
+ /* transform the current palette in place */
+ for (i = current_palette * PALETTE_SIZE;
+ i < (current_palette + 1) * PALETTE_SIZE; i++) {
+ /* rotate each palette component left by 2 and use the result
+ * as an index into the color component table */
+ rotate = ((wc3->palettes[i] << 2) & 0xFF) |
+ ((wc3->palettes[i] >> 6) & 0xFF);
+ wc3->palettes[i] = wc3_pal_lookup[rotate];
+ }
+ current_palette++;
+ break;
+
+ default:
+ av_log(s, AV_LOG_ERROR, " unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\n",
+ preamble[0], preamble[1], preamble[2], preamble[3],
+ preamble[0], preamble[1], preamble[2], preamble[3]);
+ return AVERROR_INVALIDDATA;
+ break;
+ }
+
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ WC3_PREAMBLE_SIZE)
+ return AVERROR_IO;
+ fourcc_tag = LE_32(&preamble[0]);
+ /* chunk sizes are 16-bit aligned */
+ size = (BE_32(&preamble[4]) + 1) & (~1);
+
+ } while (fourcc_tag != BRCH_TAG);
+
+ /* initialize the decoder streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ wc3->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_XAN_WC3;
+ st->codec->codec_tag = 0; /* no fourcc */
+ st->codec->width = wc3->width;
+ st->codec->height = wc3->height;
+
+ /* palette considerations */
+ st->codec->palctrl = &wc3->palette_control;
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ wc3->audio_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_PCM_S16LE;
+ st->codec->codec_tag = 1;
+ st->codec->channels = WC3_AUDIO_CHANNELS;
+ st->codec->bits_per_sample = WC3_AUDIO_BITS;
+ st->codec->sample_rate = WC3_SAMPLE_RATE;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample;
+ st->codec->block_align = WC3_AUDIO_BITS * WC3_AUDIO_CHANNELS;
+
+ return 0;
+}
+
+static int wc3_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ Wc3DemuxContext *wc3 = (Wc3DemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned int fourcc_tag;
+ unsigned int size;
+ int packet_read = 0;
+ int ret = 0;
+ unsigned char preamble[WC3_PREAMBLE_SIZE];
+ unsigned char text[1024];
+ unsigned int palette_number;
+ int i;
+ unsigned char r, g, b;
+ int base_palette_index;
+
+ while (!packet_read) {
+
+ /* get the next chunk preamble */
+ if ((ret = get_buffer(pb, preamble, WC3_PREAMBLE_SIZE)) !=
+ WC3_PREAMBLE_SIZE)
+ ret = AVERROR_IO;
+
+ fourcc_tag = LE_32(&preamble[0]);
+ /* chunk sizes are 16-bit aligned */
+ size = (BE_32(&preamble[4]) + 1) & (~1);
+
+ switch (fourcc_tag) {
+
+ case BRCH_TAG:
+ /* no-op */
+ break;
+
+ case SHOT_TAG:
+ /* load up new palette */
+ if ((ret = get_buffer(pb, preamble, 4)) != 4)
+ return AVERROR_IO;
+ palette_number = LE_32(&preamble[0]);
+ if (palette_number >= wc3->palette_count)
+ return AVERROR_INVALIDDATA;
+ base_palette_index = palette_number * PALETTE_COUNT * 3;
+ for (i = 0; i < PALETTE_COUNT; i++) {
+ r = wc3->palettes[base_palette_index + i * 3 + 0];
+ g = wc3->palettes[base_palette_index + i * 3 + 1];
+ b = wc3->palettes[base_palette_index + i * 3 + 2];
+ wc3->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
+ }
+ wc3->palette_control.palette_changed = 1;
+ break;
+
+ case VGA__TAG:
+ /* send out video chunk */
+ ret= av_get_packet(pb, pkt, size);
+ pkt->stream_index = wc3->video_stream_index;
+ pkt->pts = wc3->pts;
+ if (ret != size)
+ ret = AVERROR_IO;
+ packet_read = 1;
+ break;
+
+ case TEXT_TAG:
+ /* subtitle chunk */
+#if 0
+ url_fseek(pb, size, SEEK_CUR);
+#else
+ if ((unsigned)size > sizeof(text) || (ret = get_buffer(pb, text, size)) != size)
+ ret = AVERROR_IO;
+ else {
+ int i = 0;
+ av_log (s, AV_LOG_DEBUG, "Subtitle time!\n");
+ av_log (s, AV_LOG_DEBUG, " inglish: %s\n", &text[i + 1]);
+ i += text[i] + 1;
+ av_log (s, AV_LOG_DEBUG, " doytsch: %s\n", &text[i + 1]);
+ i += text[i] + 1;
+ av_log (s, AV_LOG_DEBUG, " fronsay: %s\n", &text[i + 1]);
+ }
+#endif
+ break;
+
+ case AUDI_TAG:
+ /* send out audio chunk */
+ ret= av_get_packet(pb, pkt, size);
+ pkt->stream_index = wc3->audio_stream_index;
+ pkt->pts = wc3->pts;
+ if (ret != size)
+ ret = AVERROR_IO;
+
+ /* time to advance pts */
+ wc3->pts += WC3_FRAME_PTS_INC;
+
+ packet_read = 1;
+ break;
+
+ default:
+ av_log (s, AV_LOG_ERROR, " unrecognized WC3 chunk: %c%c%c%c (0x%02X%02X%02X%02X)\n",
+ preamble[0], preamble[1], preamble[2], preamble[3],
+ preamble[0], preamble[1], preamble[2], preamble[3]);
+ ret = AVERROR_INVALIDDATA;
+ packet_read = 1;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int wc3_read_close(AVFormatContext *s)
+{
+ Wc3DemuxContext *wc3 = (Wc3DemuxContext *)s->priv_data;
+
+ av_free(wc3->palettes);
+
+ return 0;
+}
+
+AVInputFormat wc3_demuxer = {
+ "wc3movie",
+ "Wing Commander III movie format",
+ sizeof(Wc3DemuxContext),
+ wc3_probe,
+ wc3_read_header,
+ wc3_read_packet,
+ wc3_read_close,
+};
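Each PALT palette component above is rotated left by two bits and the result is then remapped through wc3_pal_lookup[]. Just the rotation step, as a tiny standalone sketch:

#include <stdio.h>

/* 8-bit rotate-left-by-two, the transform applied to each PALT component
 * before the wc3_pal_lookup[] indexing above */
static unsigned char rotl2(unsigned char v)
{
    return (unsigned char)(((v << 2) | (v >> 6)) & 0xFF);
}

int main(void)
{
    printf("%02X %02X\n", rotl2(0x3F), rotl2(0xC1));   /* FC 07 */
    return 0;
}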
diff --git a/contrib/ffmpeg/libavformat/westwood.c b/contrib/ffmpeg/libavformat/westwood.c
new file mode 100644
index 000000000..5c42e3b55
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/westwood.c
@@ -0,0 +1,414 @@
+/*
+ * Westwood Studios Multimedia Formats Demuxer (VQA, AUD)
+ * Copyright (c) 2003 The ffmpeg Project
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file westwood.c
+ * Westwood Studios VQA & AUD file demuxers
+ * by Mike Melanson (melanson@pcisys.net)
+ * for more information on the Westwood file formats, visit:
+ * http://www.pcisys.net/~melanson/codecs/
+ * http://www.geocities.com/SiliconValley/8682/aud3.txt
+ *
+ * Implementation note: There is no definite file signature for AUD files.
+ * The demuxer uses a probabilistic strategy for content detection. This
+ * entails performing sanity checks on certain header values in order to
+ * qualify a file. Refer to wsaud_probe() for the precise parameters.
+ */
+
+#include "avformat.h"
+
+#define AUD_HEADER_SIZE 12
+#define AUD_CHUNK_PREAMBLE_SIZE 8
+#define AUD_CHUNK_SIGNATURE 0x0000DEAF
+
+#define FORM_TAG MKBETAG('F', 'O', 'R', 'M')
+#define WVQA_TAG MKBETAG('W', 'V', 'Q', 'A')
+#define VQHD_TAG MKBETAG('V', 'Q', 'H', 'D')
+#define FINF_TAG MKBETAG('F', 'I', 'N', 'F')
+#define SND0_TAG MKBETAG('S', 'N', 'D', '0')
+#define SND1_TAG MKBETAG('S', 'N', 'D', '1')
+#define SND2_TAG MKBETAG('S', 'N', 'D', '2')
+#define VQFR_TAG MKBETAG('V', 'Q', 'F', 'R')
+
+/* don't know what these tags are for, but acknowledge their existence */
+#define CINF_TAG MKBETAG('C', 'I', 'N', 'F')
+#define CINH_TAG MKBETAG('C', 'I', 'N', 'H')
+#define CIND_TAG MKBETAG('C', 'I', 'N', 'D')
+#define PINF_TAG MKBETAG('P', 'I', 'N', 'F')
+#define PINH_TAG MKBETAG('P', 'I', 'N', 'H')
+#define PIND_TAG MKBETAG('P', 'I', 'N', 'D')
+#define CMDS_TAG MKBETAG('C', 'M', 'D', 'S')
+
+#define VQA_HEADER_SIZE 0x2A
+#define VQA_FRAMERATE 15
+#define VQA_VIDEO_PTS_INC (90000 / VQA_FRAMERATE)
+#define VQA_PREAMBLE_SIZE 8
+
+typedef struct WsAudDemuxContext {
+ int audio_samplerate;
+ int audio_channels;
+ int audio_bits;
+ int audio_type;
+ int audio_stream_index;
+ int64_t audio_frame_counter;
+} WsAudDemuxContext;
+
+typedef struct WsVqaDemuxContext {
+ int audio_samplerate;
+ int audio_channels;
+ int audio_bits;
+
+ int audio_stream_index;
+ int video_stream_index;
+
+ int64_t audio_frame_counter;
+ int64_t video_pts;
+} WsVqaDemuxContext;
+
+static int wsaud_probe(AVProbeData *p)
+{
+ int field;
+
+ /* Probabilistic content detection strategy: There is no file signature
+ * so perform sanity checks on various header parameters:
+ * 8000 <= sample rate (16 bits) <= 48000 ==> 40001 acceptable numbers
+ * compression type (8 bits) = 1 or 99 ==> 2 acceptable numbers
+ * There is a total of 24 bits. The number space contains 2^24 =
+ * 16777216 numbers. There are 40001 * 2 = 80002 acceptable combinations
+ * of numbers. There is a 80002/16777216 = 0.48% chance of a false
+ * positive.
+ */
+
+ if (p->buf_size < AUD_HEADER_SIZE)
+ return 0;
+
+ /* check sample rate */
+ field = LE_16(&p->buf[0]);
+ if ((field < 8000) || (field > 48000))
+ return 0;
+
+ /* note: only check for WS IMA (type 99) right now since there is no
+ * support for type 1 */
+ if (p->buf[11] != 99)
+ return 0;
+
+ /* return 1/2 certainty since this file check is a little sketchy */
+ return AVPROBE_SCORE_MAX / 2;
+}
+
+static int wsaud_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ WsAudDemuxContext *wsaud = (WsAudDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ unsigned char header[AUD_HEADER_SIZE];
+
+ if (get_buffer(pb, header, AUD_HEADER_SIZE) != AUD_HEADER_SIZE)
+ return AVERROR_IO;
+ wsaud->audio_samplerate = LE_16(&header[0]);
+ if (header[11] == 99)
+ wsaud->audio_type = CODEC_ID_ADPCM_IMA_WS;
+ else
+ return AVERROR_INVALIDDATA;
+
+ /* flag 0 indicates stereo */
+ wsaud->audio_channels = (header[10] & 0x1) + 1;
+ /* flag 1 indicates 16 bit audio */
+ wsaud->audio_bits = (((header[10] & 0x2) >> 1) + 1) * 8;
+
+ /* initialize the audio decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, wsaud->audio_samplerate);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = wsaud->audio_type;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->channels = wsaud->audio_channels;
+ st->codec->sample_rate = wsaud->audio_samplerate;
+ st->codec->bits_per_sample = wsaud->audio_bits;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample / 4;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ wsaud->audio_stream_index = st->index;
+ wsaud->audio_frame_counter = 0;
+
+ return 0;
+}
+
+static int wsaud_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ WsAudDemuxContext *wsaud = (WsAudDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ unsigned char preamble[AUD_CHUNK_PREAMBLE_SIZE];
+ unsigned int chunk_size;
+ int ret = 0;
+
+ if (get_buffer(pb, preamble, AUD_CHUNK_PREAMBLE_SIZE) !=
+ AUD_CHUNK_PREAMBLE_SIZE)
+ return AVERROR_IO;
+
+ /* validate the chunk */
+ if (LE_32(&preamble[4]) != AUD_CHUNK_SIGNATURE)
+ return AVERROR_INVALIDDATA;
+
+ chunk_size = LE_16(&preamble[0]);
+ ret= av_get_packet(pb, pkt, chunk_size);
+ if (ret != chunk_size)
+ return AVERROR_IO;
+ pkt->stream_index = wsaud->audio_stream_index;
+ pkt->pts = wsaud->audio_frame_counter;
+ pkt->pts /= wsaud->audio_samplerate;
+
+ /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
+ wsaud->audio_frame_counter += (chunk_size * 2) / wsaud->audio_channels;
+
+ return ret;
+}
+
+static int wsaud_read_close(AVFormatContext *s)
+{
+// WsAudDemuxContext *wsaud = (WsAudDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+
+static int wsvqa_probe(AVProbeData *p)
+{
+ /* need 12 bytes to qualify */
+ if (p->buf_size < 12)
+ return 0;
+
+ /* check for the VQA signatures */
+ if ((BE_32(&p->buf[0]) != FORM_TAG) ||
+ (BE_32(&p->buf[8]) != WVQA_TAG))
+ return 0;
+
+ return AVPROBE_SCORE_MAX;
+}
+
+static int wsvqa_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ WsVqaDemuxContext *wsvqa = (WsVqaDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ AVStream *st;
+ unsigned char *header;
+ unsigned char scratch[VQA_PREAMBLE_SIZE];
+ unsigned int chunk_tag;
+ unsigned int chunk_size;
+
+ /* initialize the video decoder stream */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ wsvqa->video_stream_index = st->index;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_WS_VQA;
+ st->codec->codec_tag = 0; /* no fourcc */
+
+ /* skip to the start of the VQA header */
+ url_fseek(pb, 20, SEEK_SET);
+
+ /* the VQA header needs to go to the decoder */
+ st->codec->extradata_size = VQA_HEADER_SIZE;
+ st->codec->extradata = av_mallocz(VQA_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
+ header = (unsigned char *)st->codec->extradata;
+ if (get_buffer(pb, st->codec->extradata, VQA_HEADER_SIZE) !=
+ VQA_HEADER_SIZE) {
+ av_free(st->codec->extradata);
+ return AVERROR_IO;
+ }
+ st->codec->width = LE_16(&header[6]);
+ st->codec->height = LE_16(&header[8]);
+
+ st->codec->time_base.num = 1;
+ st->codec->time_base.den = VQA_FRAMERATE;
+
+ /* initialize the audio decoder stream for VQA v1 or nonzero samplerate */
+ if (LE_16(&header[24]) || (LE_16(&header[0]) == 1)) {
+ st = av_new_stream(s, 0);
+ if (!st)
+ return AVERROR_NOMEM;
+ av_set_pts_info(st, 33, 1, 90000);
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ if (LE_16(&header[0]) == 1)
+ st->codec->codec_id = CODEC_ID_WESTWOOD_SND1;
+ else
+ st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS;
+ st->codec->codec_tag = 0; /* no tag */
+ st->codec->sample_rate = LE_16(&header[24]);
+ if (!st->codec->sample_rate)
+ st->codec->sample_rate = 22050;
+ st->codec->channels = header[26];
+ if (!st->codec->channels)
+ st->codec->channels = 1;
+ st->codec->bits_per_sample = 16;
+ st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
+ st->codec->bits_per_sample / 4;
+ st->codec->block_align = st->codec->channels * st->codec->bits_per_sample;
+
+ wsvqa->audio_stream_index = st->index;
+ wsvqa->audio_samplerate = st->codec->sample_rate;
+ wsvqa->audio_channels = st->codec->channels;
+ wsvqa->audio_frame_counter = 0;
+ }
+
+ /* there are 0 or more chunks before the FINF chunk; iterate until
+ * FINF has been skipped, at which point the file is ready to be demuxed */
+ do {
+ if (get_buffer(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) {
+ av_free(st->codec->extradata);
+ return AVERROR_IO;
+ }
+ chunk_tag = BE_32(&scratch[0]);
+ chunk_size = BE_32(&scratch[4]);
+
+ /* catch any unknown header tags, out of curiosity */
+ switch (chunk_tag) {
+ case CINF_TAG:
+ case CINH_TAG:
+ case CIND_TAG:
+ case PINF_TAG:
+ case PINH_TAG:
+ case PIND_TAG:
+ case FINF_TAG:
+ case CMDS_TAG:
+ break;
+
+ default:
+ av_log (s, AV_LOG_ERROR, " note: unknown chunk seen (%c%c%c%c)\n",
+ scratch[0], scratch[1],
+ scratch[2], scratch[3]);
+ break;
+ }
+
+ url_fseek(pb, chunk_size, SEEK_CUR);
+ } while (chunk_tag != FINF_TAG);
+
+ wsvqa->video_pts = wsvqa->audio_frame_counter = 0;
+
+ return 0;
+}
+
+static int wsvqa_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ WsVqaDemuxContext *wsvqa = (WsVqaDemuxContext *)s->priv_data;
+ ByteIOContext *pb = &s->pb;
+ int ret = -1;
+ unsigned char preamble[VQA_PREAMBLE_SIZE];
+ unsigned int chunk_type;
+ unsigned int chunk_size;
+ int skip_byte;
+
+ while (get_buffer(pb, preamble, VQA_PREAMBLE_SIZE) == VQA_PREAMBLE_SIZE) {
+ chunk_type = BE_32(&preamble[0]);
+ chunk_size = BE_32(&preamble[4]);
+ skip_byte = chunk_size & 0x01;
+
+ if ((chunk_type == SND1_TAG) || (chunk_type == SND2_TAG) || (chunk_type == VQFR_TAG)) {
+
+ if (av_new_packet(pkt, chunk_size))
+ return AVERROR_IO;
+ ret = get_buffer(pb, pkt->data, chunk_size);
+ if (ret != chunk_size) {
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+
+ if (chunk_type == SND2_TAG) {
+ pkt->stream_index = wsvqa->audio_stream_index;
+
+ pkt->pts = 90000;
+ pkt->pts *= wsvqa->audio_frame_counter;
+ pkt->pts /= wsvqa->audio_samplerate;
+
+ /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */
+ wsvqa->audio_frame_counter += (chunk_size * 2) / wsvqa->audio_channels;
+ } else if(chunk_type == SND1_TAG) {
+ pkt->stream_index = wsvqa->audio_stream_index;
+
+ pkt->pts = 90000;
+ pkt->pts *= wsvqa->audio_frame_counter;
+ pkt->pts /= wsvqa->audio_samplerate;
+
+ /* unpacked size is stored in header */
+ wsvqa->audio_frame_counter += LE_16(pkt->data) / wsvqa->audio_channels;
+ } else {
+ pkt->stream_index = wsvqa->video_stream_index;
+ pkt->pts = wsvqa->video_pts;
+ wsvqa->video_pts += VQA_VIDEO_PTS_INC;
+ }
+ /* stay on 16-bit alignment */
+ if (skip_byte)
+ url_fseek(pb, 1, SEEK_CUR);
+
+ return ret;
+ } else {
+ switch(chunk_type){
+ case CMDS_TAG:
+ case SND0_TAG:
+ break;
+ default:
+ av_log(s, AV_LOG_INFO, "Skipping unknown chunk 0x%08X\n", chunk_type);
+ }
+ url_fseek(pb, chunk_size + skip_byte, SEEK_CUR);
+ }
+ }
+
+ return ret;
+}
+
+static int wsvqa_read_close(AVFormatContext *s)
+{
+// WsVqaDemuxContext *wsvqa = (WsVqaDemuxContext *)s->priv_data;
+
+ return 0;
+}
+
+#ifdef CONFIG_WSAUD_DEMUXER
+AVInputFormat wsaud_demuxer = {
+ "wsaud",
+ "Westwood Studios audio format",
+ sizeof(WsAudDemuxContext),
+ wsaud_probe,
+ wsaud_read_header,
+ wsaud_read_packet,
+ wsaud_read_close,
+};
+#endif
+#ifdef CONFIG_WSVQA_DEMUXER
+AVInputFormat wsvqa_demuxer = {
+ "wsvqa",
+ "Westwood Studios VQA format",
+ sizeof(WsVqaDemuxContext),
+ wsvqa_probe,
+ wsvqa_read_header,
+ wsvqa_read_packet,
+ wsvqa_read_close,
+};
+#endif
diff --git a/contrib/ffmpeg/libavformat/wv.c b/contrib/ffmpeg/libavformat/wv.c
new file mode 100644
index 000000000..2de07fe3f
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/wv.c
@@ -0,0 +1,202 @@
+/*
+ * WavPack demuxer
+ * Copyright (c) 2006 Konstantin Shishkov.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avformat.h"
+#include "allformats.h"
+#include "bswap.h"
+
+// specs say that maximum block size is 1Mb
+#define WV_BLOCK_LIMIT 1047576
+
+#define WV_EXTRA_SIZE 12
+
+enum WV_FLAGS{
+ WV_MONO = 0x0004,
+ WV_HYBRID = 0x0008,
+ WV_JOINT = 0x0010,
+ WV_CROSSD = 0x0020,
+ WV_HSHAPE = 0x0040,
+ WV_FLOAT = 0x0080,
+ WV_INT32 = 0x0100,
+ WV_HBR = 0x0200,
+ WV_HBAL = 0x0400,
+ WV_MCINIT = 0x0800,
+ WV_MCEND = 0x1000,
+};
+
+static const int wv_rates[16] = {
+ 6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000,
+ 32000, 44100, 48000, 64000, 88200, 96000, 192000, -1
+};
+
+typedef struct{
+ uint32_t blksize, flags;
+ int rate, chan, bpp;
+ int block_parsed;
+ uint8_t extra[WV_EXTRA_SIZE];
+}WVContext;
+
+static int wv_probe(AVProbeData *p)
+{
+ /* check file header */
+ if (p->buf_size <= 32)
+ return 0;
+ if (p->buf[0] == 'w' && p->buf[1] == 'v' &&
+ p->buf[2] == 'p' && p->buf[3] == 'k')
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+static int wv_read_block_header(AVFormatContext *ctx, ByteIOContext *pb)
+{
+ WVContext *wc = ctx->priv_data;
+ uint32_t tag, ver;
+ int size;
+ int rate, bpp, chan;
+
+ tag = get_le32(pb);
+ if (tag != MKTAG('w', 'v', 'p', 'k'))
+ return -1;
+ size = get_le32(pb);
+ if(size < 24 || size > WV_BLOCK_LIMIT){
+ av_log(ctx, AV_LOG_ERROR, "Incorrect block size %i\n", size);
+ return -1;
+ }
+ wc->blksize = size;
+ ver = get_le16(pb);
+ if(ver < 0x402 || ver > 0x40F){
+ av_log(ctx, AV_LOG_ERROR, "Unsupported version %03X\n", ver);
+ return -1;
+ }
+ get_byte(pb); // track no
+ get_byte(pb); // track sub index
+ get_le32(pb); // total samples in file
+ get_le32(pb); // offset in samples of current block
+ get_buffer(pb, wc->extra, WV_EXTRA_SIZE);
+ wc->flags = LE_32(wc->extra + 4);
+ //parse flags
+ if(wc->flags & WV_FLOAT){
+ av_log(ctx, AV_LOG_ERROR, "Floating point data is not supported\n");
+ return -1;
+ }
+ if(wc->flags & WV_HYBRID){
+ av_log(ctx, AV_LOG_ERROR, "Hybrid coding mode is not supported\n");
+ return -1;
+ }
+ if(wc->flags & WV_INT32){
+ av_log(ctx, AV_LOG_ERROR, "Integer point data is not supported\n");
+ return -1;
+ }
+
+ bpp = ((wc->flags & 3) + 1) << 3;
+ chan = 1 + !(wc->flags & WV_MONO);
+ rate = wv_rates[(wc->flags >> 23) & 0xF];
+ if(rate == -1){
+ av_log(ctx, AV_LOG_ERROR, "Unknown sampling rate\n");
+ return -1;
+ }
+ if(!wc->bpp) wc->bpp = bpp;
+ if(!wc->chan) wc->chan = chan;
+ if(!wc->rate) wc->rate = rate;
+
+ if(wc->flags && bpp != wc->bpp){
+ av_log(ctx, AV_LOG_ERROR, "Bits per sample differ, this block: %i, header block: %i\n", bpp, wc->bpp);
+ return -1;
+ }
+ if(wc->flags && chan != wc->chan){
+ av_log(ctx, AV_LOG_ERROR, "Channels differ, this block: %i, header block: %i\n", chan, wc->chan);
+ return -1;
+ }
+ if(wc->flags && rate != wc->rate){
+ av_log(ctx, AV_LOG_ERROR, "Sampling rate differ, this block: %i, header block: %i\n", rate, wc->rate);
+ return -1;
+ }
+ wc->blksize = size - 24;
+ return 0;
+}
+
+static int wv_read_header(AVFormatContext *s,
+ AVFormatParameters *ap)
+{
+ ByteIOContext *pb = &s->pb;
+ WVContext *wc = s->priv_data;
+ AVStream *st;
+
+ if(wv_read_block_header(s, pb) < 0)
+ return -1;
+
+ wc->block_parsed = 0;
+ /* now we are ready: build format streams */
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ st->codec->codec_type = CODEC_TYPE_AUDIO;
+ st->codec->codec_id = CODEC_ID_WAVPACK;
+ st->codec->channels = wc->chan;
+ st->codec->sample_rate = wc->rate;
+ st->codec->bits_per_sample = wc->bpp;
+ av_set_pts_info(st, 64, 1, wc->rate);
+ return 0;
+}
+
+static int wv_read_packet(AVFormatContext *s,
+ AVPacket *pkt)
+{
+ WVContext *wc = s->priv_data;
+ int ret;
+
+ if (url_feof(&s->pb))
+ return -EIO;
+ if(wc->block_parsed){
+ if(wv_read_block_header(s, &s->pb) < 0)
+ return -1;
+ }
+
+ if(av_new_packet(pkt, wc->blksize + WV_EXTRA_SIZE) < 0)
+ return AVERROR_NOMEM;
+ memcpy(pkt->data, wc->extra, WV_EXTRA_SIZE);
+ ret = get_buffer(&s->pb, pkt->data + WV_EXTRA_SIZE, wc->blksize);
+ if(ret != wc->blksize){
+ av_free_packet(pkt);
+ return AVERROR_IO;
+ }
+ pkt->stream_index = 0;
+ wc->block_parsed = 1;
+ pkt->size = ret + WV_EXTRA_SIZE;
+
+ return 0;
+}
+
+static int wv_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+AVInputFormat wv_demuxer = {
+ "wv",
+ "WavPack",
+ sizeof(WVContext),
+ wv_probe,
+ wv_read_header,
+ wv_read_packet,
+ wv_read_close,
+};
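wv_read_block_header() above pulls everything it needs out of the block flags word: bits 0-1 give bytes per sample, bit 2 (WV_MONO) selects mono, and bits 23-26 index wv_rates[]. A small sketch decoding a hypothetical flags value the same way:

#include <stdio.h>
#include <stdint.h>

static const int rates[16] = {
    6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000,
    32000, 44100, 48000, 64000, 88200, 96000, 192000, -1
};

int main(void)
{
    /* hypothetical flags word: 16-bit samples, stereo, 44100 Hz
     * (rate index 9 sits in bits 23..26) */
    uint32_t flags = (9u << 23) | 0x0001;

    int bpp  = ((flags & 3) + 1) << 3;          /* 16 */
    int chan = 1 + !(flags & 0x0004);           /* WV_MONO clear -> 2 channels */
    int rate = rates[(flags >> 23) & 0xF];      /* 44100 */

    printf("%d bits, %d channels, %d Hz\n", bpp, chan, rate);
    return 0;
}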
diff --git a/contrib/ffmpeg/libavformat/yuv.c b/contrib/ffmpeg/libavformat/yuv.c
new file mode 100644
index 000000000..fe52cdea5
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/yuv.c
@@ -0,0 +1,161 @@
+/*
+ * .Y.U.V image format
+ * Copyright (c) 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+static int sizes[][2] = {
+ { 640, 480 },
+ { 720, 480 },
+ { 720, 576 },
+ { 352, 288 },
+ { 352, 240 },
+ { 160, 128 },
+ { 512, 384 },
+ { 640, 352 },
+ { 640, 240 },
+};
+
+static int infer_size(int *width_ptr, int *height_ptr, int size)
+{
+ int i;
+
+ for(i=0;i<sizeof(sizes)/sizeof(sizes[0]);i++) {
+ if ((sizes[i][0] * sizes[i][1]) == size) {
+ *width_ptr = sizes[i][0];
+ *height_ptr = sizes[i][1];
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static int yuv_read(ByteIOContext *f,
+ int (*alloc_cb)(void *opaque, AVImageInfo *info), void *opaque)
+{
+ ByteIOContext pb1, *pb = &pb1;
+ int img_size, ret;
+ char fname[1024], *p;
+ int size;
+ URLContext *h;
+ AVImageInfo info1, *info = &info1;
+
+ img_size = url_fsize(f);
+
+ /* XXX: hack hack */
+ h = url_fileno(f);
+ url_get_filename(h, fname, sizeof(fname));
+
+ if (infer_size(&info->width, &info->height, img_size) < 0) {
+ return AVERROR_IO;
+ }
+ info->pix_fmt = PIX_FMT_YUV420P;
+
+ ret = alloc_cb(opaque, info);
+ if (ret)
+ return ret;
+
+ size = info->width * info->height;
+
+ p = strrchr(fname, '.');
+ if (!p || p[1] != 'Y')
+ return AVERROR_IO;
+
+ get_buffer(f, info->pict.data[0], size);
+
+ p[1] = 'U';
+ if (url_fopen(pb, fname, URL_RDONLY) < 0)
+ return AVERROR_IO;
+
+ get_buffer(pb, info->pict.data[1], size / 4);
+ url_fclose(pb);
+
+ p[1] = 'V';
+ if (url_fopen(pb, fname, URL_RDONLY) < 0)
+ return AVERROR_IO;
+
+ get_buffer(pb, info->pict.data[2], size / 4);
+ url_fclose(pb);
+ return 0;
+}
+
+static int yuv_write(ByteIOContext *pb2, AVImageInfo *info)
+{
+ ByteIOContext pb1, *pb;
+ char fname[1024], *p;
+ int i, j, width, height;
+ uint8_t *ptr;
+ URLContext *h;
+ static const char *ext = "YUV";
+
+ /* XXX: hack hack */
+ h = url_fileno(pb2);
+ url_get_filename(h, fname, sizeof(fname));
+
+ p = strrchr(fname, '.');
+ if (!p || p[1] != 'Y')
+ return AVERROR_IO;
+
+ width = info->width;
+ height = info->height;
+
+ for(i=0;i<3;i++) {
+ if (i == 1) {
+ width >>= 1;
+ height >>= 1;
+ }
+
+ if (i >= 1) {
+ pb = &pb1;
+ p[1] = ext[i];
+ if (url_fopen(pb, fname, URL_WRONLY) < 0)
+ return AVERROR_IO;
+ } else {
+ pb = pb2;
+ }
+
+ ptr = info->pict.data[i];
+ for(j=0;j<height;j++) {
+ put_buffer(pb, ptr, width);
+ ptr += info->pict.linesize[i];
+ }
+ put_flush_packet(pb);
+ if (i >= 1) {
+ url_fclose(pb);
+ }
+ }
+ return 0;
+}
+
+static int yuv_probe(AVProbeData *pd)
+{
+ if (match_ext(pd->filename, "Y"))
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+AVImageFormat yuv_image_format = {
+ "yuv",
+ "Y",
+ yuv_probe,
+ yuv_read,
+ (1 << PIX_FMT_YUV420P),
+ yuv_write,
+};
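
For illustration, the companion-file convention that yuv_read() and yuv_write() above rely on, assuming string.h is available: a picture "foo.Y" has matching "foo.U" and "foo.V" files, each holding a quarter-size chroma plane of the same PIX_FMT_YUV420P image (the helper name is made up):

    /* Sketch only: derive the chroma file names the way yuv_read() does,
     * by rewriting the extension in place. */
    static void yuv_companion_names_example(char *fname)   /* e.g. "foo.Y" */
    {
        char *p = strrchr(fname, '.');
        if (!p || p[1] != 'Y')
            return;
        p[1] = 'U';   /* "foo.U": Cb plane, (width/2) x (height/2) bytes */
        /* ...read size/4 bytes of Cb here... */
        p[1] = 'V';   /* "foo.V": Cr plane, same size */
        /* ...read size/4 bytes of Cr here... */
    }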
diff --git a/contrib/ffmpeg/libavformat/yuv4mpeg.c b/contrib/ffmpeg/libavformat/yuv4mpeg.c
new file mode 100644
index 000000000..70214ae00
--- /dev/null
+++ b/contrib/ffmpeg/libavformat/yuv4mpeg.c
@@ -0,0 +1,408 @@
+/*
+ * YUV4MPEG format
+ * Copyright (c) 2001, 2002, 2003 Fabrice Bellard.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avformat.h"
+
+#define Y4M_MAGIC "YUV4MPEG2"
+#define Y4M_FRAME_MAGIC "FRAME"
+#define Y4M_LINE_MAX 256
+
+struct frame_attributes {
+ int interlaced_frame;
+ int top_field_first;
+};
+
+static int yuv4_generate_header(AVFormatContext *s, char* buf)
+{
+ AVStream *st;
+ int width, height;
+ int raten, rated, aspectn, aspectd, n;
+ char inter;
+ const char *colorspace = "";
+
+ st = s->streams[0];
+ width = st->codec->width;
+ height = st->codec->height;
+
+ av_reduce(&raten, &rated, st->codec->time_base.den, st->codec->time_base.num, (1UL<<31)-1);
+
+ aspectn = st->codec->sample_aspect_ratio.num;
+ aspectd = st->codec->sample_aspect_ratio.den;
+
+ if ( aspectn == 0 && aspectd == 1 ) aspectd = 0; // 0:0 means unknown
+
+ inter = 'p'; /* progressive is the default */
+ if (st->codec->coded_frame && st->codec->coded_frame->interlaced_frame) {
+ inter = st->codec->coded_frame->top_field_first ? 't' : 'b';
+ }
+
+ switch(st->codec->pix_fmt) {
+ case PIX_FMT_GRAY8:
+ colorspace = " Cmono";
+ break;
+ case PIX_FMT_YUV411P:
+ colorspace = " C411 XYSCSS=411";
+ break;
+ case PIX_FMT_YUV420P:
+ colorspace = (st->codec->codec_id == CODEC_ID_DVVIDEO)?" C420paldv XYSCSS=420PALDV":" C420mpeg2 XYSCSS=420MPEG2";
+ break;
+ case PIX_FMT_YUV422P:
+ colorspace = " C422 XYSCSS=422";
+ break;
+ case PIX_FMT_YUV444P:
+ colorspace = " C444 XYSCSS=444";
+ break;
+ }
+
+ /* construct stream header, if this is the first frame */
+ n = snprintf(buf, Y4M_LINE_MAX, "%s W%d H%d F%d:%d I%c A%d:%d%s\n",
+ Y4M_MAGIC,
+ width,
+ height,
+ raten, rated,
+ inter,
+ aspectn, aspectd,
+ colorspace);
+
+ return n;
+}
+
+static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ AVStream *st = s->streams[pkt->stream_index];
+ ByteIOContext *pb = &s->pb;
+ AVPicture *picture;
+ int* first_pkt = s->priv_data;
+ int width, height, h_chroma_shift, v_chroma_shift;
+ int i, m;
+ char buf2[Y4M_LINE_MAX+1];
+ char buf1[20];
+ uint8_t *ptr, *ptr1, *ptr2;
+
+ picture = (AVPicture *)pkt->data;
+
+ /* for the first packet we have to output the header as well */
+ if (*first_pkt) {
+ *first_pkt = 0;
+ if (yuv4_generate_header(s, buf2) < 0) {
+ av_log(s, AV_LOG_ERROR, "Error. YUV4MPEG stream header write failed.\n");
+ return AVERROR_IO;
+ } else {
+ put_buffer(pb, buf2, strlen(buf2));
+ }
+ }
+
+ /* construct frame header */
+
+ m = snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
+ put_buffer(pb, buf1, strlen(buf1));
+
+ width = st->codec->width;
+ height = st->codec->height;
+
+ ptr = picture->data[0];
+ for(i=0;i<height;i++) {
+ put_buffer(pb, ptr, width);
+ ptr += picture->linesize[0];
+ }
+
+ if (st->codec->pix_fmt != PIX_FMT_GRAY8){
+ // Adjust for smaller Cb and Cr planes
+ avcodec_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+ width >>= h_chroma_shift;
+ height >>= v_chroma_shift;
+
+ ptr1 = picture->data[1];
+ ptr2 = picture->data[2];
+ for(i=0;i<height;i++) { /* Cb */
+ put_buffer(pb, ptr1, width);
+ ptr1 += picture->linesize[1];
+ }
+ for(i=0;i<height;i++) { /* Cr */
+ put_buffer(pb, ptr2, width);
+ ptr2 += picture->linesize[2];
+ }
+ }
+ put_flush_packet(pb);
+ return 0;
+}
+
+static int yuv4_write_header(AVFormatContext *s)
+{
+ int* first_pkt = s->priv_data;
+
+ if (s->nb_streams != 1)
+ return AVERROR_IO;
+
+ if (s->streams[0]->codec->pix_fmt == PIX_FMT_YUV411P) {
+ av_log(s, AV_LOG_ERROR, "Warning: generating rarely used 4:1:1 YUV stream, some mjpegtools might not work.\n");
+ }
+ else if ((s->streams[0]->codec->pix_fmt != PIX_FMT_YUV420P) &&
+ (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV422P) &&
+ (s->streams[0]->codec->pix_fmt != PIX_FMT_GRAY8) &&
+ (s->streams[0]->codec->pix_fmt != PIX_FMT_YUV444P)) {
+ av_log(s, AV_LOG_ERROR, "ERROR: yuv4mpeg only handles yuv444p, yuv422p, yuv420p, yuv411p and gray pixel formats. Use -pix_fmt to select one.\n");
+ return AVERROR_IO;
+ }
+
+ *first_pkt = 1;
+ return 0;
+}
+
+static int yuv4_write_trailer(AVFormatContext *s)
+{
+ return 0;
+}
+
+#ifdef CONFIG_YUV4MPEGPIPE_MUXER
+AVOutputFormat yuv4mpegpipe_muxer = {
+ "yuv4mpegpipe",
+ "YUV4MPEG pipe format",
+ "",
+ "y4m",
+ sizeof(int),
+ CODEC_ID_NONE,
+ CODEC_ID_RAWVIDEO,
+ yuv4_write_header,
+ yuv4_write_packet,
+ yuv4_write_trailer,
+ .flags = AVFMT_RAWPICTURE,
+};
+#endif
+
+/* Header size increased to allow room for optional flags */
+#define MAX_YUV4_HEADER 80
+#define MAX_FRAME_HEADER 80
+
+static int yuv4_read_header(AVFormatContext *s, AVFormatParameters *ap)
+{
+ char header[MAX_YUV4_HEADER+10]; // Include headroom for the longest option
+ char *tokstart,*tokend,*header_end;
+ int i;
+ ByteIOContext *pb = &s->pb;
+ int width=-1, height=-1, raten=0, rated=0, aspectn=0, aspectd=0;
+ enum PixelFormat pix_fmt=PIX_FMT_NONE,alt_pix_fmt=PIX_FMT_NONE;
+ AVStream *st;
+ struct frame_attributes *s1 = s->priv_data;
+
+ for (i=0; i<MAX_YUV4_HEADER; i++) {
+ header[i] = get_byte(pb);
+ if (header[i] == '\n') {
+ header[i+1] = 0x20; // Add a space after last option. Makes parsing "444" vs "444alpha" easier.
+ header[i+2] = 0;
+ break;
+ }
+ }
+ if (i == MAX_YUV4_HEADER) return -1;
+ if (strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC))) return -1;
+
+ s1->interlaced_frame = 0;
+ s1->top_field_first = 0;
+ header_end = &header[i+1]; // Include space
+ for(tokstart = &header[strlen(Y4M_MAGIC) + 1]; tokstart < header_end; tokstart++) {
+ if (*tokstart==0x20) continue;
+ switch (*tokstart++) {
+ case 'W': // Width. Required.
+ width = strtol(tokstart, &tokend, 10);
+ tokstart=tokend;
+ break;
+ case 'H': // Height. Required.
+ height = strtol(tokstart, &tokend, 10);
+ tokstart=tokend;
+ break;
+ case 'C': // Color space
+ if (strncmp("420jpeg",tokstart,7)==0)
+ pix_fmt = PIX_FMT_YUV420P;
+ else if (strncmp("420mpeg2",tokstart,8)==0)
+ pix_fmt = PIX_FMT_YUV420P;
+ else if (strncmp("420paldv", tokstart, 8)==0)
+ pix_fmt = PIX_FMT_YUV420P;
+ else if (strncmp("411", tokstart, 3)==0)
+ pix_fmt = PIX_FMT_YUV411P;
+ else if (strncmp("422", tokstart, 3)==0)
+ pix_fmt = PIX_FMT_YUV422P;
+ else if (strncmp("444alpha", tokstart, 8)==0) {
+ av_log(s, AV_LOG_ERROR, "Cannot handle 4:4:4:4 YUV4MPEG stream.\n");
+ return -1;
+ } else if (strncmp("444", tokstart, 3)==0)
+ pix_fmt = PIX_FMT_YUV444P;
+ else if (strncmp("mono",tokstart, 4)==0) {
+ pix_fmt = PIX_FMT_GRAY8;
+ } else {
+ av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains an unknown pixel format.\n");
+ return -1;
+ }
+ while(tokstart<header_end&&*tokstart!=0x20) tokstart++;
+ break;
+ case 'I': // Interlace type
+ switch (*tokstart++){
+ case '?':
+ break;
+ case 'p':
+ s1->interlaced_frame=0;
+ break;
+ case 't':
+ s1->interlaced_frame=1;
+ s1->top_field_first=1;
+ break;
+ case 'b':
+ s1->interlaced_frame=1;
+ s1->top_field_first=0;
+ break;
+ case 'm':
+ av_log(s, AV_LOG_ERROR, "YUV4MPEG stream contains mixed interlaced and non-interlaced frames.\n");
+ return -1;
+ default:
+ av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
+ return -1;
+ }
+ break;
+ case 'F': // Frame rate
+ sscanf(tokstart,"%d:%d",&raten,&rated); // 0:0 if unknown
+ while(tokstart<header_end&&*tokstart!=0x20) tokstart++;
+ break;
+ case 'A': // Pixel aspect
+ sscanf(tokstart,"%d:%d",&aspectn,&aspectd); // 0:0 if unknown
+ while(tokstart<header_end&&*tokstart!=0x20) tokstart++;
+ break;
+ case 'X': // Vendor extensions
+ if (strncmp("YSCSS=",tokstart,6)==0) {
+ // Older nonstandard pixel format representation
+ tokstart+=6;
+ if (strncmp("420JPEG",tokstart,7)==0)
+ alt_pix_fmt=PIX_FMT_YUV420P;
+ else if (strncmp("420MPEG2",tokstart,8)==0)
+ alt_pix_fmt=PIX_FMT_YUV420P;
+ else if (strncmp("420PALDV",tokstart,8)==0)
+ alt_pix_fmt=PIX_FMT_YUV420P;
+ else if (strncmp("411",tokstart,3)==0)
+ alt_pix_fmt=PIX_FMT_YUV411P;
+ else if (strncmp("422",tokstart,3)==0)
+ alt_pix_fmt=PIX_FMT_YUV422P;
+ else if (strncmp("444",tokstart,3)==0)
+ alt_pix_fmt=PIX_FMT_YUV444P;
+ }
+ while(tokstart<header_end&&*tokstart!=0x20) tokstart++;
+ break;
+ }
+ }
+
+ if ((width == -1) || (height == -1)) {
+ av_log(s, AV_LOG_ERROR, "YUV4MPEG has invalid header.\n");
+ return -1;
+ }
+
+ if (pix_fmt == PIX_FMT_NONE) {
+ if (alt_pix_fmt == PIX_FMT_NONE)
+ pix_fmt = PIX_FMT_YUV420P;
+ else
+ pix_fmt = alt_pix_fmt;
+ }
+
+ if (raten == 0 && rated == 0) {
+ // Frame rate unknown
+ raten = 25;
+ rated = 1;
+ }
+
+ if (aspectn == 0 && aspectd == 0) {
+ // Pixel aspect unknown
+ aspectd = 1;
+ }
+
+ st = av_new_stream(s, 0);
+ if (!st)
+ return -1;
+ st->codec->width = width;
+ st->codec->height = height;
+ av_reduce(&raten, &rated, raten, rated, (1UL<<31)-1);
+ av_set_pts_info(st, 64, rated, raten);
+ st->codec->pix_fmt = pix_fmt;
+ st->codec->codec_type = CODEC_TYPE_VIDEO;
+ st->codec->codec_id = CODEC_ID_RAWVIDEO;
+ st->codec->sample_aspect_ratio= (AVRational){aspectn, aspectd};
+
+ return 0;
+}
+
+static int yuv4_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int i;
+ char header[MAX_FRAME_HEADER+1];
+ int packet_size, width, height;
+ AVStream *st = s->streams[0];
+ struct frame_attributes *s1 = s->priv_data;
+
+ for (i=0; i<MAX_FRAME_HEADER; i++) {
+ header[i] = get_byte(&s->pb);
+ if (header[i] == '\n') {
+ header[i+1] = 0;
+ break;
+ }
+ }
+ if (i == MAX_FRAME_HEADER) return -1;
+ if (strncmp(header, Y4M_FRAME_MAGIC, strlen(Y4M_FRAME_MAGIC))) return -1;
+
+ width = st->codec->width;
+ height = st->codec->height;
+
+ packet_size = avpicture_get_size(st->codec->pix_fmt, width, height);
+ if (packet_size < 0)
+ return -1;
+
+ if (av_get_packet(&s->pb, pkt, packet_size) != packet_size)
+ return AVERROR_IO;
+
+ if (s->streams[0]->codec->coded_frame) {
+ s->streams[0]->codec->coded_frame->interlaced_frame = s1->interlaced_frame;
+ s->streams[0]->codec->coded_frame->top_field_first = s1->top_field_first;
+ }
+
+ pkt->stream_index = 0;
+ return 0;
+}
+
+static int yuv4_read_close(AVFormatContext *s)
+{
+ return 0;
+}
+
+static int yuv4_probe(AVProbeData *pd)
+{
+ /* check file header */
+ if (pd->buf_size <= sizeof(Y4M_MAGIC))
+ return 0;
+ if (strncmp(pd->buf, Y4M_MAGIC, sizeof(Y4M_MAGIC)-1)==0)
+ return AVPROBE_SCORE_MAX;
+ else
+ return 0;
+}
+
+#ifdef CONFIG_YUV4MPEGPIPE_DEMUXER
+AVInputFormat yuv4mpegpipe_demuxer = {
+ "yuv4mpegpipe",
+ "YUV4MPEG pipe format",
+ sizeof(struct frame_attributes),
+ yuv4_probe,
+ yuv4_read_header,
+ yuv4_read_packet,
+ yuv4_read_close,
+ .extensions = "y4m"
+};
+#endif
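
For illustration, the stream and frame headers that yuv4_generate_header() above would emit for a 720x576, 25 fps, progressive 4:2:0 stream with unknown pixel aspect (a sketch, assuming PIX_FMT_YUV420P and a non-DV codec):

    /* Illustration only: one stream header, then a FRAME line before each raw picture. */
    static const char y4m_stream_header_example[] =
        "YUV4MPEG2 W720 H576 F25:1 Ip A0:0 C420mpeg2 XYSCSS=420MPEG2\n";
    static const char y4m_frame_header_example[] =
        "FRAME\n";   /* followed by the Y, Cb and Cr planes of one picture */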
diff --git a/contrib/ffmpeg/libavutil/Makefile b/contrib/ffmpeg/libavutil/Makefile
new file mode 100644
index 000000000..ab0db8f9b
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/Makefile
@@ -0,0 +1,29 @@
+#
+# libavutil Makefile
+#
+include ../config.mak
+
+CFLAGS+=-DBUILD_AVUTIL
+
+OBJS= mathematics.o \
+ rational.o \
+ intfloat_readwrite.o \
+ crc.o \
+ md5.o \
+ lls.o \
+ adler32.o \
+ log.o \
+ mem.o \
+ fifo.o \
+ tree.o \
+
+HEADERS = avutil.h common.h mathematics.h integer.h rational.h \
+ intfloat_readwrite.h md5.h adler32.h log.h fifo.h
+
+NAME=avutil
+ifeq ($(BUILD_SHARED),yes)
+LIBVERSION=$(LAVUVERSION)
+LIBMAJOR=$(LAVUMAJOR)
+endif
+
+include ../common.mak
diff --git a/src/libffmpeg/libavutil/adler32.c b/contrib/ffmpeg/libavutil/adler32.c
index e185a77c4..50d57470b 100644
--- a/src/libffmpeg/libavutil/adler32.c
+++ b/contrib/ffmpeg/libavutil/adler32.c
@@ -1,6 +1,24 @@
/* adler32.c -- compute the Adler-32 checksum of a data stream
+ * This is a modified version based on adler32.c from the zlib library.
+ *
* Copyright (C) 1995 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgment in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ *
*/
#include "common.h"
diff --git a/contrib/ffmpeg/libavutil/adler32.h b/contrib/ffmpeg/libavutil/adler32.h
new file mode 100644
index 000000000..f56d416fb
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/adler32.h
@@ -0,0 +1,27 @@
+/*
+ * copyright (c) 2006 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef ADLER32_H
+#define ADLER32_H
+
+unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,
+ unsigned int len);
+
+#endif
diff --git a/contrib/ffmpeg/libavutil/avutil.h b/contrib/ffmpeg/libavutil/avutil.h
new file mode 100644
index 000000000..08cc61567
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/avutil.h
@@ -0,0 +1,137 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_H
+#define AVUTIL_H
+
+/**
+ * @file avutil.h
+ * external api header.
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define LIBAVUTIL_VERSION_INT ((49<<16)+(1<<8)+0)
+#define LIBAVUTIL_VERSION 49.1.0
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
+
+
+#include "common.h"
+#include "mathematics.h"
+#include "rational.h"
+#include "integer.h"
+#include "intfloat_readwrite.h"
+#include "log.h"
+
+/**
+ * Pixel format. Notes:
+ *
+ * PIX_FMT_RGB32 is handled in an endian-specific manner. A RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little endian CPU architectures and ARGB on
+ * big endian CPUs.
+ *
+ * When the pixel format is palettized RGB (PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries), and is
+ * formatted the same as in PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ */
+enum PixelFormat {
+ PIX_FMT_NONE= -1,
+ PIX_FMT_YUV420P, ///< Planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ PIX_FMT_YUYV422, ///< Packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ PIX_FMT_RGB24, ///< Packed RGB 8:8:8, 24bpp, RGBRGB...
+ PIX_FMT_BGR24, ///< Packed RGB 8:8:8, 24bpp, BGRBGR...
+ PIX_FMT_YUV422P, ///< Planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ PIX_FMT_YUV444P, ///< Planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ PIX_FMT_RGB32, ///< Packed RGB 8:8:8, 32bpp, (msb)8A 8R 8G 8B(lsb), in cpu endianness
+ PIX_FMT_YUV410P, ///< Planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ PIX_FMT_YUV411P, ///< Planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ PIX_FMT_RGB565, ///< Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), in cpu endianness
+ PIX_FMT_RGB555, ///< Packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), in cpu endianness most significant bit to 0
+ PIX_FMT_GRAY8, ///< Y , 8bpp
+ PIX_FMT_MONOWHITE, ///< Y , 1bpp, 1 is white
+ PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black
+ PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
+ PIX_FMT_YUVJ420P, ///< Planar YUV 4:2:0, 12bpp, full scale (jpeg)
+ PIX_FMT_YUVJ422P, ///< Planar YUV 4:2:2, 16bpp, full scale (jpeg)
+ PIX_FMT_YUVJ444P, ///< Planar YUV 4:4:4, 24bpp, full scale (jpeg)
+ PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing (xvmc_render.h)
+ PIX_FMT_XVMC_MPEG2_IDCT,
+ PIX_FMT_UYVY422, ///< Packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ PIX_FMT_UYYVYY411, ///< Packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ PIX_FMT_BGR32, ///< Packed RGB 8:8:8, 32bpp, (msb)8A 8B 8G 8R(lsb), in cpu endianness
+ PIX_FMT_BGR565, ///< Packed RGB 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), in cpu endianness
+ PIX_FMT_BGR555, ///< Packed RGB 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), in cpu endianness most significant bit to 1
+ PIX_FMT_BGR8, ///< Packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ PIX_FMT_BGR4, ///< Packed RGB 1:2:1, 4bpp, (msb)1B 2G 1R(lsb)
+ PIX_FMT_BGR4_BYTE, ///< Packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ PIX_FMT_RGB8, ///< Packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ PIX_FMT_RGB4, ///< Packed RGB 1:2:1, 4bpp, (msb)1R 2G 1B(lsb)
+ PIX_FMT_RGB4_BYTE, ///< Packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ PIX_FMT_NV12, ///< Planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 for UV
+ PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ PIX_FMT_RGB32_1, ///< Packed RGB 8:8:8, 32bpp, (msb)8R 8G 8B 8A(lsb), in cpu endianness
+ PIX_FMT_BGR32_1, ///< Packed RGB 8:8:8, 32bpp, (msb)8B 8G 8R 8A(lsb), in cpu endianness
+
+ PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+};
+
+#ifdef WORDS_BIGENDIAN
+#define PIX_FMT_RGBA PIX_FMT_RGB32_1
+#define PIX_FMT_BGRA PIX_FMT_BGR32_1
+#define PIX_FMT_ARGB PIX_FMT_RGB32
+#define PIX_FMT_ABGR PIX_FMT_BGR32
+#define PIX_FMT_GRAY16 PIX_FMT_GRAY16BE
+#else
+#define PIX_FMT_RGBA PIX_FMT_BGR32
+#define PIX_FMT_BGRA PIX_FMT_RGB32
+#define PIX_FMT_ARGB PIX_FMT_BGR32_1
+#define PIX_FMT_ABGR PIX_FMT_RGB32_1
+#define PIX_FMT_GRAY16 PIX_FMT_GRAY16LE
+#endif
+
+#if LIBAVUTIL_VERSION_INT < (50<<16)
+#define PIX_FMT_UYVY411 PIX_FMT_UYYVYY411
+#define PIX_FMT_RGBA32 PIX_FMT_RGB32
+#define PIX_FMT_YUV422 PIX_FMT_YUYV422
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AVUTIL_H */
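
For illustration, packing an RGBA colour the way the PIX_FMT_RGB32 comment above describes (a sketch; the helper name is made up):

    /* Illustration only: (A << 24) | (R << 16) | (G << 8) | B, which lands in
     * memory as B,G,R,A on little-endian CPUs and as A,R,G,B on big-endian ones. */
    static inline uint32_t pack_rgb32_example(uint8_t r, uint8_t g, uint8_t b, uint8_t a)
    {
        return ((uint32_t)a << 24) | ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
    }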
diff --git a/src/libffmpeg/libavutil/bswap.h b/contrib/ffmpeg/libavutil/bswap.h
index 25d418c69..4614c9045 100644
--- a/src/libffmpeg/libavutil/bswap.h
+++ b/contrib/ffmpeg/libavutil/bswap.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file bswap.h
* byte swap.
@@ -16,7 +36,7 @@
# define LEGACY_REGS "=q"
#endif
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
static always_inline uint16_t bswap_16(uint16_t x)
{
__asm("rorw $8, %0" :
@@ -129,7 +149,7 @@ static inline uint64_t bswap_64(uint64_t x)
return r.ll;
#endif
}
-#endif /* !ARCH_X86 */
+#endif /* defined(ARCH_X86) */
#endif /* !HAVE_BYTESWAP_H */
diff --git a/src/libffmpeg/libavutil/common.h b/contrib/ffmpeg/libavutil/common.h
index b26c821f8..41bbe8f63 100644
--- a/src/libffmpeg/libavutil/common.h
+++ b/contrib/ffmpeg/libavutil/common.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file common.h
* common internal and external api header.
@@ -56,18 +76,15 @@
#endif
#endif
-#ifndef EMULATE_INTTYPES
-# include <inttypes.h>
+#ifndef attribute_deprecated
+#if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ > 0)
+# define attribute_deprecated __attribute__((deprecated))
#else
- typedef signed char int8_t;
- typedef signed short int16_t;
- typedef signed int int32_t;
- typedef unsigned char uint8_t;
- typedef unsigned short uint16_t;
- typedef unsigned int uint32_t;
- typedef signed long long int64_t;
- typedef unsigned long long uint64_t;
-#endif /* EMULATE_INTTYPES */
+# define attribute_deprecated
+#endif
+#endif
+
+# include <inttypes.h>
#ifndef PRId64
#define PRId64 "lld"
@@ -81,6 +98,10 @@
#define PRIx64 "llx"
#endif
+#ifndef PRIX64
+#define PRIX64 "llX"
+#endif
+
#ifndef PRId32
#define PRId32 "d"
#endif
@@ -125,16 +146,6 @@
#define UINT64_MAX uint64_t_C(0xFFFFFFFFFFFFFFFF)
#endif
-#ifdef EMULATE_FAST_INT
-typedef signed char int_fast8_t;
-typedef signed int int_fast16_t;
-typedef signed int int_fast32_t;
-typedef unsigned char uint_fast8_t;
-typedef unsigned int uint_fast16_t;
-typedef unsigned int uint_fast32_t;
-typedef uint64_t uint_fast64_t;
-#endif
-
#ifndef INT_BIT
# if INT_MAX != 2147483647
# define INT_BIT 64
@@ -164,11 +175,14 @@ typedef uint64_t uint_fast64_t;
#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
/* assume b>0 */
#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
-#define ABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+
/* misc math functions */
extern FF_IMPORT_ATTR const uint8_t ff_log2_tab[256];
@@ -207,7 +221,21 @@ static inline int av_log2_16bit(unsigned int v)
/* median of 3 */
static inline int mid_pred(int a, int b, int c)
{
-#if 0
+#if HAVE_CMOV
+ int i=b;
+ asm volatile(
+ "cmp %2, %1 \n\t"
+ "cmovg %1, %0 \n\t"
+ "cmovg %2, %1 \n\t"
+ "cmp %3, %1 \n\t"
+ "cmovl %3, %1 \n\t"
+ "cmp %1, %0 \n\t"
+ "cmovg %1, %0 \n\t"
+ :"+&r"(i), "+&r"(a)
+ :"r"(b), "r"(c)
+ );
+ return i;
+#elif 0
int t= (a-b)&((a-b)>>31);
a-=t;
b+=t;
@@ -273,7 +301,19 @@ static inline int ff_get_fourcc(const char *s){
#define MKTAG(a,b,c,d) (a | (b << 8) | (c << 16) | (d << 24))
#define MKBETAG(a,b,c,d) (d | (c << 8) | (b << 16) | (a << 24))
-
+/*!
+ * \def GET_UTF8(val, GET_BYTE, ERROR)
+ * converts a utf-8 character (up to 4 bytes long) to its 32-bit ucs-4 encoded form
+ * \param val is the output and should be of type uint32_t. It holds the converted
+ * ucs-4 character and should be a left value.
+ * \param GET_BYTE gets utf-8 encoded bytes from any proper source. It can be
+ * a function or a statement whose return value or evaluated value is of type
+ * uint8_t. It will be executed up to 4 times for values in the valid utf-8 range,
+ * and up to 7 times in the general case.
+ * \param ERROR action that should be taken when an invalid utf-8 byte is returned
+ * from GET_BYTE. It should be a statement that jumps out of the macro,
+ * like exit(), goto, return, break, or continue.
+ */
#define GET_UTF8(val, GET_BYTE, ERROR)\
val= GET_BYTE;\
{\
@@ -289,7 +329,43 @@ static inline int ff_get_fourcc(const char *s){
}\
}
-#if defined(ARCH_X86) || defined(ARCH_X86_64) || defined(ARCH_POWERPC)
+/*!
+ * \def PUT_UTF8(val, tmp, PUT_BYTE)
+ * converts a 32-bit unicode character to its utf-8 encoded form (up to 4 bytes long).
+ * \param val is an input only argument and should be of type uint32_t. It holds
+ * a ucs4 encoded unicode character that is to be converted to utf-8. If
+ * val is given as a function it's executed only once.
+ * \param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * outputted by PUT_BYTE.
+ * \param PUT_BYTE writes the converted utf-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;". PUT_BYTE will be
+ * executed up to 4 times for values in the valid utf-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE)\
+ {\
+ int bytes, shift;\
+ uint32_t in = val;\
+ if (in < 0x80) {\
+ tmp = in;\
+ PUT_BYTE\
+ } else {\
+ bytes = (av_log2(in) + 4) / 5;\
+ shift = (bytes - 1) * 6;\
+ tmp = (256 - (256 >> bytes)) | (in >> shift);\
+ PUT_BYTE\
+ while (shift >= 6) {\
+ shift -= 6;\
+ tmp = 0x80 | ((in >> shift) & 0x3f);\
+ PUT_BYTE\
+ }\
+ }\
+ }
+
+#if defined(ARCH_X86) || defined(ARCH_POWERPC)
#if defined(ARCH_X86_64)
static inline uint64_t read_time(void)
{
@@ -299,7 +375,7 @@ static inline uint64_t read_time(void)
);
return (d << 32) | (a & 0xffffffff);
}
-#elif defined(ARCH_X86)
+#elif defined(ARCH_X86_32)
static inline long long read_time(void)
{
long long l;
@@ -344,7 +420,7 @@ tend= read_time();\
tcount++;\
}else\
tskip_count++;\
- if(256*256*256*64%(tcount+tskip_count)==0){\
+ if(((tcount+tskip_count)&(tcount+tskip_count-1))==0){\
av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" dezicycles in %s, %d runs, %d skips\n", tsum*10/tcount, id, tcount, tskip_count);\
}\
}
@@ -354,20 +430,20 @@ tend= read_time();\
#endif
/* memory */
+
+#ifdef __GNUC__
+ #define DECLARE_ALIGNED(n,t,v) t v __attribute__ ((aligned (n)))
+#else
+ #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+#endif
+
+/* memory */
void *av_malloc(unsigned int size);
void *av_realloc(void *ptr, unsigned int size);
void av_free(void *ptr);
-/* xine: inline causes trouble for debug compiling */
-#ifdef DISABLE_INLINE
-# ifdef inline
-# undef inline
-# endif
-# ifdef always_inline
-# undef always_inline
-# endif
-# define inline
-# define always_inline
-#endif
+void *av_mallocz(unsigned int size);
+char *av_strdup(const char *s);
+void av_freep(void *ptr);
#endif /* COMMON_H */
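
For illustration, a usage sketch of the PUT_UTF8 macro documented above, with PUT_BYTE written as a statement that consumes tmp (the function name is made up):

    /* Illustration only: encodes U+20AC (euro sign) into buf, returning the
     * number of bytes written (3: 0xE2 0x82 0xAC). */
    static int put_utf8_example(uint8_t *buf)
    {
        uint8_t tmp;
        uint32_t cp = 0x20AC;
        int n = 0;
        PUT_UTF8(cp, tmp, buf[n++] = tmp;)
        return n;
    }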
diff --git a/src/libffmpeg/libavutil/crc.c b/contrib/ffmpeg/libavutil/crc.c
index 13be2020d..baa605d32 100644
--- a/src/libffmpeg/libavutil/crc.c
+++ b/contrib/ffmpeg/libavutil/crc.c
@@ -1,3 +1,23 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#include "common.h"
#include "crc.h"
diff --git a/contrib/ffmpeg/libavutil/crc.h b/contrib/ffmpeg/libavutil/crc.h
new file mode 100644
index 000000000..e739c309b
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/crc.h
@@ -0,0 +1,35 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef CRC_H
+#define CRC_H
+
+typedef uint32_t AVCRC;
+
+extern AVCRC *av_crcEDB88320;
+extern AVCRC *av_crc04C11DB7;
+extern AVCRC *av_crc8005;
+extern AVCRC *av_crc07;
+
+int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);
+uint32_t av_crc(const AVCRC *ctx, uint32_t start_crc, const uint8_t *buffer, size_t length);
+
+#endif /* CRC_H */
+
diff --git a/contrib/ffmpeg/libavutil/fifo.c b/contrib/ffmpeg/libavutil/fifo.c
new file mode 100644
index 000000000..550f13de5
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/fifo.c
@@ -0,0 +1,137 @@
+/*
+ * A very simple circular buffer FIFO implementation
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ * Copyright (c) 2006 Roman Shaposhnik
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "common.h"
+#include "fifo.h"
+
+int av_fifo_init(AVFifoBuffer *f, int size)
+{
+ f->buffer = av_malloc(size);
+ if (!f->buffer)
+ return -1;
+ f->end = f->buffer + size;
+ f->wptr = f->rptr = f->buffer;
+ return 0;
+}
+
+void av_fifo_free(AVFifoBuffer *f)
+{
+ av_free(f->buffer);
+}
+
+int av_fifo_size(AVFifoBuffer *f)
+{
+ int size = f->wptr - f->rptr;
+ if (size < 0)
+ size += f->end - f->buffer;
+ return size;
+}
+
+/**
+ * Get data from the fifo (returns -1 if not enough data).
+ */
+int av_fifo_read(AVFifoBuffer *f, uint8_t *buf, int buf_size)
+{
+ int len;
+ int size = f->wptr - f->rptr;
+ if (size < 0)
+ size += f->end - f->buffer;
+
+ if (size < buf_size)
+ return -1;
+ while (buf_size > 0) {
+ len = FFMIN(f->end - f->rptr, buf_size);
+ memcpy(buf, f->rptr, len);
+ buf += len;
+ f->rptr += len;
+ if (f->rptr >= f->end)
+ f->rptr = f->buffer;
+ buf_size -= len;
+ }
+ return 0;
+}
+
+/**
+ * Resizes a FIFO.
+ */
+void av_fifo_realloc(AVFifoBuffer *f, unsigned int new_size) {
+ unsigned int old_size= f->end - f->buffer;
+
+ if(old_size < new_size){
+ uint8_t *old= f->buffer;
+
+ f->buffer= av_realloc(f->buffer, new_size);
+
+ f->rptr += f->buffer - old;
+ f->wptr += f->buffer - old;
+
+ if(f->wptr < f->rptr){
+ memmove(f->rptr + new_size - old_size, f->rptr, f->buffer + old_size - f->rptr);
+ f->rptr += new_size - old_size;
+ }
+ f->end= f->buffer + new_size;
+ }
+}
+
+void av_fifo_write(AVFifoBuffer *f, const uint8_t *buf, int size)
+{
+ int len;
+
+ while (size > 0) {
+ len = FFMIN(f->end - f->wptr, size);
+ memcpy(f->wptr, buf, len);
+ f->wptr += len;
+ if (f->wptr >= f->end)
+ f->wptr = f->buffer;
+ buf += len;
+ size -= len;
+ }
+}
+
+
+/* get data from the fifo (return -1 if not enough data) */
+int av_fifo_generic_read(AVFifoBuffer *f, int buf_size, void (*func)(void*, void*, int), void* dest)
+{
+ int len;
+ int size = f->wptr - f->rptr;
+ if (size < 0)
+ size += f->end - f->buffer;
+
+ if (size < buf_size)
+ return -1;
+ while (buf_size > 0) {
+ len = FFMIN(f->end - f->rptr, buf_size);
+ func(dest, f->rptr, len);
+ f->rptr += len;
+ if (f->rptr >= f->end)
+ f->rptr = f->buffer;
+ buf_size -= len;
+ }
+ return 0;
+}
+
+/* discard data from the fifo */
+void av_fifo_drain(AVFifoBuffer *f, int size)
+{
+ f->rptr += size;
+ if (f->rptr >= f->end)
+ f->rptr -= f->end - f->buffer;
+}
diff --git a/contrib/ffmpeg/libavutil/fifo.h b/contrib/ffmpeg/libavutil/fifo.h
new file mode 100644
index 000000000..9dec0e62d
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/fifo.h
@@ -0,0 +1,25 @@
+#ifndef FIFO_H
+#define FIFO_H
+
+typedef struct AVFifoBuffer {
+ uint8_t *buffer;
+ uint8_t *rptr, *wptr, *end;
+} AVFifoBuffer;
+
+int av_fifo_init(AVFifoBuffer *f, int size);
+void av_fifo_free(AVFifoBuffer *f);
+int av_fifo_size(AVFifoBuffer *f);
+int av_fifo_read(AVFifoBuffer *f, uint8_t *buf, int buf_size);
+int av_fifo_generic_read(AVFifoBuffer *f, int buf_size, void (*func)(void*, void*, int), void* dest);
+void av_fifo_write(AVFifoBuffer *f, const uint8_t *buf, int size);
+void av_fifo_realloc(AVFifoBuffer *f, unsigned int size);
+void av_fifo_drain(AVFifoBuffer *f, int size);
+
+static inline uint8_t av_fifo_peek(AVFifoBuffer *f, int offs)
+{
+ uint8_t *ptr = f->rptr + offs;
+ if (ptr >= f->end)
+ ptr -= f->end - f->buffer;
+ return *ptr;
+}
+#endif /* FIFO_H */
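
For illustration, a usage sketch of the FIFO API declared above (error handling kept minimal; the function name is made up):

    /* Illustration only: initialize a small FIFO, queue 8 bytes, read them back. */
    static int fifo_example(void)
    {
        AVFifoBuffer f;
        uint8_t in[8] = {1,2,3,4,5,6,7,8}, out[8];
        if (av_fifo_init(&f, 32) < 0)
            return -1;
        av_fifo_write(&f, in, sizeof(in));      /* av_fifo_size(&f) is now 8 */
        if (av_fifo_read(&f, out, sizeof(out)) < 0) {
            av_fifo_free(&f);
            return -1;
        }
        av_fifo_free(&f);
        return 0;                               /* out holds the bytes written above */
    }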
diff --git a/src/libffmpeg/libavutil/integer.c b/contrib/ffmpeg/libavutil/integer.c
index 1820dbf59..09cd756e2 100644
--- a/src/libffmpeg/libavutil/integer.c
+++ b/contrib/ffmpeg/libavutil/integer.c
@@ -2,18 +2,20 @@
* arbitrary precision integers
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -124,8 +126,8 @@ AVInteger av_mod_i(AVInteger *quot, AVInteger a, AVInteger b){
AVInteger quot_temp;
if(!quot) quot = &quot_temp;
- assert((int16_t)a.v[AV_INTEGER_SIZE-1] >= 0 && (int16_t)b.v[AV_INTEGER_SIZE-1] >= 0);
- assert(av_log2_i(b)>=0);
+ assert((int16_t)a[AV_INTEGER_SIZE-1] >= 0 && (int16_t)b[AV_INTEGER_SIZE-1] >= 0);
+ assert(av_log2(b)>=0);
if(i > 0)
b= av_shr_i(b, -i);
diff --git a/src/libffmpeg/libavutil/integer.h b/contrib/ffmpeg/libavutil/integer.h
index 523752912..a50ad9bae 100644
--- a/src/libffmpeg/libavutil/integer.h
+++ b/contrib/ffmpeg/libavutil/integer.h
@@ -2,18 +2,20 @@
* arbitrary precision integers
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/src/libffmpeg/libavutil/internal.h b/contrib/ffmpeg/libavutil/internal.h
index 266976c94..4cb0d2a7e 100644
--- a/src/libffmpeg/libavutil/internal.h
+++ b/contrib/ffmpeg/libavutil/internal.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
/**
* @file internal.h
* common internal api header.
@@ -10,32 +30,17 @@
# define PIC
#endif
-# ifndef ENODATA
-# define ENODATA 61
-# endif
+#ifndef ENODATA
+# define ENODATA 61
+#endif
#include "bswap.h"
#include <stddef.h>
#ifndef offsetof
-# define offsetof(T,F) ((unsigned int)((char *)&((T *)0)->F))
+# define offsetof(T,F) ((unsigned int)((char *)&((T *)0)->F))
#endif
-#define AVOPTION_CODEC_BOOL(name, help, field) \
- { name, help, offsetof(AVCodecContext, field), FF_OPT_TYPE_BOOL }
-#define AVOPTION_CODEC_DOUBLE(name, help, field, minv, maxv, defval) \
- { name, help, offsetof(AVCodecContext, field), FF_OPT_TYPE_DOUBLE, minv, maxv, defval }
-#define AVOPTION_CODEC_FLAG(name, help, field, flag, defval) \
- { name, help, offsetof(AVCodecContext, field), FF_OPT_TYPE_FLAG, flag, 0, defval }
-#define AVOPTION_CODEC_INT(name, help, field, minv, maxv, defval) \
- { name, help, offsetof(AVCodecContext, field), FF_OPT_TYPE_INT, minv, maxv, defval }
-#define AVOPTION_CODEC_STRING(name, help, field, str, val) \
- { name, help, offsetof(AVCodecContext, field), FF_OPT_TYPE_STRING, .defval = val, .defstr = str }
-#define AVOPTION_CODEC_RCOVERRIDE(name, help, field) \
- { name, help, offsetof(AVCodecContext, field), FF_OPT_TYPE_RCOVERRIDE, .defval = 0, .defstr = NULL }
-#define AVOPTION_SUB(ptr) { .name = NULL, .help = (const char*)ptr }
-#define AVOPTION_END() AVOPTION_SUB(NULL)
-
#ifdef __MINGW32__
# ifdef _DEBUG
# define DEBUG
@@ -46,82 +51,87 @@
# ifdef CONFIG_WINCE
# define perror(a)
+# define abort()
# endif
/* __MINGW32__ end */
#elif defined (CONFIG_OS2)
/* OS/2 EMX */
-#include <float.h>
+# include <float.h>
#endif /* !__MINGW32__ && CONFIG_OS2 */
-# ifdef USE_FASTMEMCPY
-# include "fastmemcpy.h"
-# endif
+#ifdef USE_FASTMEMCPY
+# include "libvo/fastmemcpy.h"
+#endif
// Use rip-relative addressing if compiling PIC code on x86-64.
-# if defined(__MINGW32__) || defined(__CYGWIN__) || \
- defined(__OS2__) || (defined (__OpenBSD__) && !defined(__ELF__))
-# if defined(ARCH_X86_64) && defined(PIC)
-# define MANGLE(a) "_" #a"(%%rip)"
-# else
-# define MANGLE(a) "_" #a
-# endif
+#if defined(__MINGW32__) || defined(__CYGWIN__) || \
+ defined(__OS2__) || (defined (__OpenBSD__) && !defined(__ELF__))
+# if defined(ARCH_X86_64) && defined(PIC)
+# define MANGLE(a) "_" #a"(%%rip)"
# else
-# if defined(ARCH_X86_64) && defined(PIC)
-# define MANGLE(a) #a"(%%rip)"
-# elif defined(CONFIG_DARWIN)
-# define MANGLE(a) "_" #a
-# else
-# define MANGLE(a) #a
-# endif
+# define MANGLE(a) "_" #a
# endif
+#else
+# if defined(ARCH_X86_64) && defined(PIC)
+# define MANGLE(a) #a"(%%rip)"
+# elif defined(CONFIG_DARWIN)
+# define MANGLE(a) "_" #a
+# else
+# define MANGLE(a) #a
+# endif
+#endif
/* debug stuff */
-# if !defined(DEBUG) && !defined(NDEBUG)
-# define NDEBUG
-# endif
-# include <assert.h>
+#if !defined(DEBUG) && !defined(NDEBUG)
+# define NDEBUG
+#endif
+#include <assert.h>
/* dprintf macros */
-# ifdef DEBUG
-# ifdef __GNUC__
-# define dprintf(fmt,args...) av_log(NULL, AV_LOG_DEBUG, fmt, ##args)
-# else
-# define dprintf(fmt,...) av_log(NULL, AV_LOG_DEBUG, fmt, __VA_ARGS__)
-# endif
-# else
-# define dprintf(fmt,...)
-# endif
+#ifdef DEBUG
+# define dprintf(fmt,...) av_log(NULL, AV_LOG_DEBUG, fmt, __VA_ARGS__)
+#else
+# define dprintf(fmt,...)
+#endif
-# ifdef CONFIG_WINCE
-# define abort()
-# endif
+#define av_abort() do { av_log(NULL, AV_LOG_ERROR, "Abort at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
-# define av_abort() do { av_log(NULL, AV_LOG_ERROR, "Abort at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
+/* math */
-extern const uint32_t inverse[256];
+extern const uint32_t ff_inverse[256];
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
# define FASTDIV(a,b) \
({\
int ret,dmy;\
asm volatile(\
"mull %3"\
:"=d"(ret),"=a"(dmy)\
- :"1"(a),"g"(inverse[b])\
+ :"1"(a),"g"(ff_inverse[b])\
+ );\
+ ret;\
+ })
+#elif defined(ARCH_ARMV4L)
+# define FASTDIV(a,b) \
+ ({\
+ int ret,dmy;\
+ asm volatile(\
+ "umull %1, %0, %2, %3"\
+ :"=&r"(ret),"=&r"(dmy)\
+ :"r"(a),"r"(ff_inverse[b])\
);\
ret;\
})
#elif defined(CONFIG_FASTDIV)
-# define FASTDIV(a,b) ((uint32_t)((((uint64_t)a)*inverse[b])>>32))
+# define FASTDIV(a,b) ((uint32_t)((((uint64_t)a)*ff_inverse[b])>>32))
#else
# define FASTDIV(a,b) ((a)/(b))
#endif
-/* math */
extern FF_IMPORT_ATTR const uint8_t ff_sqrt_tab[128];
static inline int ff_sqrt(int a)
@@ -142,7 +152,7 @@ static inline int ff_sqrt(int a)
return ret;
}
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
#define MASK_ABS(mask, level)\
asm volatile(\
"cdq \n\t"\
@@ -156,7 +166,7 @@ static inline int ff_sqrt(int a)
level= (level^mask)-mask;
#endif
-#if __CPU__ >= 686 && !defined(RUNTIME_CPUDETECT)
+#ifdef HAVE_CMOV
#define COPY3_IF_LT(x,y,a,b,c,d)\
asm volatile (\
"cmpl %0, %3 \n\t"\
@@ -205,7 +215,7 @@ if((y)<(x)){\
static always_inline long int lrintf(float x)
{
#ifdef __MINGW32__
-# ifdef ARCH_X86
+# ifdef ARCH_X86_32
int32_t i;
asm volatile(
"fistpl %0\n\t"
@@ -215,7 +225,7 @@ static always_inline long int lrintf(float x)
# else
/* XXX: incorrect, but make it compile */
return (int)(x + (x < 0 ? -0.5 : 0.5));
-# endif /* ARCH_X86 */
+# endif /* ARCH_X86_32 */
#else
return (int)(rint(x));
#endif /* __MINGW32__ */
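
For illustration, a worked note on the FASTDIV macro above, assuming ff_inverse[b] holds roughly 2^32/b rounded up (the exact table value quoted below is an assumption about mathematics.c):

    /* Illustration only: FASTDIV(100, 3), taking ff_inverse[3] == 1431655766
     * (about 2^32 / 3, rounded up):
     *     (uint64_t)100 * 1431655766 == 143165576600
     *     143165576600 >> 32         == 33
     * which matches 100 / 3 for operands in the range the codecs use. */
    static int fastdiv_example(void)
    {
        return FASTDIV(100, 3) == 100 / 3;   /* non-zero: both give 33 */
    }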
diff --git a/contrib/ffmpeg/libavutil/intfloat_readwrite.c b/contrib/ffmpeg/libavutil/intfloat_readwrite.c
new file mode 100644
index 000000000..261cf76c3
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/intfloat_readwrite.c
@@ -0,0 +1,97 @@
+/*
+ * portable IEEE float/double read/write functions
+ *
+ * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file intfloat_readwrite.c
+ * Portable IEEE float/double read/write functions.
+ */
+
+#include "common.h"
+#include "intfloat_readwrite.h"
+
+double av_int2dbl(int64_t v){
+ if(v+v > 0xFFEULL<<52)
+ return 0.0/0.0;
+ return ldexp(((v&((1LL<<52)-1)) + (1LL<<52)) * (v>>63|1), (v>>52&0x7FF)-1075);
+}
+
+float av_int2flt(int32_t v){
+ if(v+v > 0xFF000000U)
+ return 0.0/0.0;
+ return ldexp(((v&0x7FFFFF) + (1<<23)) * (v>>31|1), (v>>23&0xFF)-150);
+}
+
+double av_ext2dbl(const AVExtFloat ext){
+ uint64_t m = 0;
+ int e, i;
+
+ for (i = 0; i < 8; i++)
+ m = (m<<8) + ext.mantissa[i];
+ e = (((int)ext.exponent[0]&0x7f)<<8) | ext.exponent[1];
+ if (e == 0x7fff && m)
+ return 0.0/0.0;
+ e -= 16383 + 63; /* In IEEE 80 bits, the whole (i.e. 1.xxxx)
+ * mantissa bit is written as opposed to the
+ * single and double precision formats */
+ if (ext.exponent[0]&0x80)
+ m= -m;
+ return ldexp(m, e);
+}
+
+int64_t av_dbl2int(double d){
+ int e;
+ if ( !d) return 0;
+ else if(d-d) return 0x7FF0000000000000LL + ((int64_t)(d<0)<<63) + (d!=d);
+ d= frexp(d, &e);
+ return (int64_t)(d<0)<<63 | (e+1022LL)<<52 | (int64_t)((fabs(d)-0.5)*(1LL<<53));
+}
+
+int32_t av_flt2int(float d){
+ int e;
+ if ( !d) return 0;
+ else if(d-d) return 0x7F800000 + ((d<0)<<31) + (d!=d);
+ d= frexp(d, &e);
+ return (d<0)<<31 | (e+126)<<23 | (int64_t)((fabs(d)-0.5)*(1<<24));
+}
+
+AVExtFloat av_dbl2ext(double d){
+ struct AVExtFloat ext= {{0}};
+ int e, i; double f; uint64_t m;
+
+ f = fabs(frexp(d, &e));
+ if (f >= 0.5 && f < 1) {
+ e += 16382;
+ ext.exponent[0] = e>>8;
+ ext.exponent[1] = e;
+ m = (uint64_t)ldexp(f, 64);
+ for (i=0; i < 8; i++)
+ ext.mantissa[i] = m>>(56-(i<<3));
+ } else if (f != 0.0) {
+ ext.exponent[0] = 0x7f; ext.exponent[1] = 0xff;
+ if (f != 1/0.0)
+ ext.mantissa[0] = ~0;
+ }
+ if (d < 0)
+ ext.exponent[0] |= 0x80;
+ return ext;
+}
+
diff --git a/contrib/ffmpeg/libavutil/intfloat_readwrite.h b/contrib/ffmpeg/libavutil/intfloat_readwrite.h
new file mode 100644
index 000000000..c535b64c8
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/intfloat_readwrite.h
@@ -0,0 +1,39 @@
+/*
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef INTFLOAT_READWRITE_H
+#define INTFLOAT_READWRITE_H
+
+#include "common.h"
+
+/* IEEE 80 bits extended float */
+typedef struct AVExtFloat {
+ uint8_t exponent[2];
+ uint8_t mantissa[8];
+} AVExtFloat;
+
+double av_int2dbl(int64_t v);
+float av_int2flt(int32_t v);
+double av_ext2dbl(const AVExtFloat ext);
+int64_t av_dbl2int(double d);
+int32_t av_flt2int(float d);
+AVExtFloat av_dbl2ext(double d);
+
+#endif /* INTFLOAT_READWRITE_H */
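
For illustration, a round-trip sketch with the conversion functions declared above (the function name is made up):

    /* Illustration only: a finite double survives the int64 round trip exactly. */
    static int intfloat_roundtrip_example(void)
    {
        int64_t bits = av_dbl2int(1.0);     /* 0x3FF0000000000000 for 1.0 */
        double  back = av_int2dbl(bits);
        return back == 1.0;                 /* non-zero */
    }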
diff --git a/src/libffmpeg/libavutil/lls.c b/contrib/ffmpeg/libavutil/lls.c
index 6bf4d9278..aa9467dce 100644
--- a/src/libffmpeg/libavutil/lls.c
+++ b/contrib/ffmpeg/libavutil/lls.c
@@ -3,18 +3,20 @@
*
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavutil/lls.h b/contrib/ffmpeg/libavutil/lls.h
index 944fba75d..59ad2e958 100644
--- a/src/libffmpeg/libavutil/lls.h
+++ b/contrib/ffmpeg/libavutil/lls.h
@@ -3,18 +3,20 @@
*
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavutil/log.c b/contrib/ffmpeg/libavutil/log.c
index f43593ad9..8b2dc6f6d 100644
--- a/src/libffmpeg/libavutil/log.c
+++ b/contrib/ffmpeg/libavutil/log.c
@@ -2,18 +2,20 @@
* log functions
* Copyright (c) 2003 Michel Bardiaux
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/src/libffmpeg/libavutil/log.h b/contrib/ffmpeg/libavutil/log.h
index 13366064e..0ff1f9fcf 100644
--- a/src/libffmpeg/libavutil/log.h
+++ b/contrib/ffmpeg/libavutil/log.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#ifndef LOG_H
#define LOG_H
@@ -13,7 +33,7 @@ struct AVCLASS {
or AVFormatContext, which begin with an AVClass.
Needed because av_log is in libavcodec and has no visibility
of AVIn/OutputFormat */
- struct AVOption *option;
+ const struct AVOption *option;
};
/* av_log API */
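The constification above matters because every loggable context begins with an AVClass; a minimal sketch of such a context, with the field order taken from the { "Postproc", context_to_name, NULL } initializer that appears in the libpostproc diff further down (illustrative, not the canonical definition):

    /* Hypothetical context usable with av_log(); the AVClass pointer must be
     * the first member so av_log can find the item_name callback. */
    static const char *my_item_name(void *ptr) { return "mydemo"; }

    static AVClass my_class = { "MyDemo", my_item_name, NULL };

    typedef struct MyDemoContext {
        AVClass *av_class;      /* first field, points at my_class */
        int frame_count;
    } MyDemoContext;

    /* usage:
     *   MyDemoContext c = { &my_class };
     *   av_log(&c, AV_LOG_INFO, "processed %d frames\n", c.frame_count);
     */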
diff --git a/src/libffmpeg/libavutil/mathematics.c b/contrib/ffmpeg/libavutil/mathematics.c
index 951324e99..4be027d9d 100644
--- a/src/libffmpeg/libavutil/mathematics.c
+++ b/contrib/ffmpeg/libavutil/mathematics.c
@@ -1,18 +1,20 @@
/*
* Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -129,7 +131,7 @@ main(){
if((double)a * (double)b / (double)c > (1LL<<63))
continue;
- if(d!=e) printf("%Ld*%Ld/%Ld= %Ld=%Ld\n", a, b, c, d, e);
+ if(d!=e) printf("%"PRId64"*%"PRId64"/%"PRId64"= %"PRId64"=%"PRId64"\n", a, b, c, d, e);
}
}
}
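The printf changes in this self-test swap the glibc-only "%Ld" for the <inttypes.h> PRId64 macro, which expands to the correct length modifier for int64_t on each supported libc. A self-contained illustration of the idiom:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void){
        int64_t v = 3000000000LL * 3;
        /* PRId64 expands to "lld", "ld", or "I64d" as the platform requires */
        printf("v = %"PRId64"\n", v);
        return 0;
    }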
diff --git a/src/libffmpeg/libavutil/mathematics.h b/contrib/ffmpeg/libavutil/mathematics.h
index 0cf726cbe..0b74b254b 100644
--- a/src/libffmpeg/libavutil/mathematics.h
+++ b/contrib/ffmpeg/libavutil/mathematics.h
@@ -1,3 +1,23 @@
+/*
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
#ifndef MATHEMATICS_H
#define MATHEMATICS_H
diff --git a/src/libffmpeg/libavutil/md5.c b/contrib/ffmpeg/libavutil/md5.c
index 32eca3a8e..d33ad1483 100644
--- a/src/libffmpeg/libavutil/md5.c
+++ b/contrib/ffmpeg/libavutil/md5.c
@@ -2,18 +2,20 @@
* Copyright (C) 2006 Michael Niedermayer (michaelni@gmx.at)
* Copyright (C) 2003-2005 by Christopher R. Hertel (crh@ubiqx.mn.org)
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* References:
@@ -174,11 +176,11 @@ main(){
uint8_t in[1000];
for(i=0; i<1000; i++) in[i]= i*i;
- av_md5_sum( (uint8_t*)&md5val, in, 1000); printf("%lld\n", md5val);
- av_md5_sum( (uint8_t*)&md5val, in, 63); printf("%lld\n", md5val);
- av_md5_sum( (uint8_t*)&md5val, in, 64); printf("%lld\n", md5val);
- av_md5_sum( (uint8_t*)&md5val, in, 65); printf("%lld\n", md5val);
+ av_md5_sum( (uint8_t*)&md5val, in, 1000); printf("%"PRId64"\n", md5val);
+ av_md5_sum( (uint8_t*)&md5val, in, 63); printf("%"PRId64"\n", md5val);
+ av_md5_sum( (uint8_t*)&md5val, in, 64); printf("%"PRId64"\n", md5val);
+ av_md5_sum( (uint8_t*)&md5val, in, 65); printf("%"PRId64"\n", md5val);
for(i=0; i<1000; i++) in[i]= i % 127;
- av_md5_sum( (uint8_t*)&md5val, in, 999); printf("%lld\n", md5val);
+ av_md5_sum( (uint8_t*)&md5val, in, 999); printf("%"PRId64"\n", md5val);
}
#endif
diff --git a/contrib/ffmpeg/libavutil/md5.h b/contrib/ffmpeg/libavutil/md5.h
new file mode 100644
index 000000000..8d1b4b5fe
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/md5.h
@@ -0,0 +1,34 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef MD5_H
+#define MD5_H
+
+extern const int av_md5_size;
+
+struct AVMD5;
+
+void av_md5_init(struct AVMD5 *ctx);
+void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, const int len);
+void av_md5_final(struct AVMD5 *ctx, uint8_t *dst);
+void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);
+
+#endif /* MD5_H */
+
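A short usage sketch against the declarations above; the 16-byte digest size and the hex dump are the only assumptions beyond what the header states:

    #include <inttypes.h>
    #include <stdio.h>
    #include "md5.h"

    int main(void){
        const uint8_t msg[] = "abc";
        uint8_t digest[16];              /* MD5 produces 128 bits */
        int i;

        av_md5_sum(digest, msg, 3);      /* one-shot hash of 3 bytes */
        for(i = 0; i < 16; i++)
            printf("%02x", digest[i]);
        printf("\n");
        return 0;
    }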
diff --git a/src/libffmpeg/libavcodec/mem.c b/contrib/ffmpeg/libavutil/mem.c
index 24d75e948..f43fb5420 100644
--- a/src/libffmpeg/libavcodec/mem.c
+++ b/contrib/ffmpeg/libavutil/mem.c
@@ -1,28 +1,30 @@
/*
- * default memory allocator for libavcodec
+ * default memory allocator for libavutil
* Copyright (c) 2002 Fabrice Bellard.
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file mem.c
- * default memory allocator for libavcodec.
+ * default memory allocator for libavutil.
*/
-#include "avcodec.h"
+#include "common.h"
/* here we can use OS dependant allocation functions */
#undef malloc
@@ -45,16 +47,18 @@
void *av_malloc(unsigned int size)
{
void *ptr;
-#ifdef MEMALIGN_HACK
+#ifdef CONFIG_MEMALIGN_HACK
long diff;
#endif
- /* lets disallow possible ambiguous cases */
- if(size > INT_MAX)
+ /* let's disallow possible ambiguous cases */
+ if(size > (INT_MAX-16) )
return NULL;
-#ifdef MEMALIGN_HACK
- ptr = malloc(size+16+1);
+#ifdef CONFIG_MEMALIGN_HACK
+ ptr = malloc(size+16);
+ if(!ptr)
+ return ptr;
diff= ((-(long)ptr - 1)&15) + 1;
ptr += diff;
((char*)ptr)[-1]= diff;
@@ -99,16 +103,16 @@ void *av_malloc(unsigned int size)
*/
void *av_realloc(void *ptr, unsigned int size)
{
-#ifdef MEMALIGN_HACK
+#ifdef CONFIG_MEMALIGN_HACK
int diff;
#endif
- /* lets disallow possible ambiguous cases */
- if(size > INT_MAX)
+ /* let's disallow possible ambiguous cases */
+ if(size > (INT_MAX-16) )
return NULL;
-#ifdef MEMALIGN_HACK
- //FIXME this isnt aligned correctly though it probably isnt needed
+#ifdef CONFIG_MEMALIGN_HACK
+ //FIXME this isn't aligned correctly, though it probably isn't needed
if(!ptr) return av_malloc(size);
diff= ((char*)ptr)[-1];
return realloc(ptr - diff, size + diff) + diff;
@@ -120,16 +124,48 @@ void *av_realloc(void *ptr, unsigned int size)
/**
* Free memory which has been allocated with av_malloc(z)() or av_realloc().
* NOTE: ptr = NULL is explicetly allowed
- * Note2: it is recommanded that you use av_freep() instead
+ * Note2: it is recommended that you use av_freep() instead
*/
void av_free(void *ptr)
{
/* XXX: this test should not be needed on most libcs */
if (ptr)
-#ifdef MEMALIGN_HACK
+#ifdef CONFIG_MEMALIGN_HACK
free(ptr - ((char*)ptr)[-1]);
#else
free(ptr);
#endif
}
+/**
+ * Frees memory and sets the pointer to NULL.
+ * @param arg pointer to the pointer which should be freed
+ */
+void av_freep(void *arg)
+{
+ void **ptr= (void**)arg;
+ av_free(*ptr);
+ *ptr = NULL;
+}
+
+void *av_mallocz(unsigned int size)
+{
+ void *ptr;
+
+ ptr = av_malloc(size);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+char *av_strdup(const char *s)
+{
+ char *ptr;
+ int len;
+ len = strlen(s) + 1;
+ ptr = av_malloc(len);
+ if (ptr)
+ memcpy(ptr, s, len);
+ return ptr;
+}
+
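The three helpers appended here round out the allocator: av_mallocz() zero-fills, av_strdup() copies a string with av_malloc(), and av_freep() releases and NULLs the caller's pointer in one step. A brief usage sketch, assuming common.h exposes the prototypes:

    #include "common.h"

    static void mem_example(void){
        char *name  = av_strdup("hdeblock:a");         /* av_malloc + memcpy */
        int  *table = av_mallocz(64 * sizeof(*table)); /* zero-initialised   */

        if (name && table) {
            /* ... use name and table ... */
        }
        av_freep(&name);     /* frees and sets name  to NULL */
        av_freep(&table);    /* frees and sets table to NULL */
    }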
diff --git a/src/libffmpeg/libavutil/rational.c b/contrib/ffmpeg/libavutil/rational.c
index 4a7b0edf7..0e018c41b 100644
--- a/src/libffmpeg/libavutil/rational.c
+++ b/contrib/ffmpeg/libavutil/rational.c
@@ -2,18 +2,20 @@
* Rational numbers
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
@@ -34,22 +36,29 @@
int av_reduce(int *dst_nom, int *dst_den, int64_t nom, int64_t den, int64_t max){
AVRational a0={0,1}, a1={1,0};
int sign= (nom<0) ^ (den<0);
- int64_t gcd= ff_gcd(ABS(nom), ABS(den));
+ int64_t gcd= ff_gcd(FFABS(nom), FFABS(den));
- nom = ABS(nom)/gcd;
- den = ABS(den)/gcd;
+ nom = FFABS(nom)/gcd;
+ den = FFABS(den)/gcd;
if(nom<=max && den<=max){
a1= (AVRational){nom, den};
den=0;
}
while(den){
- int64_t x = nom / den;
+ uint64_t x = nom / den;
int64_t next_den= nom - den*x;
int64_t a2n= x*a1.num + a0.num;
int64_t a2d= x*a1.den + a0.den;
- if(a2n > max || a2d > max) break;
+ if(a2n > max || a2d > max){
+ if(a1.num) x= (max - a0.num) / a1.num;
+ if(a1.den) x= FFMIN(x, (max - a0.den) / a1.den);
+
+ if (den*(2*x*a1.den + a0.den) > nom*a1.den)
+ a1 = (AVRational){x*a1.num + a0.num, x*a1.den + a0.den};
+ break;
+ }
a0= a1;
a1= (AVRational){a2n, a2d};
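The reworked loop above walks the continued-fraction convergents of nom/den and, when the next convergent would exceed max, now picks the best admissible semiconvergent instead of simply stopping. A usage sketch; the return-value convention (1 for an exact result, 0 for an approximation) is stated here as an assumption, since the hunk does not show it:

    int num, den;

    /* NTSC frame rate 30000/1001, forced to fit into 8-bit terms */
    int exact = av_reduce(&num, &den, 30000, 1001, 255);
    /* exact is 0 and num/den is the closest ratio with both terms <= 255;
     * with a sufficiently large max the call would just divide out the GCD
     * and return 1. */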
diff --git a/src/libffmpeg/libavutil/rational.h b/contrib/ffmpeg/libavutil/rational.h
index 0fbe0d29d..43fc22114 100644
--- a/src/libffmpeg/libavutil/rational.h
+++ b/contrib/ffmpeg/libavutil/rational.h
@@ -2,18 +2,20 @@
* Rational numbers
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * This library is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * version 2.1 of the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
diff --git a/contrib/ffmpeg/libavutil/softfloat.c b/contrib/ffmpeg/libavutil/softfloat.c
new file mode 100644
index 000000000..f12fd17a0
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/softfloat.c
@@ -0,0 +1,72 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <assert.h>
+#include "softfloat.h"
+#include "common.h"
+#include "log.h"
+
+#undef printf
+
+int main(){
+ SoftFloat one= av_int2sf(1, 0);
+ SoftFloat sf1, sf2;
+ double d1, d2;
+ int i, j;
+av_log_level = AV_LOG_DEBUG;
+
+ d1= 1;
+ for(i= 0; i<10; i++){
+ d1= 1/(d1+1);
+ }
+ printf("test1 double=%d\n", (int)(d1 * (1<<24)));
+
+ sf1= one;
+ for(i= 0; i<10; i++){
+ sf1= av_div_sf(one, av_normalize_sf(av_add_sf(one, sf1)));
+ }
+ printf("test1 sf =%d\n", av_sf2int(sf1, 24));
+
+
+ for(i= 0; i<100; i++){
+ START_TIMER
+ d1= i;
+ d2= i/100.0;
+ for(j= 0; j<1000; j++){
+ d1= (d1+1)*d2;
+ }
+ STOP_TIMER("float add mul")
+ }
+ printf("test2 double=%d\n", (int)(d1 * (1<<24)));
+
+ for(i= 0; i<100; i++){
+ START_TIMER
+ sf1= av_int2sf(i, 0);
+ sf2= av_div_sf(av_int2sf(i, 2), av_int2sf(200, 3));
+ for(j= 0; j<1000; j++){
+ sf1= av_mul_sf(av_add_sf(sf1, one),sf2);
+ }
+ STOP_TIMER("softfloat add mul")
+ }
+ printf("test2 sf =%d (%d %d)\n", av_sf2int(sf1, 24), sf1.exp, sf1.mant);
+ return 0;
+}
diff --git a/contrib/ffmpeg/libavutil/softfloat.h b/contrib/ffmpeg/libavutil/softfloat.h
new file mode 100644
index 000000000..5bb2c1cbc
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/softfloat.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#define MIN_EXP -126
+#define MAX_EXP 126
+#define ONE_BITS 29
+
+typedef struct SoftFloat{
+ int32_t exp;
+ int32_t mant;
+}SoftFloat;
+
+static SoftFloat av_normalize_sf(SoftFloat a){
+ if(a.mant){
+#if 1
+ while((a.mant + 0x20000000U)<0x40000000U){
+ a.mant += a.mant;
+ a.exp -= 1;
+ }
+#else
+ int s=ONE_BITS + 1 - av_log2(a.mant ^ (a.mant<<1));
+ a.exp -= s;
+ a.mant <<= s;
+#endif
+ if(a.exp < MIN_EXP){
+ a.exp = MIN_EXP;
+ a.mant= 0;
+ }
+ }else{
+ a.exp= MIN_EXP;
+ }
+ return a;
+}
+
+static inline SoftFloat av_normalize1_sf(SoftFloat a){
+#if 1
+ if(a.mant + 0x40000000 < 0){
+ a.exp++;
+ a.mant>>=1;
+ }
+ return a;
+#elif 1
+ int t= a.mant + 0x40000000 < 0;
+ return (SoftFloat){a.exp+t, a.mant>>t};
+#else
+ int t= (a.mant + 0x40000000U)>>31;
+ return (SoftFloat){a.exp+t, a.mant>>t};
+#endif
+}
+
+/**
+ *
+ * @return will not be more denormalized than a+b, so if either input is
+ *         normalized then the output won't be worse than the other input;
+ *         if both are normalized then the output will be normalized
+ */
+static inline SoftFloat av_mul_sf(SoftFloat a, SoftFloat b){
+ a.exp += b.exp;
+ a.mant = (a.mant * (int64_t)b.mant) >> ONE_BITS;
+ return av_normalize1_sf(a);
+}
+
+/**
+ *
+ * b has to be normalized and not zero
+ * @return will not be more denormalized than a
+ */
+static SoftFloat av_div_sf(SoftFloat a, SoftFloat b){
+ a.exp -= b.exp+1;
+ a.mant = ((int64_t)a.mant<<(ONE_BITS+1)) / b.mant;
+ return av_normalize1_sf(a);
+}
+
+static inline int av_cmp_sf(SoftFloat a, SoftFloat b){
+ int t= a.exp - b.exp;
+ if(t<0) return (a.mant >> (-t)) - b.mant ;
+ else return a.mant - (b.mant >> t);
+}
+
+static inline SoftFloat av_add_sf(SoftFloat a, SoftFloat b){
+ int t= a.exp - b.exp;
+ if(t<0) return av_normalize1_sf((SoftFloat){b.exp, b.mant + (a.mant >> (-t))});
+ else return av_normalize1_sf((SoftFloat){a.exp, a.mant + (b.mant >> t )});
+}
+
+static inline SoftFloat av_sub_sf(SoftFloat a, SoftFloat b){
+ return av_add_sf(a, (SoftFloat){b.exp, -b.mant});
+}
+
+//FIXME sqrt, log, exp, pow, sin, cos
+
+static inline SoftFloat av_int2sf(int v, int frac_bits){
+ return av_normalize_sf((SoftFloat){ONE_BITS-frac_bits, v});
+}
+
+/**
+ *
+ * rounding is to -inf
+ */
+static inline int av_sf2int(SoftFloat v, int frac_bits){
+ v.exp += frac_bits - ONE_BITS;
+ if(v.exp >= 0) return v.mant << v.exp ;
+ else return v.mant >>(-v.exp);
+}
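Beyond the timing test in softfloat.c, the round trip through this API is short enough to show inline; a sketch, keeping in mind that av_div_sf() requires a normalized, non-zero divisor and av_sf2int() rounds toward -inf:

    static int softfloat_example(void){
        SoftFloat three   = av_int2sf(3, 0);
        SoftFloat quarter = av_div_sf(av_int2sf(1, 0), av_int2sf(4, 0));
        SoftFloat product = av_mul_sf(three, quarter);

        /* 0.75 with 24 fractional bits, i.e. 3<<22 = 12582912 */
        return av_sf2int(product, 24);
    }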
diff --git a/contrib/ffmpeg/libavutil/tree.c b/contrib/ffmpeg/libavutil/tree.c
new file mode 100644
index 000000000..c929e4819
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/tree.c
@@ -0,0 +1,151 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "common.h"
+#include "log.h"
+#include "tree.h"
+
+typedef struct AVTreeNode{
+ struct AVTreeNode *child[2];
+ void *elem;
+ int state;
+}AVTreeNode;
+
+void *av_tree_find(const AVTreeNode *t, void *key, int (*cmp)(void *key, const void *b), void *next[2]){
+ if(t){
+ unsigned int v= cmp(t->elem, key);
+ if(v){
+ if(next) next[(v>>31)^1]= t->elem;
+ return av_tree_find(t->child[v>>31], key, cmp, next);
+ }else{
+ return t->elem;
+ }
+ }
+ return NULL;
+}
+
+void *av_tree_insert(AVTreeNode **tp, void *key, int (*cmp)(void *key, const void *b)){
+ AVTreeNode *t= *tp;
+ if(t){
+ unsigned int v= cmp(t->elem, key);
+ if(v){
+ int i= v>>31;
+ AVTreeNode **child= &t->child[i];
+ void *ret= av_tree_insert(child, key, cmp);
+ if(!ret){
+ t->state -= ((int)v>>31)|1;
+ if(!(t->state&1)){
+ if(t->state){
+ if((*child)->state*2 == t->state){
+ *tp= *child;
+ *child= (*child)->child[i^1];
+ (*tp)->child[i^1]= t;
+ t->state= 0;
+ }else{
+ *tp= (*child)->child[i^1];
+ (*child)->child[i^1]= (*tp)->child[i];
+ (*tp)->child[i]= *child;
+ *child= (*tp)->child[i^1];
+ (*tp)->child[i^1]= t;
+
+ i= (*tp)->state > 0;
+ (*tp)->child[i ]->state= 0;
+ (*tp)->child[i^1]->state= -(*tp)->state;
+ }
+ (*tp)->state=0;
+ }
+ return key;
+ }
+ }
+ return ret;
+ }else{
+ return t->elem;
+ }
+ }else{
+ *tp= av_mallocz(sizeof(AVTreeNode));
+ (*tp)->elem= key;
+ return NULL;
+ }
+}
+
+void av_tree_destroy(AVTreeNode *t){
+ av_tree_destroy(t->child[0]);
+ av_tree_destroy(t->child[1]);
+ av_free(t);
+}
+
+#if 0
+void av_tree_enumerate(AVTreeNode *t, void *opaque, int (*f)(void *opaque, void *elem)){
+ int v= f(opaque, t->elem);
+ if(v>=0) av_tree_enumerate(t->child[0], opaque, f);
+ if(v<=0) av_tree_enumerate(t->child[1], opaque, f);
+}
+#endif
+
+#ifdef TEST
+
+static int check(AVTreeNode *t){
+ if(t){
+ int left= check(t->child[0]);
+ int right= check(t->child[1]);
+
+ if(left>999 || right>999)
+ return 1000;
+ if(right - left != t->state)
+ return 1000;
+ if(t->state>1 || t->state<-1)
+ return 1000;
+ return FFMAX(left, right)+1;
+ }
+ return 0;
+}
+
+static void print(AVTreeNode *t, int depth){
+ int i;
+ for(i=0; i<depth*4; i++) av_log(NULL, AV_LOG_ERROR, " ");
+ if(t){
+ av_log(NULL, AV_LOG_ERROR, "Node %p %2d %4d\n", t, t->state, t->elem);
+ print(t->child[0], depth+1);
+ print(t->child[1], depth+1);
+ }else
+ av_log(NULL, AV_LOG_ERROR, "NULL\n");
+}
+
+int cmp(const void *a, const void *b){
+ return a-b;
+}
+
+int main(){
+ int i,j,k;
+ AVTreeNode *root= NULL;
+
+ for(i=0; i<10000; i++){
+ int j= (random()%863294);
+ if(check(root) > 999){
+ av_log(NULL, AV_LOG_ERROR, "FATAL error %d\n", i);
+ print(root, 0);
+ return -1;
+ }
+ av_log(NULL, AV_LOG_ERROR, "inserting %4d\n", j);
+ av_tree_insert(&root, (void*)(j+1), cmp);
+ }
+ return 0;
+}
+#endif
diff --git a/contrib/ffmpeg/libavutil/tree.h b/contrib/ffmpeg/libavutil/tree.h
new file mode 100644
index 000000000..36897ef46
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/tree.h
@@ -0,0 +1,52 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef TREE_H
+#define TREE_H
+
+struct AVTreeNode;
+
+/**
+ * Finds an element.
+ * @param root a pointer to the root node of the tree
+ * @param next if next is not NULL then next[0] will contain the previous element and
+ *             next[1] the next element; if either does not exist then the corresponding
+ *             entry in next is unchanged
+ * @return an element with cmp(key, elem)==0 or NULL if no such element exists in
+ * the tree
+ */
+void *av_tree_find(const struct AVTreeNode *root, void *key, int (*cmp)(void *key, const void *b), void *next[2]);
+
+/**
+ * Finds an element for which cmp(key, elem)==0; if no such element is found, key
+ * is inserted into the tree.
+ * @param rootp a pointer to a pointer to the root node of the tree; note that the
+ *              root node can change during insertions, as this is required to
+ *              keep the tree balanced
+ *
+ * @return if no insertion happened, the found element;
+ *         if an insertion happened, then either key or NULL is returned (which one it is
+ *         depends on the tree state and the implementation; you should make no
+ *         assumptions that it is one or the other in code)
+ */
+void *av_tree_insert(struct AVTreeNode **rootp, void *key, int (*cmp)(void *key, const void *b));
+void av_tree_destroy(struct AVTreeNode *t);
+
+#endif /* TREE_H */
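A compact usage sketch for the two entry points declared above, in the spirit of the self-test in tree.c; the integer-cast keys and the comparison function are illustrative only:

    static int cmp(void *key, const void *b){
        return (char *)key - (const char *)b;   /* any consistent total order */
    }

    static void tree_example(void){
        struct AVTreeNode *root = NULL;
        void *next[2] = { NULL, NULL };

        av_tree_insert(&root, (void *)42, cmp);
        av_tree_insert(&root, (void *)7,  cmp);

        if (av_tree_find(root, (void *)42, cmp, next)) {
            /* found; next[0]/next[1] now hold the neighbours where present */
        }
        av_tree_destroy(root);
    }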
diff --git a/contrib/ffmpeg/libavutil/x86_cpu.h b/contrib/ffmpeg/libavutil/x86_cpu.h
new file mode 100644
index 000000000..3d54b2a60
--- /dev/null
+++ b/contrib/ffmpeg/libavutil/x86_cpu.h
@@ -0,0 +1,60 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_X86CPU_H
+#define AVUTIL_X86CPU_H
+
+#ifdef ARCH_X86_64
+# define REG_a "rax"
+# define REG_b "rbx"
+# define REG_c "rcx"
+# define REG_d "rdx"
+# define REG_D "rdi"
+# define REG_S "rsi"
+# define PTR_SIZE "8"
+
+# define REG_SP "rsp"
+# define REG_BP "rbp"
+# define REGBP rbp
+# define REGa rax
+# define REGb rbx
+# define REGc rcx
+# define REGSP rsp
+
+#else
+
+# define REG_a "eax"
+# define REG_b "ebx"
+# define REG_c "ecx"
+# define REG_d "edx"
+# define REG_D "edi"
+# define REG_S "esi"
+# define PTR_SIZE "4"
+
+# define REG_SP "esp"
+# define REG_BP "ebp"
+# define REGBP ebp
+# define REGa eax
+# define REGb ebx
+# define REGc ecx
+# define REGSP esp
+#endif
+
+#endif /* AVUTIL_X86CPU_H */
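The REG_* strings exist so that one inline-asm body can be assembled for both 32-bit and 64-bit x86; a hedged sketch of the idiom:

    #include "x86_cpu.h"

    /* Round-trips a value through the accumulator register, which is
     * "eax" on x86-32 and "rax" on x86-64.  Illustrative only. */
    static inline long through_reg_a(long v){
        long out;
        asm volatile(
            "mov %1, %%"REG_a" \n\t"
            "mov %%"REG_a", %0 \n\t"
            : "=m"(out)
            : "m"(v)
            : "%"REG_a);
        return out;
    }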
diff --git a/contrib/ffmpeg/libpostproc/Makefile b/contrib/ffmpeg/libpostproc/Makefile
new file mode 100644
index 000000000..a6765365d
--- /dev/null
+++ b/contrib/ffmpeg/libpostproc/Makefile
@@ -0,0 +1,26 @@
+
+include ../config.mak
+
+# Overload incdir, postproc include files go in a different directory.
+incdir=$(prefix)/include/postproc
+
+EXTRALIBS := -L$(BUILD_ROOT)/libavutil -lavutil$(BUILDSUF) $(EXTRALIBS)
+
+NAME=postproc
+ifeq ($(BUILD_SHARED),yes)
+LIBVERSION=$(SPPVERSION)
+LIBMAJOR=$(SPPMAJOR)
+endif
+
+STATIC_OBJS=postprocess.o
+SHARED_OBJS=postprocess_pic.o
+
+HEADERS = postprocess.h
+
+include ../common.mak
+
+depend dep: postprocess.c
+
+postprocess_pic.o: postprocess.c
+ $(CC) -c $(CFLAGS) -fomit-frame-pointer -fPIC -DPIC -o $@ $<
+
diff --git a/contrib/ffmpeg/libpostproc/mangle.h b/contrib/ffmpeg/libpostproc/mangle.h
new file mode 100644
index 000000000..3521fa9bb
--- /dev/null
+++ b/contrib/ffmpeg/libpostproc/mangle.h
@@ -0,0 +1,47 @@
+/*
+ * mangle.h - This file has some CPP macros to deal with different symbol
+ * mangling across binary formats.
+ *
+ * (c)2002 by Felix Buenemann <atmosfear at users.sourceforge.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __MANGLE_H
+#define __MANGLE_H
+
+/* Feel free to add more to the list, eg. a.out IMO */
+/* Use rip-relative addressing if compiling PIC code on x86-64. */
+#if defined(__CYGWIN__) || defined(__MINGW32__) || defined(__OS2__) || \
+ (defined(__OpenBSD__) && !defined(__ELF__))
+#if defined(ARCH_X86_64) && defined(PIC)
+#define MANGLE(a) "_" #a"(%%rip)"
+#else
+#define MANGLE(a) "_" #a
+#endif
+#else
+#if defined(ARCH_X86_64) && defined(PIC)
+#define MANGLE(a) #a"(%%rip)"
+#elif defined(CONFIG_DARWIN)
+#define MANGLE(a) "_" #a
+#else
+#define MANGLE(a) #a
+#endif
+#endif
+
+#endif /* !__MANGLE_H */
+
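MANGLE() hides both the leading-underscore convention of some object formats and the rip-relative addressing needed for PIC x86-64 when inline asm refers to a C global; a minimal sketch of how the postproc asm uses it (the mask value and mm0 are illustrative):

    #include <inttypes.h>
    #include "mangle.h"

    uint64_t pp_example_mask = 0x00FF00FF00FF00FFULL;

    /* MANGLE(pp_example_mask) expands to "pp_example_mask",
     * "_pp_example_mask" or "pp_example_mask(%rip)" as appropriate. */
    static inline void load_example_mask(void){
        asm volatile("movq "MANGLE(pp_example_mask)", %%mm0" ::: "memory");
    }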
diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess.c b/contrib/ffmpeg/libpostproc/postprocess.c
index 63d65fece..c9f2893e3 100644
--- a/src/libffmpeg/libavcodec/libpostproc/postprocess.c
+++ b/contrib/ffmpeg/libpostproc/postprocess.c
@@ -1,22 +1,24 @@
/*
- Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
-
- AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-*/
+ * Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
+ *
+ * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
/**
* @file postprocess.c
@@ -69,9 +71,10 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
...
*/
-//Changelog: use the CVS log
+//Changelog: use the Subversion log
#include "config.h"
+#include "avutil.h"
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
@@ -85,7 +88,7 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
//#undef ARCH_X86
//#define DEBUG_BRIGHTNESS
#ifdef USE_FASTMEMCPY
-#include "fastmemcpy.h"
+#include "libvo/fastmemcpy.h"
#endif
#include "postprocess.h"
#include "postprocess_internal.h"
@@ -96,30 +99,13 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
#include <altivec.h>
#endif
-#ifndef HAVE_MEMALIGN
-#define memalign(a,b) malloc(b)
-#endif
-
-#define MIN(a,b) ((a) > (b) ? (b) : (a))
-#define MAX(a,b) ((a) < (b) ? (b) : (a))
-#define ABS(a) ((a) > 0 ? (a) : (-(a)))
-#define SIGN(a) ((a) > 0 ? 1 : -1)
-
#define GET_MODE_BUFFER_SIZE 500
#define OPTIONS_ARRAY_SIZE 10
#define BLOCK_SIZE 8
#define TEMP_STRIDE 8
//#define NUM_BLOCKS_AT_ONCE 16 //not used yet
-#if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ > 0)
-# define attribute_used __attribute__((used))
-# define always_inline __attribute__((always_inline)) inline
-#else
-# define attribute_used
-# define always_inline inline
-#endif
-
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
static uint64_t __attribute__((aligned(8))) attribute_used w05= 0x0005000500050005LL;
static uint64_t __attribute__((aligned(8))) attribute_used w04= 0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) attribute_used w20= 0x0020002000200020LL;
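The local MIN/MAX/ABS/SIGN helpers dropped in this hunk are replaced throughout the file by the FF-prefixed macros that libavutil's common header is expected to supply; for reference, they are assumed to behave like:

    /* Assumed equivalents of the libavutil macros used below: */
    #define FFABS(a)   ((a) >= 0 ? (a) : (-(a)))
    #define FFSIGN(a)  ((a) > 0 ? 1 : -1)
    #define FFMAX(a,b) ((a) > (b) ? (a) : (b))
    #define FFMIN(a,b) ((a) > (b) ? (b) : (a))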
@@ -133,8 +119,6 @@ static uint64_t __attribute__((aligned(8))) attribute_used b80= 0x80808080808080
static uint8_t clip_table[3*256];
static uint8_t * const clip_tab= clip_table + 256;
-static const int verbose= 0;
-
static const int attribute_used deringThreshold= 20;
@@ -161,7 +145,7 @@ static struct PPFilter filters[]=
{NULL, NULL,0,0,0,0} //End Marker
};
-static char *replaceTable[]=
+static const char *replaceTable[]=
{
"default", "hdeblock:a,vdeblock:a,dering:a",
"de", "hdeblock:a,vdeblock:a,dering:a",
@@ -172,7 +156,7 @@ static char *replaceTable[]=
};
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
static inline void prefetchnta(void *p)
{
asm volatile( "prefetchnta (%0)\n\t"
@@ -346,17 +330,17 @@ static inline void doHorizDefFilter_C(uint8_t dst[], int stride, PPContext *c)
{
const int middleEnergy= 5*(dst[4] - dst[3]) + 2*(dst[2] - dst[5]);
- if(ABS(middleEnergy) < 8*c->QP)
+ if(FFABS(middleEnergy) < 8*c->QP)
{
const int q=(dst[3] - dst[4])/2;
const int leftEnergy= 5*(dst[2] - dst[1]) + 2*(dst[0] - dst[3]);
const int rightEnergy= 5*(dst[6] - dst[5]) + 2*(dst[4] - dst[7]);
- int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
- d= MAX(d, 0);
+ int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
+ d= FFMAX(d, 0);
d= (5*d + 32) >> 6;
- d*= SIGN(-middleEnergy);
+ d*= FFSIGN(-middleEnergy);
if(q>0)
{
@@ -385,8 +369,8 @@ static inline void doHorizLowPass_C(uint8_t dst[], int stride, PPContext *c)
int y;
for(y=0; y<BLOCK_SIZE; y++)
{
- const int first= ABS(dst[-1] - dst[0]) < c->QP ? dst[-1] : dst[0];
- const int last= ABS(dst[8] - dst[7]) < c->QP ? dst[8] : dst[7];
+ const int first= FFABS(dst[-1] - dst[0]) < c->QP ? dst[-1] : dst[0];
+ const int last= FFABS(dst[8] - dst[7]) < c->QP ? dst[8] : dst[7];
int sums[10];
sums[0] = 4*first + dst[0] + dst[1] + dst[2] + 4;
@@ -428,7 +412,7 @@ static inline void horizX1Filter(uint8_t *src, int stride, int QP)
if(lut==NULL)
{
int i;
- lut= (uint64_t*)memalign(8, 256*8);
+ lut = av_malloc(256*8);
for(i=0; i<256; i++)
{
int v= i < 128 ? 2*i : 2*(i-256);
@@ -461,11 +445,11 @@ static inline void horizX1Filter(uint8_t *src, int stride, int QP)
int b= src[3] - src[4];
int c= src[5] - src[6];
- int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
+ int d= FFMAX(FFABS(b) - (FFABS(a) + FFABS(c))/2, 0);
if(d < QP)
{
- int v = d * SIGN(-b);
+ int v = d * FFSIGN(-b);
src[1] +=v/8;
src[2] +=v/4;
@@ -521,8 +505,8 @@ static always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, PPC
}
}
if(max-min < 2*QP){
- const int first= ABS(src[-1*step] - src[0]) < QP ? src[-1*step] : src[0];
- const int last= ABS(src[8*step] - src[7*step]) < QP ? src[8*step] : src[7*step];
+ const int first= FFABS(src[-1*step] - src[0]) < QP ? src[-1*step] : src[0];
+ const int last= FFABS(src[8*step] - src[7*step]) < QP ? src[8*step] : src[7*step];
int sums[10];
sums[0] = 4*first + src[0*step] + src[1*step] + src[2*step] + 4;
@@ -548,17 +532,17 @@ static always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, PPC
}else{
const int middleEnergy= 5*(src[4*step] - src[3*step]) + 2*(src[2*step] - src[5*step]);
- if(ABS(middleEnergy) < 8*QP)
+ if(FFABS(middleEnergy) < 8*QP)
{
const int q=(src[3*step] - src[4*step])/2;
const int leftEnergy= 5*(src[2*step] - src[1*step]) + 2*(src[0*step] - src[3*step]);
const int rightEnergy= 5*(src[6*step] - src[5*step]) + 2*(src[4*step] - src[7*step]);
- int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
- d= MAX(d, 0);
+ int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
+ d= FFMAX(d, 0);
d= (5*d + 32) >> 6;
- d*= SIGN(-middleEnergy);
+ d*= FFSIGN(-middleEnergy);
if(q>0)
{
@@ -597,7 +581,7 @@ static always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, PPC
#endif //HAVE_ALTIVEC
#endif //ARCH_POWERPC
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX
@@ -610,7 +594,7 @@ static always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, PPC
#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_3DNOW
#endif
-#endif //ARCH_X86
+#endif /* defined(ARCH_X86) */
#undef HAVE_MMX
#undef HAVE_MMX2
@@ -678,7 +662,7 @@ static inline void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int
// difference wouldnt be messureable here but its much better because
// someone might exchange the cpu whithout restarting mplayer ;)
#ifdef RUNTIME_CPUDETECT
-#if defined(ARCH_X86) || defined(ARCH_X86_64)
+#if defined(ARCH_X86)
// ordered per speed fasterst first
if(c->cpuCaps & PP_CPU_CAPS_MMX2)
postProcess_MMX2(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
@@ -759,18 +743,19 @@ char *pp_help=
"vb:a/hb:a/lb de,-vb\n"
"more examples:\n"
"tn:64:128:256\n"
+"\n"
;
pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
{
char temp[GET_MODE_BUFFER_SIZE];
char *p= temp;
- char *filterDelimiters= ",/";
- char *optionDelimiters= ":";
+ const char *filterDelimiters= ",/";
+ const char *optionDelimiters= ":";
struct PPMode *ppMode;
char *filterToken;
- ppMode= memalign(8, sizeof(PPMode));
+ ppMode= av_malloc(sizeof(PPMode));
ppMode->lumMode= 0;
ppMode->chromMode= 0;
@@ -786,7 +771,7 @@ pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
strncpy(temp, name, GET_MODE_BUFFER_SIZE);
- if(verbose>1) printf("pp: %s\n", name);
+ av_log(NULL, AV_LOG_DEBUG, "pp: %s\n", name);
for(;;){
char *filterName;
@@ -804,7 +789,7 @@ pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
if(filterToken == NULL) break;
p+= strlen(filterToken) + 1; // p points to next filterToken
filterName= strtok(filterToken, optionDelimiters);
- if(verbose>1) printf("pp: %s::%s\n", filterToken, filterName);
+ av_log(NULL, AV_LOG_DEBUG, "pp: %s::%s\n", filterToken, filterName);
if(*filterName == '-')
{
@@ -816,7 +801,7 @@ pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
option= strtok(NULL, optionDelimiters);
if(option == NULL) break;
- if(verbose>1) printf("pp: option: %s\n", option);
+ av_log(NULL, AV_LOG_DEBUG, "pp: option: %s\n", option);
if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
@@ -857,7 +842,6 @@ pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
for(i=0; filters[i].shortName!=NULL; i++)
{
-// printf("Compareing %s, %s, %s\n", filters[i].shortName,filters[i].longName, filterName);
if( !strcmp(filters[i].longName, filterName)
|| !strcmp(filters[i].shortName, filterName))
{
@@ -944,24 +928,23 @@ pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
ppMode->error += numOfUnknownOptions;
}
- if(verbose>1) printf("pp: lumMode=%X, chromMode=%X\n", ppMode->lumMode, ppMode->chromMode);
+ av_log(NULL, AV_LOG_DEBUG, "pp: lumMode=%X, chromMode=%X\n", ppMode->lumMode, ppMode->chromMode);
if(ppMode->error)
{
- fprintf(stderr, "%d errors in postprocess string \"%s\"\n", ppMode->error, name);
- free(ppMode);
+ av_log(NULL, AV_LOG_ERROR, "%d errors in postprocess string \"%s\"\n", ppMode->error, name);
+ av_free(ppMode);
return NULL;
}
return ppMode;
}
void pp_free_mode(pp_mode_t *mode){
- if(mode) free(mode);
+ av_free(mode);
}
static void reallocAlign(void **p, int alignment, int size){
- if(*p) free(*p);
- *p= memalign(alignment, size);
- memset(*p, 0, size);
+ av_free(*p);
+ *p= av_mallocz(size);
}
static void reallocBuffers(PPContext *c, int width, int height, int stride, int qpStride){
@@ -1000,14 +983,21 @@ static void global_init(void){
memset(clip_table+512, 0, 256);
}
+static const char * context_to_name(void * ptr) {
+ return "postproc";
+}
+
+static AVClass av_codec_context_class = { "Postproc", context_to_name, NULL };
+
pp_context_t *pp_get_context(int width, int height, int cpuCaps){
- PPContext *c= memalign(32, sizeof(PPContext));
+ PPContext *c= av_malloc(sizeof(PPContext));
int stride= (width+15)&(~15); //assumed / will realloc if needed
int qpStride= (width+15)/16 + 2; //assumed / will realloc if needed
global_init();
memset(c, 0, sizeof(PPContext));
+ c->av_class = &av_codec_context_class;
c->cpuCaps= cpuCaps;
if(cpuCaps&PP_FORMAT){
c->hChromaSubSample= cpuCaps&0x3;
@@ -1028,21 +1018,21 @@ void pp_free_context(void *vc){
PPContext *c = (PPContext*)vc;
int i;
- for(i=0; i<3; i++) free(c->tempBlured[i]);
- for(i=0; i<3; i++) free(c->tempBluredPast[i]);
+ for(i=0; i<3; i++) av_free(c->tempBlured[i]);
+ for(i=0; i<3; i++) av_free(c->tempBluredPast[i]);
- free(c->tempBlocks);
- free(c->yHistogram);
- free(c->tempDst);
- free(c->tempSrc);
- free(c->deintTemp);
- free(c->stdQPTable);
- free(c->nonBQPTable);
- free(c->forcedQPTable);
+ av_free(c->tempBlocks);
+ av_free(c->yHistogram);
+ av_free(c->tempDst);
+ av_free(c->tempSrc);
+ av_free(c->deintTemp);
+ av_free(c->stdQPTable);
+ av_free(c->nonBQPTable);
+ av_free(c->forcedQPTable);
memset(c, 0, sizeof(PPContext));
- free(c);
+ av_free(c);
}
void pp_postprocess(uint8_t * src[3], int srcStride[3],
@@ -1055,14 +1045,14 @@ void pp_postprocess(uint8_t * src[3], int srcStride[3],
int mbHeight= (height+15)>>4;
PPMode *mode = (PPMode*)vm;
PPContext *c = (PPContext*)vc;
- int minStride= MAX(ABS(srcStride[0]), ABS(dstStride[0]));
- int absQPStride = ABS(QPStride);
+ int minStride= FFMAX(FFABS(srcStride[0]), FFABS(dstStride[0]));
+ int absQPStride = FFABS(QPStride);
// c->stride and c->QPStride are always positive
if(c->stride < minStride || c->qpStride < absQPStride)
reallocBuffers(c, width, height,
- MAX(minStride, c->stride),
- MAX(c->qpStride, absQPStride));
+ FFMAX(minStride, c->stride),
+ FFMAX(c->qpStride, absQPStride));
if(QP_store==NULL || (mode->lumMode & FORCE_QUANT))
{
@@ -1074,7 +1064,6 @@ void pp_postprocess(uint8_t * src[3], int srcStride[3],
else
for(i=0; i<mbWidth; i++) QP_store[i]= 1;
}
-//printf("pict_type:%d\n", pict_type);
if(pict_type & PP_PICT_TYPE_QP2){
int i;
@@ -1093,11 +1082,11 @@ if(0){
int x,y;
for(y=0; y<mbHeight; y++){
for(x=0; x<mbWidth; x++){
- printf("%2d ", QP_store[x + y*QPStride]);
+ av_log(c, AV_LOG_INFO, "%2d ", QP_store[x + y*QPStride]);
}
- printf("\n");
+ av_log(c, AV_LOG_INFO, "\n");
}
- printf("\n");
+ av_log(c, AV_LOG_INFO, "\n");
}
if((pict_type&7)!=3)
@@ -1121,10 +1110,8 @@ for(y=0; y<mbHeight; y++){
}
}
- if(verbose>2)
- {
- printf("using npp filters 0x%X/0x%X\n", mode->lumMode, mode->chromMode);
- }
+ av_log(c, AV_LOG_DEBUG, "using npp filters 0x%X/0x%X\n",
+ mode->lumMode, mode->chromMode);
postProcess(src[0], srcStride[0], dst[0], dstStride[0],
width, height, QP_store, QPStride, 0, mode, c);
diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess.h b/contrib/ffmpeg/libpostproc/postprocess.h
index 114c88a38..eed92ba6d 100644
--- a/src/libffmpeg/libavcodec/libpostproc/postprocess.h
+++ b/contrib/ffmpeg/libpostproc/postprocess.h
@@ -1,20 +1,22 @@
/*
- Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-*/
+ * Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
#ifndef NEWPOSTPROCESS_H
#define NEWPOSTPROCESS_H
diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess_altivec_template.c b/contrib/ffmpeg/libpostproc/postprocess_altivec_template.c
index 38adeb32d..3a33a5885 100644
--- a/src/libffmpeg/libavcodec/libpostproc/postprocess_altivec_template.c
+++ b/contrib/ffmpeg/libpostproc/postprocess_altivec_template.c
@@ -1,22 +1,24 @@
/*
- AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
-
- based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-*/
+ * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
#ifdef CONFIG_DARWIN
@@ -65,7 +67,6 @@ static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c)
vector by assuming (stride % 16) == 0, unfortunately
this is not always true.
*/
- register int y;
short __attribute__ ((aligned(16))) data[8];
int numEq;
uint8_t *src2 = src;
@@ -263,7 +264,6 @@ static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c)
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
- const vector unsigned short v_1 = vec_splat_u16(1);
const vector unsigned short v_2 = vec_splat_u16(2);
const vector unsigned short v_4 = vec_splat_u16(4);
@@ -516,7 +516,6 @@ static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
*/
uint8_t *srcCopy = src;
uint8_t __attribute__((aligned(16))) dt[16];
- const vector unsigned char vuint8_1 = vec_splat_u8(1);
const vector signed int zero = vec_splat_s32(0);
vector unsigned char v_dt;
dt[0] = deringThreshold;
@@ -680,7 +679,6 @@ static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
tQP2[0]= c->QP/2 + 1;
vector signed int vQP2 = vec_ld(0, tQP2);
vQP2 = vec_splat(vQP2, 0);
- const vector unsigned char vuint8_2 = vec_splat_u8(2);
const vector signed int vsint32_8 = vec_splat_s32(8);
const vector unsigned int vuint32_4 = vec_splat_u32(4);
@@ -1105,9 +1103,6 @@ static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst,
static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
const vector unsigned char zero = vec_splat_u8(0);
- const vector unsigned char magic_perm = (const vector unsigned char)
- AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
#define LOAD_DOUBLE_LINE(i, j) \
vector unsigned char src##i = vec_ld(i * 16, src); \
diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h b/contrib/ffmpeg/libpostproc/postprocess_internal.h
index 3d19c694b..537d728c0 100644
--- a/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h
+++ b/contrib/ffmpeg/libpostproc/postprocess_internal.h
@@ -1,26 +1,30 @@
/*
- Copyright (C) 2001-2002 Michael Niedermayer (michaelni@gmx.at)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-*/
+ * Copyright (C) 2001-2002 Michael Niedermayer (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
/**
* @file postprocess_internal.h
* internal api header.
*/
+#include "avutil.h"
+
#define V_DEBLOCK 0x01
#define H_DEBLOCK 0x02
#define DERING 0x04
@@ -83,8 +87,8 @@ static inline int CLIP(int a){
* Postprocessng filter.
*/
struct PPFilter{
- char *shortName;
- char *longName;
+ const char *shortName;
+ const char *longName;
int chromDefault; ///< is chrominance filtering on by default if this filter is manually activated
int minLumQuality; ///< minimum quality to turn luminance filtering on
int minChromQuality; ///< minimum quality to turn chrominance filtering on
@@ -115,6 +119,11 @@ typedef struct PPMode{
* postprocess context.
*/
typedef struct PPContext{
+ /**
+ * info on struct for av_log
+ */
+ AVClass *av_class;
+
uint8_t *tempBlocks; ///<used for the horizontal code
/**
@@ -124,8 +133,8 @@ typedef struct PPContext{
*/
uint64_t *yHistogram;
- uint64_t __attribute__((aligned(8))) packedYOffset;
- uint64_t __attribute__((aligned(8))) packedYScale;
+ DECLARE_ALIGNED(8, uint64_t, packedYOffset);
+ DECLARE_ALIGNED(8, uint64_t, packedYScale);
/** Temporal noise reducing buffers */
uint8_t *tempBlured[3];
@@ -137,11 +146,11 @@ typedef struct PPContext{
uint8_t *deintTemp;
- uint64_t __attribute__((aligned(8))) pQPb;
- uint64_t __attribute__((aligned(8))) pQPb2;
+ DECLARE_ALIGNED(8, uint64_t, pQPb);
+ DECLARE_ALIGNED(8, uint64_t, pQPb2);
- uint64_t __attribute__((aligned(8))) mmxDcOffset[64];
- uint64_t __attribute__((aligned(8))) mmxDcThreshold[64];
+ DECLARE_ALIGNED(8, uint64_t, mmxDcOffset[64]);
+ DECLARE_ALIGNED(8, uint64_t, mmxDcThreshold[64]);
QP_STORE_T *stdQPTable; ///< used to fix MPEG2 style qscale
QP_STORE_T *nonBQPTable;
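DECLARE_ALIGNED(n, type, name) replaces the hand-written __attribute__((aligned(n))) so that non-gcc compilers can be catered for in one place; on gcc it is assumed to expand roughly as follows:

    /* Assumed gcc expansion of the macro introduced above: */
    #define DECLARE_ALIGNED(n, t, v)  t v __attribute__ ((aligned (n)))

    /* so that  DECLARE_ALIGNED(8, uint64_t, pQPb);
     * becomes  uint64_t pQPb __attribute__ ((aligned (8)));  */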
diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c b/contrib/ffmpeg/libpostproc/postprocess_template.c
index 1171bd2aa..c22d5d1b6 100644
--- a/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c
+++ b/contrib/ffmpeg/libpostproc/postprocess_template.c
@@ -1,19 +1,21 @@
/*
- Copyright (C) 2001-2002 Michael Niedermayer (michaelni@gmx.at)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * Copyright (C) 2001-2002 Michael Niedermayer (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
@@ -336,8 +338,8 @@ static inline void RENAME(doVertLowPass)(uint8_t *src, int stride, PPContext *c)
src+= stride*3;
for(x=0; x<BLOCK_SIZE; x++)
{
- const int first= ABS(src[0] - src[l1]) < c->QP ? src[0] : src[l1];
- const int last= ABS(src[l8] - src[l9]) < c->QP ? src[l9] : src[l8];
+ const int first= FFABS(src[0] - src[l1]) < c->QP ? src[0] : src[l1];
+ const int last= FFABS(src[l8] - src[l9]) < c->QP ? src[l9] : src[l8];
int sums[10];
sums[0] = 4*first + src[l1] + src[l2] + src[l3] + 4;
@@ -458,7 +460,7 @@ static inline void RENAME(vertRK1Filter)(uint8_t *src, int stride, int QP)
for(x=0; x<BLOCK_SIZE; x++)
{
const int v = (src[x+l5] - src[x+l4]);
- if(ABS(v) < QP15)
+ if(FFABS(v) < QP15)
{
src[x+l3] +=v>>3;
src[x+l4] +=v>>1;
@@ -587,12 +589,12 @@ static inline void RENAME(vertX1Filter)(uint8_t *src, int stride, PPContext *co)
int b= src[l4] - src[l5];
int c= src[l5] - src[l6];
- int d= ABS(b) - ((ABS(a) + ABS(c))>>1);
- d= MAX(d, 0);
+ int d= FFABS(b) - ((FFABS(a) + FFABS(c))>>1);
+ d= FFMAX(d, 0);
if(d < co->QP*2)
{
- int v = d * SIGN(-b);
+ int v = d * FFSIGN(-b);
src[l2] +=v>>3;
src[l3] +=v>>2;
@@ -843,17 +845,17 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
for(x=0; x<BLOCK_SIZE; x++)
{
const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
- if(ABS(middleEnergy)< 8*QP)
+ if(FFABS(middleEnergy)< 8*QP)
{
const int q=(src[l4] - src[l5])/2;
const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
- int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
- d= MAX(d, 0);
+ int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
+ d= FFMAX(d, 0);
d= (5*d + 32) >> 6;
- d*= SIGN(-middleEnergy);
+ d*= FFSIGN(-middleEnergy);
if(q>0)
{
@@ -878,7 +880,7 @@ src-=8;
for(y=4; y<6; y++)
{
int d= src[x+y*stride] - tmp[x+(y-4)*8];
- int ad= ABS(d);
+ int ad= FFABS(d);
static int max=0;
static int sum=0;
static int num=0;
@@ -894,7 +896,7 @@ src-=8;
num++;
if(num%1000000 == 0)
{
- printf(" %d %d %d %d\n", num, sum, max, bias);
+ av_log(c, AV_LOG_INFO, " %d %d %d %d\n", num, sum, max, bias);
}
}
}
@@ -1147,17 +1149,17 @@ src-=8;
for(x=0; x<BLOCK_SIZE; x++)
{
const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
- if(ABS(middleEnergy) < 8*c->QP)
+ if(FFABS(middleEnergy) < 8*c->QP)
{
const int q=(src[l4] - src[l5])/2;
const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
- int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
- d= MAX(d, 0);
+ int d= FFABS(middleEnergy) - FFMIN( FFABS(leftEnergy), FFABS(rightEnergy) );
+ d= FFMAX(d, 0);
d= (5*d + 32) >> 6;
- d*= SIGN(-middleEnergy);
+ d*= FFSIGN(-middleEnergy);
if(q>0)
{
@@ -1489,7 +1491,7 @@ DERING_CORE((%0, %1, 8) ,(%%REGd, %1, 4),%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,
static int worstRange=0;
static int worstDiff=0;
int diff= (f - *p);
- int absDiff= ABS(diff);
+ int absDiff= FFABS(diff);
int error= diff*diff;
if(x==1 || x==8 || y==1 || y==8) continue;
@@ -1505,7 +1507,7 @@ DERING_CORE((%0, %1, 8) ,(%%REGd, %1, 4),%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,
if(1024LL*1024LL*1024LL % numSkiped == 0)
{
- printf( "sum:%1.3f, skip:%d, wQP:%d, "
+ av_log(c, AV_LOG_INFO, "sum:%1.3f, skip:%d, wQP:%d, "
"wRange:%d, wDiff:%d, relSkip:%1.3f\n",
(float)errorSum/numSkiped, numSkiped, worstQP, worstRange,
worstDiff, (float)numSkiped/numPixels);
@@ -1530,7 +1532,7 @@ DERING_CORE((%0, %1, 8) ,(%%REGd, %1, 4),%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,
for(x=1; x<9; x++)
{
p++;
- *p = MIN(*p + 20, 255);
+ *p = FFMIN(*p + 20, 255);
}
}
// src[0] = src[7]=src[stride*7]=src[stride*7 + 7]=255;
@@ -2533,7 +2535,6 @@ L2_DIFF_CORE((%0, %%REGc) , (%1, %%REGc))
:: "r" (src), "r" (tempBlured), "r"((long)stride), "m" (tempBluredPast)
: "%"REG_a, "%"REG_d, "%"REG_c, "memory"
);
-//printf("%d\n", test);
#else //defined (HAVE_MMX2) || defined (HAVE_3DNOW)
{
int y;
@@ -2551,7 +2552,7 @@ L2_DIFF_CORE((%0, %%REGc) , (%1, %%REGc))
int d1=ref - cur;
// if(x==0 || x==7) d1+= d1>>1;
// if(y==0 || y==7) d1+= d1>>1;
-// d+= ABS(d1);
+// d+= FFABS(d1);
d+= d1*d1;
// sysd+= d1;
}
@@ -2566,7 +2567,6 @@ L2_DIFF_CORE((%0, %%REGc) , (%1, %%REGc))
*tempBluredPast=i;
// ((*tempBluredPast)*3 + d + 2)>>2;
-//printf("%d %d %d\n", maxNoise[0], maxNoise[1], maxNoise[2]);
/*
Switch between
1 0 0 0 0 0 0 (0)
@@ -3413,9 +3413,7 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
for(i=0; i<256; i++)
{
sum+= yHistogram[i];
-// printf("%d ", yHistogram[i]);
}
-// printf("\n\n");
/* we allways get a completly black picture first */
maxClipped= (uint64_t)(sum * c.ppMode.maxClippedThreshold);
@@ -3531,7 +3529,7 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
dstBlock+=8;
srcBlock+=8;
}
- if(width==ABS(dstStride))
+ if(width==FFABS(dstStride))
linecpy(dst, tempDst + 9*dstStride, copyAhead, dstStride);
else
{
@@ -3543,7 +3541,6 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
}
}
-//printf("\n");
for(y=0; y<height; y+=BLOCK_SIZE)
{
//1% speedup if these are here instead of the inner loop
@@ -3554,7 +3551,7 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
uint8_t *tempBlock2= c.tempBlocks + 8;
#endif
int8_t *QPptr= &QPs[(y>>qpVShift)*QPStride];
- int8_t *nonBQPptr= &c.nonBQPTable[(y>>qpVShift)*ABS(QPStride)];
+ int8_t *nonBQPptr= &c.nonBQPTable[(y>>qpVShift)*FFABS(QPStride)];
int QP=0;
/* can we mess with a 8x16 block from srcBlock/dstBlock downwards and 1 line upwards
if not than use a temporary buffer */
@@ -3564,23 +3561,22 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
/* copy from line (copyAhead) to (copyAhead+7) of src, these will be copied with
blockcopy to dst later */
linecpy(tempSrc + srcStride*copyAhead, srcBlock + srcStride*copyAhead,
- MAX(height-y-copyAhead, 0), srcStride);
+ FFMAX(height-y-copyAhead, 0), srcStride);
/* duplicate last line of src to fill the void upto line (copyAhead+7) */
- for(i=MAX(height-y, 8); i<copyAhead+8; i++)
- memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), ABS(srcStride));
+ for(i=FFMAX(height-y, 8); i<copyAhead+8; i++)
+ memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), FFABS(srcStride));
/* copy up to (copyAhead+1) lines of dst (line -1 to (copyAhead-1))*/
- linecpy(tempDst, dstBlock - dstStride, MIN(height-y+1, copyAhead+1), dstStride);
+ linecpy(tempDst, dstBlock - dstStride, FFMIN(height-y+1, copyAhead+1), dstStride);
/* duplicate last line of dst to fill the void upto line (copyAhead) */
for(i=height-y+1; i<=copyAhead; i++)
- memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), ABS(dstStride));
+ memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), FFABS(dstStride));
dstBlock= tempDst + dstStride;
srcBlock= tempSrc;
}
-//printf("\n");
// From this point on it is guranteed that we can read and write 16 lines downward
// finish 1 block before the next otherwise we might have a problem
@@ -3787,7 +3783,7 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
if(y+15 >= height)
{
uint8_t *dstBlock= &(dst[y*dstStride]);
- if(width==ABS(dstStride))
+ if(width==FFABS(dstStride))
linecpy(dstBlock, tempDst + dstStride, height-y, dstStride);
else
{
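The postprocess_template.c changes above are mechanical renames: the private ABS/MIN/MAX/SIGN helpers become the FFABS/FFMIN/FFMAX/FFSIGN macros shared through libavutil, and bare printf() diagnostics go through av_log() so they respect the caller's logging callback and level. The FF* macros are, in essence, the usual conditional expressions (sketch of the assumed definitions):

    /* assumed libavutil-style definitions; behavior matches the old ABS/MIN/MAX/SIGN */
    #define FFABS(a)   ((a) >= 0 ? (a) : (-(a)))
    #define FFSIGN(a)  ((a) > 0 ? 1 : -1)
    #define FFMAX(a,b) ((a) > (b) ? (a) : (b))
    #define FFMIN(a,b) ((a) > (b) ? (b) : (a))

so expressions such as FFABS(middleEnergy) < 8*QP keep exactly the old semantics.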
diff --git a/contrib/ffmpeg/libswscale/Makefile b/contrib/ffmpeg/libswscale/Makefile
new file mode 100644
index 000000000..82e9bfc02
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/Makefile
@@ -0,0 +1,26 @@
+
+include ../config.mak
+
+NAME=swscale
+ifeq ($(BUILD_SHARED),yes)
+LIBVERSION=$(SWSVERSION)
+LIBMAJOR=$(SWSMAJOR)
+endif
+
+EXTRALIBS := -L$(BUILD_ROOT)/libavutil -lavutil$(BUILDSUF) $(EXTRALIBS)
+
+OBJS= swscale.o rgb2rgb.o yuv2rgb.o
+ifeq ($(TARGET_ALTIVEC),yes)
+OBJS+= yuv2rgb_altivec.o
+endif
+
+HEADERS = swscale.h rgb2rgb.h
+
+include ../common.mak
+
+cs_test: cs_test.c $(LIB)
+
+swscale-example: swscale-example.o $(LIB)
+
+clean::
+ rm -f cs_test swscale-example
diff --git a/contrib/ffmpeg/libswscale/cs_test.c b/contrib/ffmpeg/libswscale/cs_test.c
new file mode 100644
index 000000000..6b2deab3e
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/cs_test.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <string.h> /* for memset() */
+#include <unistd.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <malloc.h>
+
+#include "swscale.h"
+#include "rgb2rgb.h"
+
+#define SIZE 1000
+#define srcByte 0x55
+#define dstByte 0xBB
+
+#ifdef __APPLE_CC__
+#define memalign(x,y) malloc(y)
+#endif
+
+static int cpu_caps;
+
+static char *args_parse(int argc, char *argv[])
+{
+ int o;
+
+ while ((o = getopt(argc, argv, "m23")) != -1) {
+ switch (o) {
+ case 'm':
+ cpu_caps |= SWS_CPU_CAPS_MMX;
+ break;
+ case '2':
+ cpu_caps |= SWS_CPU_CAPS_MMX2;
+ break;
+ case '3':
+ cpu_caps |= SWS_CPU_CAPS_3DNOW;
+ break;
+ default:
+ fprintf(stderr, "Unknown option %c\n", o);
+ }
+ }
+
+ return argv[optind];
+}
+
+int main(int argc, char **argv)
+{
+ int i, funcNum;
+ uint8_t *srcBuffer= (uint8_t*)memalign(128, SIZE);
+ uint8_t *dstBuffer= (uint8_t*)memalign(128, SIZE);
+ int failedNum=0;
+ int passedNum=0;
+
+ printf("memory corruption test ...\n");
+ args_parse(argc, argv);
+ fprintf(stderr, "CPU capabilities forced to %x\n", cpu_caps);
+ sws_rgb2rgb_init(cpu_caps);
+
+ for(funcNum=0; funcNum<100; funcNum++){
+ int width;
+ int failed=0;
+ int srcBpp=0;
+ int dstBpp=0;
+
+ printf("."); fflush(stdout);
+ memset(srcBuffer, srcByte, SIZE);
+
+ for(width=32; width<64; width++){
+ int dstOffset;
+ for(dstOffset=128; dstOffset<196; dstOffset++){
+ int srcOffset;
+ memset(dstBuffer, dstByte, SIZE);
+
+ for(srcOffset=128; srcOffset<196; srcOffset++){
+ uint8_t *src= srcBuffer+srcOffset;
+ uint8_t *dst= dstBuffer+dstOffset;
+ char *name=NULL;
+
+ if(failed) break; //don't fill the screen with shit ...
+
+ switch(funcNum){
+ case 0:
+ srcBpp=2;
+ dstBpp=2;
+ name="rgb15to16";
+ rgb15to16(src, dst, width*srcBpp);
+ break;
+ case 1:
+ srcBpp=2;
+ dstBpp=3;
+ name="rgb15to24";
+ rgb15to24(src, dst, width*srcBpp);
+ break;
+ case 2:
+ srcBpp=2;
+ dstBpp=4;
+ name="rgb15to32";
+ rgb15to32(src, dst, width*srcBpp);
+ break;
+ case 3:
+ srcBpp=2;
+ dstBpp=3;
+ name="rgb16to24";
+ rgb16to24(src, dst, width*srcBpp);
+ break;
+ case 4:
+ srcBpp=2;
+ dstBpp=4;
+ name="rgb16to32";
+ rgb16to32(src, dst, width*srcBpp);
+ break;
+ case 5:
+ srcBpp=3;
+ dstBpp=2;
+ name="rgb24to15";
+ rgb24to15(src, dst, width*srcBpp);
+ break;
+ case 6:
+ srcBpp=3;
+ dstBpp=2;
+ name="rgb24to16";
+ rgb24to16(src, dst, width*srcBpp);
+ break;
+ case 7:
+ srcBpp=3;
+ dstBpp=4;
+ name="rgb24to32";
+ rgb24to32(src, dst, width*srcBpp);
+ break;
+ case 8:
+ srcBpp=4;
+ dstBpp=2;
+ name="rgb32to15";
+ rgb32to15(src, dst, width*srcBpp);
+ break;
+ case 9:
+ srcBpp=4;
+ dstBpp=2;
+ name="rgb32to16";
+ rgb32to16(src, dst, width*srcBpp);
+ break;
+ case 10:
+ srcBpp=4;
+ dstBpp=3;
+ name="rgb32to24";
+ rgb32to24(src, dst, width*srcBpp);
+ break;
+ case 11:
+ srcBpp=2;
+ dstBpp=2;
+ name="rgb16to15";
+ rgb16to15(src, dst, width*srcBpp);
+ break;
+
+ case 14:
+ srcBpp=2;
+ dstBpp=2;
+ name="rgb15tobgr15";
+ rgb15tobgr15(src, dst, width*srcBpp);
+ break;
+ case 15:
+ srcBpp=2;
+ dstBpp=2;
+ name="rgb15tobgr16";
+ rgb15tobgr16(src, dst, width*srcBpp);
+ break;
+ case 16:
+ srcBpp=2;
+ dstBpp=3;
+ name="rgb15tobgr24";
+ rgb15tobgr24(src, dst, width*srcBpp);
+ break;
+ case 17:
+ srcBpp=2;
+ dstBpp=4;
+ name="rgb15tobgr32";
+ rgb15tobgr32(src, dst, width*srcBpp);
+ break;
+ case 18:
+ srcBpp=2;
+ dstBpp=2;
+ name="rgb16tobgr15";
+ rgb16tobgr15(src, dst, width*srcBpp);
+ break;
+ case 19:
+ srcBpp=2;
+ dstBpp=2;
+ name="rgb16tobgr16";
+ rgb16tobgr16(src, dst, width*srcBpp);
+ break;
+ case 20:
+ srcBpp=2;
+ dstBpp=3;
+ name="rgb16tobgr24";
+ rgb16tobgr24(src, dst, width*srcBpp);
+ break;
+ case 21:
+ srcBpp=2;
+ dstBpp=4;
+ name="rgb16tobgr32";
+ rgb16tobgr32(src, dst, width*srcBpp);
+ break;
+ case 22:
+ srcBpp=3;
+ dstBpp=2;
+ name="rgb24tobgr15";
+ rgb24tobgr15(src, dst, width*srcBpp);
+ break;
+ case 23:
+ srcBpp=3;
+ dstBpp=2;
+ name="rgb24tobgr16";
+ rgb24tobgr16(src, dst, width*srcBpp);
+ break;
+ case 24:
+ srcBpp=3;
+ dstBpp=3;
+ name="rgb24tobgr24";
+ rgb24tobgr24(src, dst, width*srcBpp);
+ break;
+ case 25:
+ srcBpp=3;
+ dstBpp=4;
+ name="rgb24tobgr32";
+ rgb24tobgr32(src, dst, width*srcBpp);
+ break;
+ case 26:
+ srcBpp=4;
+ dstBpp=2;
+ name="rgb32tobgr15";
+ rgb32tobgr15(src, dst, width*srcBpp);
+ break;
+ case 27:
+ srcBpp=4;
+ dstBpp=2;
+ name="rgb32tobgr16";
+ rgb32tobgr16(src, dst, width*srcBpp);
+ break;
+ case 28:
+ srcBpp=4;
+ dstBpp=3;
+ name="rgb32tobgr24";
+ rgb32tobgr24(src, dst, width*srcBpp);
+ break;
+ case 29:
+ srcBpp=4;
+ dstBpp=4;
+ name="rgb32tobgr32";
+ rgb32tobgr32(src, dst, width*srcBpp);
+ break;
+
+ }
+ if(!srcBpp) break;
+
+ for(i=0; i<SIZE; i++){
+ if(srcBuffer[i]!=srcByte){
+ printf("src damaged at %d w:%d src:%d dst:%d %s\n",
+ i, width, srcOffset, dstOffset, name);
+ failed=1;
+ break;
+ }
+ }
+ for(i=0; i<dstOffset; i++){
+ if(dstBuffer[i]!=dstByte){
+ printf("dst damaged at %d w:%d src:%d dst:%d %s\n",
+ i, width, srcOffset, dstOffset, name);
+ failed=1;
+ break;
+ }
+ }
+ for(i=dstOffset + width*dstBpp; i<SIZE; i++){
+ if(dstBuffer[i]!=dstByte){
+ printf("dst damaged at %d w:%d src:%d dst:%d %s\n",
+ i, width, srcOffset, dstOffset, name);
+ failed=1;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if(failed) failedNum++;
+ else if(srcBpp) passedNum++;
+ }
+
+ printf("%d converters passed, %d converters randomly overwrote memory\n", passedNum, failedNum);
+ return failedNum;
+}
diff --git a/contrib/ffmpeg/libswscale/rgb2rgb.c b/contrib/ffmpeg/libswscale/rgb2rgb.c
new file mode 100644
index 000000000..2bb5d3355
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/rgb2rgb.c
@@ -0,0 +1,665 @@
+/*
+ *
+ * rgb2rgb.c, Software RGB to RGB convertor
+ * pluralize by Software PAL8 to RGB convertor
+ * Software YUV to YUV convertor
+ * Software YUV to RGB convertor
+ * Written by Nick Kurshev.
+ * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * the C code (not assembly, mmx, ...) of this file can be used
+ * under the LGPL license too
+ */
+#include <inttypes.h>
+#include "config.h"
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+#include "x86_cpu.h"
+#include "bswap.h"
+#ifdef USE_FASTMEMCPY
+#include "libvo/fastmemcpy.h"
+#endif
+
+#define FAST_BGR2YV12 // use 7 bit coeffs instead of 15bit
+
+void (*rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb24to16)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb24to15)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb32to16)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb32to15)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb15to24)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb15to32)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb16to24)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb16to32)(const uint8_t *src,uint8_t *dst,long src_size);
+//void (*rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
+//void (*rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
+
+void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
+void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
+void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
+void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride);
+void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride);
+void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height,
+ long srcStride, long dstStride);
+void (*interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dst,
+ long width, long height, long src1Stride,
+ long src2Stride, long dstStride);
+void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
+ uint8_t *dst1, uint8_t *dst2,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long dstStride1, long dstStride2);
+void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+ uint8_t *dst,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long srcStride3, long dstStride);
+
+#if defined(ARCH_X86)
+static const uint64_t mmx_null __attribute__((aligned(8))) = 0x0000000000000000ULL;
+static const uint64_t mmx_one __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
+static const uint64_t mask32b attribute_used __attribute__((aligned(8))) = 0x000000FF000000FFULL;
+static const uint64_t mask32g attribute_used __attribute__((aligned(8))) = 0x0000FF000000FF00ULL;
+static const uint64_t mask32r attribute_used __attribute__((aligned(8))) = 0x00FF000000FF0000ULL;
+static const uint64_t mask32 __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
+static const uint64_t mask3216br __attribute__((aligned(8)))=0x00F800F800F800F8ULL;
+static const uint64_t mask3216g __attribute__((aligned(8)))=0x0000FC000000FC00ULL;
+static const uint64_t mask3215g __attribute__((aligned(8)))=0x0000F8000000F800ULL;
+static const uint64_t mul3216 __attribute__((aligned(8))) = 0x2000000420000004ULL;
+static const uint64_t mul3215 __attribute__((aligned(8))) = 0x2000000820000008ULL;
+static const uint64_t mask24b attribute_used __attribute__((aligned(8))) = 0x00FF0000FF0000FFULL;
+static const uint64_t mask24g attribute_used __attribute__((aligned(8))) = 0xFF0000FF0000FF00ULL;
+static const uint64_t mask24r attribute_used __attribute__((aligned(8))) = 0x0000FF0000FF0000ULL;
+static const uint64_t mask24l __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
+static const uint64_t mask24h __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
+static const uint64_t mask24hh __attribute__((aligned(8))) = 0xffff000000000000ULL;
+static const uint64_t mask24hhh __attribute__((aligned(8))) = 0xffffffff00000000ULL;
+static const uint64_t mask24hhhh __attribute__((aligned(8))) = 0xffffffffffff0000ULL;
+static const uint64_t mask15b __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
+static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
+static const uint64_t mask15s __attribute__((aligned(8))) = 0xFFE0FFE0FFE0FFE0ULL;
+static const uint64_t mask15g __attribute__((aligned(8))) = 0x03E003E003E003E0ULL;
+static const uint64_t mask15r __attribute__((aligned(8))) = 0x7C007C007C007C00ULL;
+#define mask16b mask15b
+static const uint64_t mask16g __attribute__((aligned(8))) = 0x07E007E007E007E0ULL;
+static const uint64_t mask16r __attribute__((aligned(8))) = 0xF800F800F800F800ULL;
+static const uint64_t red_16mask __attribute__((aligned(8))) = 0x0000f8000000f800ULL;
+static const uint64_t green_16mask __attribute__((aligned(8)))= 0x000007e0000007e0ULL;
+static const uint64_t blue_16mask __attribute__((aligned(8))) = 0x0000001f0000001fULL;
+static const uint64_t red_15mask __attribute__((aligned(8))) = 0x00007c000000f800ULL;
+static const uint64_t green_15mask __attribute__((aligned(8)))= 0x000003e0000007e0ULL;
+static const uint64_t blue_15mask __attribute__((aligned(8))) = 0x0000001f0000001fULL;
+
+#ifdef FAST_BGR2YV12
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000000210041000DULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000FFEEFFDC0038ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00000038FFD2FFF8ULL;
+#else
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000020E540830C8BULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000ED0FDAC23831ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00003831D0E6F6EAULL;
+#endif
+static const uint64_t bgr2YOffset attribute_used __attribute__((aligned(8))) = 0x1010101010101010ULL;
+static const uint64_t bgr2UVOffset attribute_used __attribute__((aligned(8)))= 0x8080808080808080ULL;
+static const uint64_t w1111 attribute_used __attribute__((aligned(8))) = 0x0001000100010001ULL;
+
+#if 0
+static volatile uint64_t __attribute__((aligned(8))) b5Dither;
+static volatile uint64_t __attribute__((aligned(8))) g5Dither;
+static volatile uint64_t __attribute__((aligned(8))) g6Dither;
+static volatile uint64_t __attribute__((aligned(8))) r5Dither;
+
+static uint64_t __attribute__((aligned(8))) dither4[2]={
+ 0x0103010301030103LL,
+ 0x0200020002000200LL,};
+
+static uint64_t __attribute__((aligned(8))) dither8[2]={
+ 0x0602060206020602LL,
+ 0x0004000400040004LL,};
+#endif
+#endif /* defined(ARCH_X86) */
+
+#define RGB2YUV_SHIFT 8
+#define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5))
+#define BV ((int)(-0.071*(1<<RGB2YUV_SHIFT)+0.5))
+#define BU ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define GY ((int)( 0.504*(1<<RGB2YUV_SHIFT)+0.5))
+#define GV ((int)(-0.368*(1<<RGB2YUV_SHIFT)+0.5))
+#define GU ((int)(-0.291*(1<<RGB2YUV_SHIFT)+0.5))
+#define RY ((int)( 0.257*(1<<RGB2YUV_SHIFT)+0.5))
+#define RV ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define RU ((int)(-0.148*(1<<RGB2YUV_SHIFT)+0.5))
+
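With RGB2YUV_SHIFT set to 8, the RY/GY/BY (and corresponding U/V) constants above are the BT.601 studio-range coefficients rounded to 8-bit fixed point, and bgr2YOffset/bgr2UVOffset hold the matching 16 and 128 offsets. The C fallback in rgb2rgb_template.c combines them along these lines (a sketch, not a verbatim quote):

    /* sketch: fixed-point luma from packed RGB, studio range */
    static inline int rgb_to_y(int r, int g, int b)
    {
        return ((RY*r + GY*g + BY*b) >> RGB2YUV_SHIFT) + 16;
    }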
+//Note: we have C, MMX, MMX2 and 3DNOW versions; there is no 3DNOW+MMX2 one
+//Plain C versions
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_3DNOW
+#undef HAVE_SSE2
+#define RENAME(a) a ## _C
+#include "rgb2rgb_template.c"
+
+#if defined(ARCH_X86)
+
+//MMX versions
+#undef RENAME
+#define HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_3DNOW
+#undef HAVE_SSE2
+#define RENAME(a) a ## _MMX
+#include "rgb2rgb_template.c"
+
+//MMX2 versions
+#undef RENAME
+#define HAVE_MMX
+#define HAVE_MMX2
+#undef HAVE_3DNOW
+#undef HAVE_SSE2
+#define RENAME(a) a ## _MMX2
+#include "rgb2rgb_template.c"
+
+//3DNOW versions
+#undef RENAME
+#define HAVE_MMX
+#undef HAVE_MMX2
+#define HAVE_3DNOW
+#undef HAVE_SSE2
+#define RENAME(a) a ## _3DNOW
+#include "rgb2rgb_template.c"
+
+#endif //ARCH_X86 || ARCH_X86_64
+
+/*
+ rgb15->rgb16 Original by Strepto/Astral
+ ported to gcc & bugfixed : A'rpi
+ MMX2, 3DNOW optimization by Nick Kurshev
+ 32bit c version, and and&add trick by Michael Niedermayer
+*/
+
+void sws_rgb2rgb_init(int flags){
+#if defined(HAVE_MMX2) || defined(HAVE_3DNOW) || defined(HAVE_MMX)
+ if(flags & SWS_CPU_CAPS_MMX2){
+ rgb15to16= rgb15to16_MMX2;
+ rgb15to24= rgb15to24_MMX2;
+ rgb15to32= rgb15to32_MMX2;
+ rgb16to24= rgb16to24_MMX2;
+ rgb16to32= rgb16to32_MMX2;
+ rgb16to15= rgb16to15_MMX2;
+ rgb24to16= rgb24to16_MMX2;
+ rgb24to15= rgb24to15_MMX2;
+ rgb24to32= rgb24to32_MMX2;
+ rgb32to16= rgb32to16_MMX2;
+ rgb32to15= rgb32to15_MMX2;
+ rgb32to24= rgb32to24_MMX2;
+ rgb24tobgr15= rgb24tobgr15_MMX2;
+ rgb24tobgr16= rgb24tobgr16_MMX2;
+ rgb24tobgr24= rgb24tobgr24_MMX2;
+ rgb32tobgr32= rgb32tobgr32_MMX2;
+ rgb32tobgr16= rgb32tobgr16_MMX2;
+ rgb32tobgr15= rgb32tobgr15_MMX2;
+ yv12toyuy2= yv12toyuy2_MMX2;
+ yv12touyvy= yv12touyvy_MMX2;
+ yuv422ptoyuy2= yuv422ptoyuy2_MMX2;
+ yuy2toyv12= yuy2toyv12_MMX2;
+// uyvytoyv12= uyvytoyv12_MMX2;
+// yvu9toyv12= yvu9toyv12_MMX2;
+ planar2x= planar2x_MMX2;
+ rgb24toyv12= rgb24toyv12_MMX2;
+ interleaveBytes= interleaveBytes_MMX2;
+ vu9_to_vu12= vu9_to_vu12_MMX2;
+ yvu9_to_yuy2= yvu9_to_yuy2_MMX2;
+ }else if(flags & SWS_CPU_CAPS_3DNOW){
+ rgb15to16= rgb15to16_3DNOW;
+ rgb15to24= rgb15to24_3DNOW;
+ rgb15to32= rgb15to32_3DNOW;
+ rgb16to24= rgb16to24_3DNOW;
+ rgb16to32= rgb16to32_3DNOW;
+ rgb16to15= rgb16to15_3DNOW;
+ rgb24to16= rgb24to16_3DNOW;
+ rgb24to15= rgb24to15_3DNOW;
+ rgb24to32= rgb24to32_3DNOW;
+ rgb32to16= rgb32to16_3DNOW;
+ rgb32to15= rgb32to15_3DNOW;
+ rgb32to24= rgb32to24_3DNOW;
+ rgb24tobgr15= rgb24tobgr15_3DNOW;
+ rgb24tobgr16= rgb24tobgr16_3DNOW;
+ rgb24tobgr24= rgb24tobgr24_3DNOW;
+ rgb32tobgr32= rgb32tobgr32_3DNOW;
+ rgb32tobgr16= rgb32tobgr16_3DNOW;
+ rgb32tobgr15= rgb32tobgr15_3DNOW;
+ yv12toyuy2= yv12toyuy2_3DNOW;
+ yv12touyvy= yv12touyvy_3DNOW;
+ yuv422ptoyuy2= yuv422ptoyuy2_3DNOW;
+ yuy2toyv12= yuy2toyv12_3DNOW;
+// uyvytoyv12= uyvytoyv12_3DNOW;
+// yvu9toyv12= yvu9toyv12_3DNOW;
+ planar2x= planar2x_3DNOW;
+ rgb24toyv12= rgb24toyv12_3DNOW;
+ interleaveBytes= interleaveBytes_3DNOW;
+ vu9_to_vu12= vu9_to_vu12_3DNOW;
+ yvu9_to_yuy2= yvu9_to_yuy2_3DNOW;
+ }else if(flags & SWS_CPU_CAPS_MMX){
+ rgb15to16= rgb15to16_MMX;
+ rgb15to24= rgb15to24_MMX;
+ rgb15to32= rgb15to32_MMX;
+ rgb16to24= rgb16to24_MMX;
+ rgb16to32= rgb16to32_MMX;
+ rgb16to15= rgb16to15_MMX;
+ rgb24to16= rgb24to16_MMX;
+ rgb24to15= rgb24to15_MMX;
+ rgb24to32= rgb24to32_MMX;
+ rgb32to16= rgb32to16_MMX;
+ rgb32to15= rgb32to15_MMX;
+ rgb32to24= rgb32to24_MMX;
+ rgb24tobgr15= rgb24tobgr15_MMX;
+ rgb24tobgr16= rgb24tobgr16_MMX;
+ rgb24tobgr24= rgb24tobgr24_MMX;
+ rgb32tobgr32= rgb32tobgr32_MMX;
+ rgb32tobgr16= rgb32tobgr16_MMX;
+ rgb32tobgr15= rgb32tobgr15_MMX;
+ yv12toyuy2= yv12toyuy2_MMX;
+ yv12touyvy= yv12touyvy_MMX;
+ yuv422ptoyuy2= yuv422ptoyuy2_MMX;
+ yuy2toyv12= yuy2toyv12_MMX;
+// uyvytoyv12= uyvytoyv12_MMX;
+// yvu9toyv12= yvu9toyv12_MMX;
+ planar2x= planar2x_MMX;
+ rgb24toyv12= rgb24toyv12_MMX;
+ interleaveBytes= interleaveBytes_MMX;
+ vu9_to_vu12= vu9_to_vu12_MMX;
+ yvu9_to_yuy2= yvu9_to_yuy2_MMX;
+ }else
+#endif /* defined(HAVE_MMX2) || defined(HAVE_3DNOW) || defined(HAVE_MMX) */
+ {
+ rgb15to16= rgb15to16_C;
+ rgb15to24= rgb15to24_C;
+ rgb15to32= rgb15to32_C;
+ rgb16to24= rgb16to24_C;
+ rgb16to32= rgb16to32_C;
+ rgb16to15= rgb16to15_C;
+ rgb24to16= rgb24to16_C;
+ rgb24to15= rgb24to15_C;
+ rgb24to32= rgb24to32_C;
+ rgb32to16= rgb32to16_C;
+ rgb32to15= rgb32to15_C;
+ rgb32to24= rgb32to24_C;
+ rgb24tobgr15= rgb24tobgr15_C;
+ rgb24tobgr16= rgb24tobgr16_C;
+ rgb24tobgr24= rgb24tobgr24_C;
+ rgb32tobgr32= rgb32tobgr32_C;
+ rgb32tobgr16= rgb32tobgr16_C;
+ rgb32tobgr15= rgb32tobgr15_C;
+ yv12toyuy2= yv12toyuy2_C;
+ yv12touyvy= yv12touyvy_C;
+ yuv422ptoyuy2= yuv422ptoyuy2_C;
+ yuy2toyv12= yuy2toyv12_C;
+// uyvytoyv12= uyvytoyv12_C;
+// yvu9toyv12= yvu9toyv12_C;
+ planar2x= planar2x_C;
+ rgb24toyv12= rgb24toyv12_C;
+ interleaveBytes= interleaveBytes_C;
+ vu9_to_vu12= vu9_to_vu12_C;
+ yvu9_to_yuy2= yvu9_to_yuy2_C;
+ }
+}
+
+/**
+ * Palette is assumed to contain bgr32
+ */
+void palette8torgb32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+
+/*
+ for(i=0; i<num_pixels; i++)
+ ((unsigned *)dst)[i] = ((unsigned *)palette)[ src[i] ];
+*/
+
+ for(i=0; i<num_pixels; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ dst[3]= palette[ src[i]*4+2 ];
+ dst[2]= palette[ src[i]*4+1 ];
+ dst[1]= palette[ src[i]*4+0 ];
+ #else
+ //FIXME slow?
+ dst[0]= palette[ src[i]*4+2 ];
+ dst[1]= palette[ src[i]*4+1 ];
+ dst[2]= palette[ src[i]*4+0 ];
+ //dst[3]= 0; /* do we need this cleansing? */
+ #endif
+ dst+= 4;
+ }
+}
+
+void palette8tobgr32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for(i=0; i<num_pixels; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ dst[3]= palette[ src[i]*4+0 ];
+ dst[2]= palette[ src[i]*4+1 ];
+ dst[1]= palette[ src[i]*4+2 ];
+ #else
+ //FIXME slow?
+ dst[0]= palette[ src[i]*4+0 ];
+ dst[1]= palette[ src[i]*4+1 ];
+ dst[2]= palette[ src[i]*4+2 ];
+ //dst[3]= 0; /* do we need this cleansing? */
+ #endif
+
+ dst+= 4;
+ }
+}
+
+/**
+ * Palette is assumed to contain bgr32
+ */
+void palette8torgb24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+/*
+ writes 1 byte too much and might cause alignment issues on some architectures?
+ for(i=0; i<num_pixels; i++)
+ ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
+*/
+ for(i=0; i<num_pixels; i++)
+ {
+ //FIXME slow?
+ dst[0]= palette[ src[i]*4+2 ];
+ dst[1]= palette[ src[i]*4+1 ];
+ dst[2]= palette[ src[i]*4+0 ];
+ dst+= 3;
+ }
+}
+
+void palette8tobgr24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+/*
+ writes 1 byte too much and might cause alignment issues on some architectures?
+ for(i=0; i<num_pixels; i++)
+ ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
+*/
+ for(i=0; i<num_pixels; i++)
+ {
+ //FIXME slow?
+ dst[0]= palette[ src[i]*4+0 ];
+ dst[1]= palette[ src[i]*4+1 ];
+ dst[2]= palette[ src[i]*4+2 ];
+ dst+= 3;
+ }
+}
+
+/**
+ * Palette is assumed to contain bgr16, see rgb32to16 to convert the palette
+ */
+void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for(i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
+}
+void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for(i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
+}
+
+/**
+ * Palette is assumed to contain bgr15, see rgb32to15 to convert the palette
+ */
+void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for(i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
+}
+void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+ long i;
+ for(i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
+}
+
+void rgb32tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ long i;
+ long num_pixels = src_size >> 2;
+ for(i=0; i<num_pixels; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ /* RGB32 (= A,B,G,R) -> BGR24 (= B,G,R) */
+ dst[3*i + 0] = src[4*i + 1];
+ dst[3*i + 1] = src[4*i + 2];
+ dst[3*i + 2] = src[4*i + 3];
+ #else
+ dst[3*i + 0] = src[4*i + 2];
+ dst[3*i + 1] = src[4*i + 1];
+ dst[3*i + 2] = src[4*i + 0];
+ #endif
+ }
+}
+
+void rgb24tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ long i;
+ for(i=0; 3*i<src_size; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ /* RGB24 (= R,G,B) -> BGR32 (= A,R,G,B) */
+ dst[4*i + 0] = 0;
+ dst[4*i + 1] = src[3*i + 0];
+ dst[4*i + 2] = src[3*i + 1];
+ dst[4*i + 3] = src[3*i + 2];
+ #else
+ dst[4*i + 0] = src[3*i + 2];
+ dst[4*i + 1] = src[3*i + 1];
+ dst[4*i + 2] = src[3*i + 0];
+ dst[4*i + 3] = 0;
+ #endif
+ }
+}
+
+void rgb16tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint16_t *end;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ #ifdef WORDS_BIGENDIAN
+ *d++ = 0;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0xF800)>>8;
+ #else
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = 0;
+ #endif
+ }
+}
+
+void rgb16tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint16_t *end;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0x1F)<<3;
+ }
+}
+
+void rgb16tobgr16(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ long i;
+ long num_pixels = src_size >> 1;
+
+ for(i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = src[2*i];
+ r = rgb&0x1F;
+ g = (rgb&0x7E0)>>5;
+ b = (rgb&0xF800)>>11;
+ dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
+ }
+}
+
+void rgb16tobgr15(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ long i;
+ long num_pixels = src_size >> 1;
+
+ for(i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = src[2*i];
+ r = rgb&0x1F;
+ g = (rgb&0x7E0)>>5;
+ b = (rgb&0xF800)>>11;
+ dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
+ }
+}
+
+void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint16_t *end;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ #ifdef WORDS_BIGENDIAN
+ *d++ = 0;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x7C00)>>7;
+ #else
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = 0;
+ #endif
+ }
+}
+
+void rgb15tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint16_t *end;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x1F)<<3;
+ }
+}
+
+void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ long i;
+ long num_pixels = src_size >> 1;
+
+ for(i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = src[2*i];
+ r = rgb&0x1F;
+ g = (rgb&0x3E0)>>5;
+ b = (rgb&0x7C00)>>10;
+ dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
+ }
+}
+
+void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ long i;
+ long num_pixels = src_size >> 1;
+
+ for(i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = src[2*i];
+ r = rgb&0x1F;
+ g = (rgb&0x3E0)>>5;
+ b = (rgb&0x7C00)>>10;
+ dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
+ }
+}
+
+void rgb8tobgr8(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ long i;
+ long num_pixels = src_size;
+ for(i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint8_t rgb;
+ rgb = src[i];
+ r = (rgb&0x07);
+ g = (rgb&0x38)>>3;
+ b = (rgb&0xC0)>>6;
+ dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6);
+ }
+}
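rgb2rgb.c compiles one body of code several times over: rgb2rgb_template.c is #included once per instruction set with HAVE_MMX/HAVE_MMX2/HAVE_3DNOW toggled, RENAME() pastes a suffix onto every function name, and sws_rgb2rgb_init() then binds the exported function pointers to whichever variant the runtime CPU flags allow. A stripped-down sketch of the same pattern, with hypothetical names rather than the file's own:

    /* template.c -- only ever #included, never built on its own */
    static void RENAME(blit)(uint8_t *dst, const uint8_t *src, long n)
    {
        while (n--) *dst++ = *src++;   /* per-CPU body selected by the HAVE_* macros in effect */
    }

    /* dispatcher.c (assumes <stdint.h> for uint8_t) */
    #define RENAME(a) a ## _C
    #include "template.c"
    #undef RENAME
    #define HAVE_MMX
    #define RENAME(a) a ## _MMX
    #include "template.c"

    void (*blit)(uint8_t *dst, const uint8_t *src, long n);
    void blit_init(int cpu_caps) { blit = cpu_caps ? blit_MMX : blit_C; }

The payoff is a single maintained source file that still emits separately optimized _C, _MMX, _MMX2 and _3DNOW symbols.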
diff --git a/contrib/ffmpeg/libswscale/rgb2rgb.h b/contrib/ffmpeg/libswscale/rgb2rgb.h
new file mode 100644
index 000000000..4b5cc0a69
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/rgb2rgb.h
@@ -0,0 +1,147 @@
+/*
+ *
+ * rgb2rgb.h, Software RGB to RGB convertor
+ * pluralize by Software PAL8 to RGB convertor
+ * Software YUV to YUV convertor
+ * Software YUV to RGB convertor
+ * Written by Nick Kurshev.
+ * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef RGB2RGB_INCLUDED
+#define RGB2RGB_INCLUDED
+
+// Note: do not fix the dependence on stdio.h
+
+/* A full collection of rgb to rgb(bgr) convertors */
+extern void (*rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb24to16)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb24to15)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb32to16)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb32to15)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb15to24)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb15to32)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb16to24)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb16to32)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
+
+extern void rgb24tobgr32(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb32tobgr24(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb16tobgr32(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb16tobgr24(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb16tobgr16(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb16tobgr15(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb15tobgr24(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb8tobgr8(const uint8_t *src, uint8_t *dst, long src_size);
+
+
+extern void palette8torgb32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8tobgr32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8torgb24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8tobgr24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+extern void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+
+/**
+ *
+ * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
+ * problem for anyone then tell me, and I'll fix it)
+ * chrominance data is only taken from every second line, others are ignored FIXME write HQ version
+ */
+//void uyvytoyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+
+/**
+ *
+ * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
+ * problem for anyone then tell me, and I'll fix it)
+ */
+extern void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
+
+/**
+ *
+ * width should be a multiple of 16
+ */
+extern void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
+
+/**
+ *
+ * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
+ * problem for anyone then tell me, and I'll fix it)
+ */
+extern void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride);
+
+/**
+ *
+ * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
+ * problem for anyone then tell me, and I'll fix it)
+ */
+extern void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
+
+/**
+ *
+ * height should be a multiple of 2 and width should be a multiple of 2 (if this is a
+ * problem for anyone then tell me, and I'll fix it)
+ * chrominance data is only taken from every second line, others are ignored FIXME write HQ version
+ */
+extern void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride);
+extern void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height,
+ long srcStride, long dstStride);
+
+extern void (*interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dst,
+ long width, long height, long src1Stride,
+ long src2Stride, long dstStride);
+
+extern void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
+ uint8_t *dst1, uint8_t *dst2,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long dstStride1, long dstStride2);
+
+extern void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+ uint8_t *dst,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long srcStride3, long dstStride);
+
+void sws_rgb2rgb_init(int flags);
+
+#endif
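Everything rgb2rgb.h declares as extern void (*...) is a function pointer, not a function: sws_rgb2rgb_init() repoints the whole set at the _C, _MMX, _MMX2 or _3DNOW bodies generated from rgb2rgb_template.c. A minimal caller, assuming the swscale headers are on the include path (illustrative helper, not part of the library):

    #include <inttypes.h>
    #include "swscale.h"   /* SWS_CPU_CAPS_* flags */
    #include "rgb2rgb.h"

    /* expand one packed RGB24 line to RGB32; width is in pixels */
    static void convert_one_line(const uint8_t *rgb24, uint8_t *rgb32, long width)
    {
        sws_rgb2rgb_init(0);                /* 0 = plain C paths; normally called once at startup */
        rgb24to32(rgb24, rgb32, width * 3); /* the size argument counts source bytes */
    }

cs_test.c above exercises these same entry points with guard bytes around the buffers to catch out-of-bounds writes.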
diff --git a/contrib/ffmpeg/libswscale/rgb2rgb_template.c b/contrib/ffmpeg/libswscale/rgb2rgb_template.c
new file mode 100644
index 000000000..d9511c955
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/rgb2rgb_template.c
@@ -0,0 +1,2688 @@
+/*
+ *
+ * rgb2rgb.c, Software RGB to RGB convertor
+ * pluralize by Software PAL8 to RGB convertor
+ * Software YUV to YUV convertor
+ * Software YUV to RGB convertor
+ * Written by Nick Kurshev.
+ * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
+ * lot of big-endian byteorder fixes by Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * the C code (not assembly, mmx, ...) of this file can be used
+ * under the LGPL license too
+ */
+
+#include <stddef.h>
+#include <inttypes.h> /* for __WORDSIZE */
+
+#ifndef __WORDSIZE
+// #warning You have a misconfigured system and will probably lose performance!
+#define __WORDSIZE MP_WORDSIZE
+#endif
+
+#undef PREFETCH
+#undef MOVNTQ
+#undef EMMS
+#undef SFENCE
+#undef MMREG_SIZE
+#undef PREFETCHW
+#undef PAVGB
+
+#ifdef HAVE_SSE2
+#define MMREG_SIZE 16
+#else
+#define MMREG_SIZE 8
+#endif
+
+#ifdef HAVE_3DNOW
+#define PREFETCH "prefetch"
+#define PREFETCHW "prefetchw"
+#define PAVGB "pavgusb"
+#elif defined ( HAVE_MMX2 )
+#define PREFETCH "prefetchnta"
+#define PREFETCHW "prefetcht0"
+#define PAVGB "pavgb"
+#else
+#ifdef __APPLE__
+#define PREFETCH "#"
+#define PREFETCHW "#"
+#else
+#define PREFETCH " # nop"
+#define PREFETCHW " # nop"
+#endif
+#endif
+
+#ifdef HAVE_3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS "femms"
+#else
+#define EMMS "emms"
+#endif
+
+#ifdef HAVE_MMX2
+#define MOVNTQ "movntq"
+#define SFENCE "sfence"
+#else
+#define MOVNTQ "movq"
+#define SFENCE " # nop"
+#endif
+
+static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size)
+{
+ uint8_t *dest = dst;
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 23;
+ __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory");
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "punpckldq 3%1, %%mm0\n\t"
+ "movd 6%1, %%mm1\n\t"
+ "punpckldq 9%1, %%mm1\n\t"
+ "movd 12%1, %%mm2\n\t"
+ "punpckldq 15%1, %%mm2\n\t"
+ "movd 18%1, %%mm3\n\t"
+ "punpckldq 21%1, %%mm3\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm1\n\t"
+ "pand %%mm7, %%mm2\n\t"
+ "pand %%mm7, %%mm3\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm2, 16%0\n\t"
+ MOVNTQ" %%mm3, 24%0"
+ :"=m"(*dest)
+ :"m"(*s)
+ :"memory");
+ dest += 32;
+ s += 24;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+#ifdef WORDS_BIGENDIAN
+ /* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */
+ *dest++ = 0;
+ *dest++ = s[2];
+ *dest++ = s[1];
+ *dest++ = s[0];
+ s+=3;
+#else
+ *dest++ = *s++;
+ *dest++ = *s++;
+ *dest++ = *s++;
+ *dest++ = 0;
+#endif
+ }
+}
+
+static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size)
+{
+ uint8_t *dest = dst;
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 31;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq 8%1, %%mm1\n\t"
+ "movq 16%1, %%mm4\n\t"
+ "movq 24%1, %%mm5\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+ "movq %%mm4, %%mm6\n\t"
+ "movq %%mm5, %%mm7\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "psrlq $8, %%mm3\n\t"
+ "psrlq $8, %%mm6\n\t"
+ "psrlq $8, %%mm7\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm1\n\t"
+ "pand %2, %%mm4\n\t"
+ "pand %2, %%mm5\n\t"
+ "pand %3, %%mm2\n\t"
+ "pand %3, %%mm3\n\t"
+ "pand %3, %%mm6\n\t"
+ "pand %3, %%mm7\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "por %%mm6, %%mm4\n\t"
+ "por %%mm7, %%mm5\n\t"
+
+ "movq %%mm1, %%mm2\n\t"
+ "movq %%mm4, %%mm3\n\t"
+ "psllq $48, %%mm2\n\t"
+ "psllq $32, %%mm3\n\t"
+ "pand %4, %%mm2\n\t"
+ "pand %5, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psrlq $16, %%mm1\n\t"
+ "psrlq $32, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "pand %6, %%mm5\n\t"
+ "por %%mm5, %%mm4\n\t"
+
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm4, 16%0"
+ :"=m"(*dest)
+ :"m"(*s),"m"(mask24l),
+ "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+ :"memory");
+ dest += 24;
+ s += 32;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+#ifdef WORDS_BIGENDIAN
+ /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */
+ s++;
+ dest[2] = *s++;
+ dest[1] = *s++;
+ dest[0] = *s++;
+ dest += 3;
+#else
+ *dest++ = *s++;
+ *dest++ = *s++;
+ *dest++ = *s++;
+ s++;
+#endif
+ }
+}
+
+/*
+ Original by Strepto/Astral
+ ported to gcc & bugfixed : A'rpi
+ MMX2, 3DNOW optimization by Nick Kurshev
+ 32bit c version, and and&add trick by Michael Niedermayer
+*/
+static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size)
+{
+ register const uint8_t* s=src;
+ register uint8_t* d=dst;
+ register const uint8_t *end;
+ const uint8_t *mm_end;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s));
+ __asm __volatile("movq %0, %%mm4"::"m"(mask15s));
+ mm_end = end - 15;
+ while(s<mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq 8%1, %%mm2\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "pand %%mm4, %%mm0\n\t"
+ "pand %%mm4, %%mm2\n\t"
+ "paddw %%mm1, %%mm0\n\t"
+ "paddw %%mm3, %%mm2\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm2, 8%0"
+ :"=m"(*d)
+ :"m"(*s)
+ );
+ d+=16;
+ s+=16;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ mm_end = end - 3;
+ while(s < mm_end)
+ {
+ register unsigned x= *((uint32_t *)s);
+ *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
+ d+=4;
+ s+=4;
+ }
+ if(s < end)
+ {
+ register unsigned short x= *((uint16_t *)s);
+ *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
+ }
+}
+
+static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size)
+{
+ register const uint8_t* s=src;
+ register uint8_t* d=dst;
+ register const uint8_t *end;
+ const uint8_t *mm_end;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s));
+ __asm __volatile("movq %0, %%mm7"::"m"(mask15rg));
+ __asm __volatile("movq %0, %%mm6"::"m"(mask15b));
+ mm_end = end - 15;
+ while(s<mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq 8%1, %%mm2\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "psrlq $1, %%mm0\n\t"
+ "psrlq $1, %%mm2\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm2\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm3\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm3, %%mm2\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm2, 8%0"
+ :"=m"(*d)
+ :"m"(*s)
+ );
+ d+=16;
+ s+=16;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ mm_end = end - 3;
+ while(s < mm_end)
+ {
+ register uint32_t x= *((uint32_t *)s);
+ *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
+ s+=4;
+ d+=4;
+ }
+ if(s < end)
+ {
+ register uint16_t x= *((uint16_t *)s);
+ *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
+ s+=2;
+ d+=2;
+ }
+}
+
+static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ mm_end = end - 15;
+#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster; on Athlon it's slightly faster)
+ asm volatile(
+ "movq %3, %%mm5 \n\t"
+ "movq %4, %%mm6 \n\t"
+ "movq %5, %%mm7 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1) \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd 4(%1), %%mm3 \n\t"
+ "punpckldq 8(%1), %%mm0 \n\t"
+ "punpckldq 12(%1), %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm6, %%mm3 \n\t"
+ "pmaddwd %%mm7, %%mm0 \n\t"
+ "pmaddwd %%mm7, %%mm3 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "pand %%mm5, %%mm4 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "psrld $5, %%mm0 \n\t"
+ "pslld $11, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, (%0) \n\t"
+ "add $16, %1 \n\t"
+ "add $8, %0 \n\t"
+ "cmp %2, %1 \n\t"
+ " jb 1b \n\t"
+ : "+r" (d), "+r"(s)
+ : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
+ );
+#else
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 4%1, %%mm3\n\t"
+ "punpckldq 8%1, %%mm0\n\t"
+ "punpckldq 12%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psrlq $3, %%mm0\n\t"
+ "psrlq $3, %%mm3\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm3\n\t"
+ "psrlq $5, %%mm1\n\t"
+ "psrlq $5, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "psrlq $8, %%mm5\n\t"
+ "pand %%mm7, %%mm2\n\t"
+ "pand %%mm7, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 16;
+ }
+#endif
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
+ }
+}
+
+static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ mm_end = end - 15;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 4%1, %%mm3\n\t"
+ "punpckldq 8%1, %%mm0\n\t"
+ "punpckldq 12%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psllq $8, %%mm0\n\t"
+ "psllq $8, %%mm3\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm3\n\t"
+ "psrlq $5, %%mm1\n\t"
+ "psrlq $5, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $19, %%mm2\n\t"
+ "psrlq $19, %%mm5\n\t"
+ "pand %2, %%mm2\n\t"
+ "pand %2, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 16;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
+ }
+}
+
+static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ mm_end = end - 15;
+#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster; on Athlon it's slightly faster)
+ asm volatile(
+ "movq %3, %%mm5 \n\t"
+ "movq %4, %%mm6 \n\t"
+ "movq %5, %%mm7 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1) \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd 4(%1), %%mm3 \n\t"
+ "punpckldq 8(%1), %%mm0 \n\t"
+ "punpckldq 12(%1), %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm6, %%mm3 \n\t"
+ "pmaddwd %%mm7, %%mm0 \n\t"
+ "pmaddwd %%mm7, %%mm3 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "pand %%mm5, %%mm4 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "psrld $6, %%mm0 \n\t"
+ "pslld $10, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, (%0) \n\t"
+ "add $16, %1 \n\t"
+ "add $8, %0 \n\t"
+ "cmp %2, %1 \n\t"
+ " jb 1b \n\t"
+ : "+r" (d), "+r"(s)
+ : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
+ );
+#else
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 4%1, %%mm3\n\t"
+ "punpckldq 8%1, %%mm0\n\t"
+ "punpckldq 12%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psrlq $3, %%mm0\n\t"
+ "psrlq $3, %%mm3\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm3\n\t"
+ "psrlq $6, %%mm1\n\t"
+ "psrlq $6, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $9, %%mm2\n\t"
+ "psrlq $9, %%mm5\n\t"
+ "pand %%mm7, %%mm2\n\t"
+ "pand %%mm7, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 16;
+ }
+#endif
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
+ }
+}
+
+static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ mm_end = end - 15;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 4%1, %%mm3\n\t"
+ "punpckldq 8%1, %%mm0\n\t"
+ "punpckldq 12%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psllq $7, %%mm0\n\t"
+ "psllq $7, %%mm3\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm3\n\t"
+ "psrlq $6, %%mm1\n\t"
+ "psrlq $6, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $19, %%mm2\n\t"
+ "psrlq $19, %%mm5\n\t"
+ "pand %2, %%mm2\n\t"
+ "pand %2, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 16;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
+ }
+}
+
+static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ mm_end = end - 11;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 3%1, %%mm3\n\t"
+ "punpckldq 6%1, %%mm0\n\t"
+ "punpckldq 9%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psrlq $3, %%mm0\n\t"
+ "psrlq $3, %%mm3\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm3\n\t"
+ "psrlq $5, %%mm1\n\t"
+ "psrlq $5, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "psrlq $8, %%mm5\n\t"
+ "pand %%mm7, %%mm2\n\t"
+ "pand %%mm7, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ const int b= *s++;
+ const int g= *s++;
+ const int r= *s++;
+ *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
+ }
+}
+
+static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ mm_end = end - 15;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 3%1, %%mm3\n\t"
+ "punpckldq 6%1, %%mm0\n\t"
+ "punpckldq 9%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psllq $8, %%mm0\n\t"
+ "psllq $8, %%mm3\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm3\n\t"
+ "psrlq $5, %%mm1\n\t"
+ "psrlq $5, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $19, %%mm2\n\t"
+ "psrlq $19, %%mm5\n\t"
+ "pand %2, %%mm2\n\t"
+ "pand %2, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ const int r= *s++;
+ const int g= *s++;
+ const int b= *s++;
+ *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
+ }
+}
+
+static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ mm_end = end - 11;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 3%1, %%mm3\n\t"
+ "punpckldq 6%1, %%mm0\n\t"
+ "punpckldq 9%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psrlq $3, %%mm0\n\t"
+ "psrlq $3, %%mm3\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm3\n\t"
+ "psrlq $6, %%mm1\n\t"
+ "psrlq $6, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $9, %%mm2\n\t"
+ "psrlq $9, %%mm5\n\t"
+ "pand %%mm7, %%mm2\n\t"
+ "pand %%mm7, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ const int b= *s++;
+ const int g= *s++;
+ const int r= *s++;
+ *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
+ }
+}
+
+static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint8_t *s = src;
+ const uint8_t *end;
+#ifdef HAVE_MMX
+ const uint8_t *mm_end;
+#endif
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
+ __asm __volatile(
+ "movq %0, %%mm7\n\t"
+ "movq %1, %%mm6\n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ mm_end = end - 15;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movd %1, %%mm0\n\t"
+ "movd 3%1, %%mm3\n\t"
+ "punpckldq 6%1, %%mm0\n\t"
+ "punpckldq 9%1, %%mm3\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm3, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "psllq $7, %%mm0\n\t"
+ "psllq $7, %%mm3\n\t"
+ "pand %%mm7, %%mm0\n\t"
+ "pand %%mm7, %%mm3\n\t"
+ "psrlq $6, %%mm1\n\t"
+ "psrlq $6, %%mm4\n\t"
+ "pand %%mm6, %%mm1\n\t"
+ "pand %%mm6, %%mm4\n\t"
+ "psrlq $19, %%mm2\n\t"
+ "psrlq $19, %%mm5\n\t"
+ "pand %2, %%mm2\n\t"
+ "pand %2, %%mm5\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm5, %%mm3\n\t"
+ "psllq $16, %%mm3\n\t"
+ "por %%mm3, %%mm0\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ const int r= *s++;
+ const int g= *s++;
+ const int b= *s++;
+ *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
+ }
+}
+
+/*
+ A less accurate approximation is used here: the input value is simply
+ left-shifted and the low-order bits are filled with zeroes. This method
+ improves PNG compression, but it cannot reproduce white exactly, since it
+ never generates an all-ones maximum value; the net effect is to darken the
+ image slightly.
+
+ A better method would be "left bit replication":
+
+    4 3 2 1 0
+    ---------
+    1 1 0 1 1
+
+    7 6 5 4 3 2 1 0
+    ----------------
+    1 1 0 1 1 1 1 0
+    |=======|  |===|
+        |      Leftmost bits repeated to fill open bits
+        |
+    Original bits
+*/
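+/*
+ For illustration only (not used by the conversion routines below): a minimal
+ sketch of left bit replication, expanding a 5-bit channel value to 8 bits by
+ repeating its top bits instead of zero-filling. The helper name is made up
+ for this example and it is kept under "#if 0" so it does not affect the build.
+*/
+#if 0
+static inline uint8_t expand5to8_replicate(uint8_t v5)
+{
+    /* v5 is a 5-bit value (0..31); place it in the top of the byte and fill
+       the low 3 bits with the top 3 bits of the original value, so that
+       31 maps to 255 and 0 maps to 0. */
+    return (uint8_t)((v5 << 3) | (v5 >> 2));
+}
+#endif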
+static inline void RENAME(rgb15to24)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint16_t *end;
+#ifdef HAVE_MMX
+ const uint16_t *mm_end;
+#endif
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 7;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ "movq %1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $2, %%mm1\n\t"
+ "psrlq $7, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %5, %%mm0\n\t"
+ "punpcklwd %5, %%mm1\n\t"
+ "punpcklwd %5, %%mm2\n\t"
+ "punpckhwd %5, %%mm3\n\t"
+ "punpckhwd %5, %%mm4\n\t"
+ "punpckhwd %5, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+
+ "movq %%mm0, %%mm6\n\t"
+ "movq %%mm3, %%mm7\n\t"
+
+ "movq 8%1, %%mm0\n\t"
+ "movq 8%1, %%mm1\n\t"
+ "movq 8%1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $2, %%mm1\n\t"
+ "psrlq $7, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %5, %%mm0\n\t"
+ "punpcklwd %5, %%mm1\n\t"
+ "punpcklwd %5, %%mm2\n\t"
+ "punpckhwd %5, %%mm3\n\t"
+ "punpckhwd %5, %%mm4\n\t"
+ "punpckhwd %5, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+
+ :"=m"(*d)
+ :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
+ :"memory");
+ /* Borrowed 32 to 24 */
+ __asm __volatile(
+ "movq %%mm0, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "movq %%mm6, %%mm0\n\t"
+ "movq %%mm7, %%mm1\n\t"
+
+ "movq %%mm4, %%mm6\n\t"
+ "movq %%mm5, %%mm7\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+
+ "psrlq $8, %%mm2\n\t"
+ "psrlq $8, %%mm3\n\t"
+ "psrlq $8, %%mm6\n\t"
+ "psrlq $8, %%mm7\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm1\n\t"
+ "pand %2, %%mm4\n\t"
+ "pand %2, %%mm5\n\t"
+ "pand %3, %%mm2\n\t"
+ "pand %3, %%mm3\n\t"
+ "pand %3, %%mm6\n\t"
+ "pand %3, %%mm7\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "por %%mm6, %%mm4\n\t"
+ "por %%mm7, %%mm5\n\t"
+
+ "movq %%mm1, %%mm2\n\t"
+ "movq %%mm4, %%mm3\n\t"
+ "psllq $48, %%mm2\n\t"
+ "psllq $32, %%mm3\n\t"
+ "pand %4, %%mm2\n\t"
+ "pand %5, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psrlq $16, %%mm1\n\t"
+ "psrlq $32, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "pand %6, %%mm5\n\t"
+ "por %%mm5, %%mm4\n\t"
+
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm4, 16%0"
+
+ :"=m"(*d)
+ :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+ :"memory");
+ d += 24;
+ s += 8;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x7C00)>>7;
+ }
+}
+
+static inline void RENAME(rgb16to24)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint16_t *end;
+#ifdef HAVE_MMX
+ const uint16_t *mm_end;
+#endif
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 7;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ "movq %1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $3, %%mm1\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %5, %%mm0\n\t"
+ "punpcklwd %5, %%mm1\n\t"
+ "punpcklwd %5, %%mm2\n\t"
+ "punpckhwd %5, %%mm3\n\t"
+ "punpckhwd %5, %%mm4\n\t"
+ "punpckhwd %5, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+
+ "movq %%mm0, %%mm6\n\t"
+ "movq %%mm3, %%mm7\n\t"
+
+ "movq 8%1, %%mm0\n\t"
+ "movq 8%1, %%mm1\n\t"
+ "movq 8%1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $3, %%mm1\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %5, %%mm0\n\t"
+ "punpcklwd %5, %%mm1\n\t"
+ "punpcklwd %5, %%mm2\n\t"
+ "punpckhwd %5, %%mm3\n\t"
+ "punpckhwd %5, %%mm4\n\t"
+ "punpckhwd %5, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+ :"=m"(*d)
+ :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
+ :"memory");
+ /* Borrowed 32 to 24 */
+ __asm __volatile(
+ "movq %%mm0, %%mm4\n\t"
+ "movq %%mm3, %%mm5\n\t"
+ "movq %%mm6, %%mm0\n\t"
+ "movq %%mm7, %%mm1\n\t"
+
+ "movq %%mm4, %%mm6\n\t"
+ "movq %%mm5, %%mm7\n\t"
+ "movq %%mm0, %%mm2\n\t"
+ "movq %%mm1, %%mm3\n\t"
+
+ "psrlq $8, %%mm2\n\t"
+ "psrlq $8, %%mm3\n\t"
+ "psrlq $8, %%mm6\n\t"
+ "psrlq $8, %%mm7\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %2, %%mm1\n\t"
+ "pand %2, %%mm4\n\t"
+ "pand %2, %%mm5\n\t"
+ "pand %3, %%mm2\n\t"
+ "pand %3, %%mm3\n\t"
+ "pand %3, %%mm6\n\t"
+ "pand %3, %%mm7\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "por %%mm6, %%mm4\n\t"
+ "por %%mm7, %%mm5\n\t"
+
+ "movq %%mm1, %%mm2\n\t"
+ "movq %%mm4, %%mm3\n\t"
+ "psllq $48, %%mm2\n\t"
+ "psllq $32, %%mm3\n\t"
+ "pand %4, %%mm2\n\t"
+ "pand %5, %%mm3\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psrlq $16, %%mm1\n\t"
+ "psrlq $32, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm3, %%mm1\n\t"
+ "pand %6, %%mm5\n\t"
+ "por %%mm5, %%mm4\n\t"
+
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm4, 16%0"
+
+ :"=m"(*d)
+ :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+ :"memory");
+ d += 24;
+ s += 8;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0xF800)>>8;
+ }
+}
+
+static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint16_t *end;
+#ifdef HAVE_MMX
+ const uint16_t *mm_end;
+#endif
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory");
+ mm_end = end - 3;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ "movq %1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $2, %%mm1\n\t"
+ "psrlq $7, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %%mm7, %%mm0\n\t"
+ "punpcklwd %%mm7, %%mm1\n\t"
+ "punpcklwd %%mm7, %%mm2\n\t"
+ "punpckhwd %%mm7, %%mm3\n\t"
+ "punpckhwd %%mm7, %%mm4\n\t"
+ "punpckhwd %%mm7, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm3, 8%0\n\t"
+ :"=m"(*d)
+ :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
+ :"memory");
+ d += 16;
+ s += 4;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+#if 0 //slightly slower on Athlon
+ int bgr= *s++;
+ *((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9);
+#else
+ register uint16_t bgr;
+ bgr = *s++;
+#ifdef WORDS_BIGENDIAN
+ *d++ = 0;
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x1F)<<3;
+#else
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = 0;
+#endif
+
+#endif
+ }
+}
+
+static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ const uint16_t *end;
+#ifdef HAVE_MMX
+ const uint16_t *mm_end;
+#endif
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
+#ifdef HAVE_MMX
+ __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
+ __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory");
+ mm_end = end - 3;
+ while(s < mm_end)
+ {
+ __asm __volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq %1, %%mm1\n\t"
+ "movq %1, %%mm2\n\t"
+ "pand %2, %%mm0\n\t"
+ "pand %3, %%mm1\n\t"
+ "pand %4, %%mm2\n\t"
+ "psllq $3, %%mm0\n\t"
+ "psrlq $3, %%mm1\n\t"
+ "psrlq $8, %%mm2\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "movq %%mm1, %%mm4\n\t"
+ "movq %%mm2, %%mm5\n\t"
+ "punpcklwd %%mm7, %%mm0\n\t"
+ "punpcklwd %%mm7, %%mm1\n\t"
+ "punpcklwd %%mm7, %%mm2\n\t"
+ "punpckhwd %%mm7, %%mm3\n\t"
+ "punpckhwd %%mm7, %%mm4\n\t"
+ "punpckhwd %%mm7, %%mm5\n\t"
+ "psllq $8, %%mm1\n\t"
+ "psllq $16, %%mm2\n\t"
+ "por %%mm1, %%mm0\n\t"
+ "por %%mm2, %%mm0\n\t"
+ "psllq $8, %%mm4\n\t"
+ "psllq $16, %%mm5\n\t"
+ "por %%mm4, %%mm3\n\t"
+ "por %%mm5, %%mm3\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm3, 8%0\n\t"
+ :"=m"(*d)
+ :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r)
+ :"memory");
+ d += 16;
+ s += 4;
+ }
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ while(s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+#ifdef WORDS_BIGENDIAN
+ *d++ = 0;
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0x1F)<<3;
+#else
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = 0;
+#endif
+ }
+}
+
+static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+#ifdef HAVE_MMX
+/* TODO: unroll this loop */
+ asm volatile (
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%0, %%"REG_a") \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "pslld $16, %%mm0 \n\t"
+ "psrld $16, %%mm1 \n\t"
+ "pand "MANGLE(mask32r)", %%mm0 \n\t"
+ "pand "MANGLE(mask32g)", %%mm2 \n\t"
+ "pand "MANGLE(mask32b)", %%mm1 \n\t"
+ "por %%mm0, %%mm2 \n\t"
+ "por %%mm1, %%mm2 \n\t"
+ MOVNTQ" %%mm2, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ :: "r" (src), "r"(dst), "r" (src_size-7)
+ : "%"REG_a
+ );
+
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#else
+ unsigned i;
+ unsigned num_pixels = src_size >> 2;
+ for(i=0; i<num_pixels; i++)
+ {
+#ifdef WORDS_BIGENDIAN
+ dst[4*i + 1] = src[4*i + 3];
+ dst[4*i + 2] = src[4*i + 2];
+ dst[4*i + 3] = src[4*i + 1];
+#else
+ dst[4*i + 0] = src[4*i + 2];
+ dst[4*i + 1] = src[4*i + 1];
+ dst[4*i + 2] = src[4*i + 0];
+#endif
+ }
+#endif
+}
+
+static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+ unsigned i;
+#ifdef HAVE_MMX
+ long mmx_size= 23 - src_size;
+ asm volatile (
+ "movq "MANGLE(mask24r)", %%mm5 \n\t"
+ "movq "MANGLE(mask24g)", %%mm6 \n\t"
+ "movq "MANGLE(mask24b)", %%mm7 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1, %%"REG_a") \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
+ "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
+ "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
+ "psllq $16, %%mm0 \n\t" // 00 BGR BGR
+ "pand %%mm5, %%mm0 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "por %%mm0, %%mm1 \n\t"
+ "por %%mm2, %%mm1 \n\t"
+ "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
+ MOVNTQ" %%mm1, (%2, %%"REG_a")\n\t" // RGB RGB RG
+ "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
+ "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "pand %%mm6, %%mm2 \n\t"
+ "por %%mm0, %%mm1 \n\t"
+ "por %%mm2, %%mm1 \n\t"
+ "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
+ MOVNTQ" %%mm1, 8(%2, %%"REG_a")\n\t" // B RGB RGB R
+ "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
+ "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm5, %%mm2 \n\t"
+ "por %%mm0, %%mm1 \n\t"
+ "por %%mm2, %%mm1 \n\t"
+ MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t"
+ "add $24, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : "+a" (mmx_size)
+ : "r" (src-mmx_size), "r"(dst-mmx_size)
+ );
+
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+
+ if(mmx_size==23) return; //finished, was a multiple of 8
+
+ src+= src_size;
+ dst+= src_size;
+ src_size= 23-mmx_size;
+ src-= src_size;
+ dst-= src_size;
+#endif
+ for(i=0; i<src_size; i+=3)
+ {
+ register uint8_t x;
+ x = src[i + 2];
+ dst[i + 1] = src[i + 1];
+ dst[i + 2] = src[i + 0];
+ dst[i + 0] = x;
+ }
+}
+
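+/* Interleaves planar Y, U and V into packed YUY2. vertLumPerChroma is the
+   number of luma lines that share one chroma line (2 for YV12, 1 for YUV422P). */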
+static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
+{
+ long y;
+ const long chromWidth= width>>1;
+ for(y=0; y<height; y++)
+ {
+#ifdef HAVE_MMX
+//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely limited by memory bandwidth anyway)
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
+ PREFETCH" 32(%2, %%"REG_a") \n\t"
+ PREFETCH" 32(%3, %%"REG_a") \n\t"
+ "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
+ "movq %%mm0, %%mm2 \n\t" // U(0)
+ "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
+ "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
+
+ "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
+ "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
+ "movq %%mm3, %%mm4 \n\t" // Y(0)
+ "movq %%mm5, %%mm6 \n\t" // Y(8)
+ "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
+ "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
+ "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
+ "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
+
+ MOVNTQ" %%mm3, (%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
+ : "%"REG_a
+ );
+#else
+
+#if defined ARCH_ALPHA && defined HAVE_MVI
+#define pl2yuy2(n) \
+ y1 = yc[n]; \
+ y2 = yc2[n]; \
+ u = uc[n]; \
+ v = vc[n]; \
+ asm("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \
+ asm("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \
+ asm("unpkbl %1, %0" : "=r"(u) : "r"(u)); \
+ asm("unpkbl %1, %0" : "=r"(v) : "r"(v)); \
+ yuv1 = (u << 8) + (v << 24); \
+ yuv2 = yuv1 + y2; \
+ yuv1 += y1; \
+ qdst[n] = yuv1; \
+ qdst2[n] = yuv2;
+
+ int i;
+ uint64_t *qdst = (uint64_t *) dst;
+ uint64_t *qdst2 = (uint64_t *) (dst + dstStride);
+ const uint32_t *yc = (uint32_t *) ysrc;
+ const uint32_t *yc2 = (uint32_t *) (ysrc + lumStride);
+ const uint16_t *uc = (uint16_t*) usrc, *vc = (uint16_t*) vsrc;
+ for(i = 0; i < chromWidth; i += 8){
+ uint64_t y1, y2, yuv1, yuv2;
+ uint64_t u, v;
+ /* Prefetch */
+ asm("ldq $31,64(%0)" :: "r"(yc));
+ asm("ldq $31,64(%0)" :: "r"(yc2));
+ asm("ldq $31,64(%0)" :: "r"(uc));
+ asm("ldq $31,64(%0)" :: "r"(vc));
+
+ pl2yuy2(0);
+ pl2yuy2(1);
+ pl2yuy2(2);
+ pl2yuy2(3);
+
+ yc += 4;
+ yc2 += 4;
+ uc += 4;
+ vc += 4;
+ qdst += 4;
+ qdst2 += 4;
+ }
+ y++;
+ ysrc += lumStride;
+ dst += dstStride;
+
+#elif __WORDSIZE >= 64
+ int i;
+ uint64_t *ldst = (uint64_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for(i = 0; i < chromWidth; i += 2){
+ uint64_t k, l;
+ k = yc[0] + (uc[0] << 8) +
+ (yc[1] << 16) + (vc[0] << 24);
+ l = yc[2] + (uc[1] << 8) +
+ (yc[3] << 16) + (vc[1] << 24);
+ *ldst++ = k + (l << 32);
+ yc += 4;
+ uc += 2;
+ vc += 2;
+ }
+
+#else
+ int i, *idst = (int32_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for(i = 0; i < chromWidth; i++){
+#ifdef WORDS_BIGENDIAN
+ *idst++ = (yc[0] << 24)+ (uc[0] << 16) +
+ (yc[1] << 8) + (vc[0] << 0);
+#else
+ *idst++ = yc[0] + (uc[0] << 8) +
+ (yc[1] << 16) + (vc[0] << 24);
+#endif
+ yc += 2;
+ uc++;
+ vc++;
+ }
+#endif
+#endif
+ if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
+#ifdef HAVE_MMX
+asm( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+/**
+ *
+ * Height should be a multiple of 2 and width should be a multiple of 16
+ * (if this is a problem for anyone then tell me, and I'll fix it).
+ */
+static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride)
+{
+ //FIXME interpolate chroma
+ RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+}
+
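+/* Same as yuvPlanartoyuy2 above, but produces UYVY byte ordering instead of YUYV. */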
+static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
+{
+ long y;
+ const long chromWidth= width>>1;
+ for(y=0; y<height; y++)
+ {
+#ifdef HAVE_MMX
+//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely limited by memory bandwidth anyway)
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
+ PREFETCH" 32(%2, %%"REG_a") \n\t"
+ PREFETCH" 32(%3, %%"REG_a") \n\t"
+ "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
+ "movq %%mm0, %%mm2 \n\t" // U(0)
+ "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
+ "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
+
+ "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
+ "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
+ "movq %%mm0, %%mm4 \n\t" // Y(0)
+ "movq %%mm2, %%mm6 \n\t" // Y(8)
+ "punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0)
+ "punpckhbw %%mm3, %%mm4 \n\t" // YUYV YUYV(4)
+ "punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8)
+ "punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12)
+
+ MOVNTQ" %%mm0, (%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
+ : "%"REG_a
+ );
+#else
+//FIXME adapt the alpha asm code from yv12->yuy2
+
+#if __WORDSIZE >= 64
+ int i;
+ uint64_t *ldst = (uint64_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for(i = 0; i < chromWidth; i += 2){
+ uint64_t k, l;
+ k = uc[0] + (yc[0] << 8) +
+ (vc[0] << 16) + (yc[1] << 24);
+ l = uc[1] + (yc[2] << 8) +
+ (vc[1] << 16) + (yc[3] << 24);
+ *ldst++ = k + (l << 32);
+ yc += 4;
+ uc += 2;
+ vc += 2;
+ }
+
+#else
+ int i, *idst = (int32_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for(i = 0; i < chromWidth; i++){
+#ifdef WORDS_BIGENDIAN
+ *idst++ = (uc[0] << 24)+ (yc[0] << 16) +
+ (vc[0] << 8) + (yc[1] << 0);
+#else
+ *idst++ = uc[0] + (yc[0] << 8) +
+ (vc[0] << 16) + (yc[1] << 24);
+#endif
+ yc += 2;
+ uc++;
+ vc++;
+ }
+#endif
+#endif
+ if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
+#ifdef HAVE_MMX
+asm( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+/**
+ *
+ * Height should be a multiple of 2 and width should be a multiple of 16
+ * (if this is a problem for anyone then tell me, and I'll fix it).
+ */
+static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride)
+{
+ //FIXME interpolate chroma
+ RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+}
+
+/**
+ *
+ * width should be a multiple of 16
+ */
+static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+ long width, long height,
+ long lumStride, long chromStride, long dstStride)
+{
+ RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
+}
+
+/**
+ *
+ * Height should be a multiple of 2 and width should be a multiple of 16
+ * (if this is a problem for anyone then tell me, and I'll fix it).
+ */
+static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
+{
+ long y;
+ const long chromWidth= width>>1;
+ for(y=0; y<height; y+=2)
+ {
+#ifdef HAVE_MMX
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
+ "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
+ "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
+ "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
+ "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
+ "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
+ "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
+ "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
+
+ MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t"
+
+ "movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(12)
+ "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
+ "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
+ "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
+ "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
+ "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
+ "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
+ "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t"
+
+ "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
+ "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
+ "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
+ "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
+ "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
+ "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
+ "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
+
+ MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
+ MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%"REG_a
+ );
+
+ ydst += lumStride;
+ src += srcStride;
+
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
+ "movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // YUYV YUYV(12)
+ "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
+ "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
+ "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
+ "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%"REG_a
+ );
+#else
+ long i;
+ for(i=0; i<chromWidth; i++)
+ {
+ ydst[2*i+0] = src[4*i+0];
+ udst[i] = src[4*i+1];
+ ydst[2*i+1] = src[4*i+2];
+ vdst[i] = src[4*i+3];
+ }
+ ydst += lumStride;
+ src += srcStride;
+
+ for(i=0; i<chromWidth; i++)
+ {
+ ydst[2*i+0] = src[4*i+0];
+ ydst[2*i+1] = src[4*i+2];
+ }
+#endif
+ udst += chromStride;
+ vdst += chromStride;
+ ydst += lumStride;
+ src += srcStride;
+ }
+#ifdef HAVE_MMX
+asm volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+static inline void RENAME(yvu9toyv12)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc,
+ uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height, long lumStride, long chromStride)
+{
+ /* Y Plane */
+ memcpy(ydst, ysrc, width*height);
+
+ /* XXX: implement upscaling for U,V */
+}
+
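+/* Doubles a single plane in both dimensions; new samples are interpolated
+   from neighbouring source pixels with 3:1 / 1:3 weights. */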
+static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWidth, long srcHeight, long srcStride, long dstStride)
+{
+ long x,y;
+
+ dst[0]= src[0];
+
+ // first line
+ for(x=0; x<srcWidth-1; x++){
+ dst[2*x+1]= (3*src[x] + src[x+1])>>2;
+ dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
+ }
+ dst[2*srcWidth-1]= src[srcWidth-1];
+
+ dst+= dstStride;
+
+ for(y=1; y<srcHeight; y++){
+#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
+ const long mmxSize= srcWidth&~15;
+ asm volatile(
+ "mov %4, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "movq 1(%0, %%"REG_a"), %%mm2 \n\t"
+ "movq 1(%1, %%"REG_a"), %%mm3 \n\t"
+ "movq -1(%0, %%"REG_a"), %%mm4 \n\t"
+ "movq -1(%1, %%"REG_a"), %%mm5 \n\t"
+ PAVGB" %%mm0, %%mm5 \n\t"
+ PAVGB" %%mm0, %%mm3 \n\t"
+ PAVGB" %%mm0, %%mm5 \n\t"
+ PAVGB" %%mm0, %%mm3 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ "movq %%mm5, %%mm7 \n\t"
+ "movq %%mm4, %%mm6 \n\t"
+ "punpcklbw %%mm3, %%mm5 \n\t"
+ "punpckhbw %%mm3, %%mm7 \n\t"
+ "punpcklbw %%mm2, %%mm4 \n\t"
+ "punpckhbw %%mm2, %%mm6 \n\t"
+#if 1
+ MOVNTQ" %%mm5, (%2, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm4, (%3, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2)\n\t"
+#else
+ "movq %%mm5, (%2, %%"REG_a", 2) \n\t"
+ "movq %%mm7, 8(%2, %%"REG_a", 2)\n\t"
+ "movq %%mm4, (%3, %%"REG_a", 2) \n\t"
+ "movq %%mm6, 8(%3, %%"REG_a", 2)\n\t"
+#endif
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
+ "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
+ "g" (-mmxSize)
+ : "%"REG_a
+
+ );
+#else
+ const long mmxSize=1;
+#endif
+ dst[0 ]= (3*src[0] + src[srcStride])>>2;
+ dst[dstStride]= ( src[0] + 3*src[srcStride])>>2;
+
+ for(x=mmxSize-1; x<srcWidth-1; x++){
+ dst[2*x +1]= (3*src[x+0] + src[x+srcStride+1])>>2;
+ dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2;
+ dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2;
+ dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2;
+ }
+ dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2;
+ dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;
+
+ dst+=dstStride*2;
+ src+=srcStride;
+ }
+
+ // last line
+#if 1
+ dst[0]= src[0];
+
+ for(x=0; x<srcWidth-1; x++){
+ dst[2*x+1]= (3*src[x] + src[x+1])>>2;
+ dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
+ }
+ dst[2*srcWidth-1]= src[srcWidth-1];
+#else
+ for(x=0; x<srcWidth; x++){
+ dst[2*x+0]=
+ dst[2*x+1]= src[x];
+ }
+#endif
+
+#ifdef HAVE_MMX
+asm volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+/**
+ *
+ * Height should be a multiple of 2 and width should be a multiple of 16
+ * (if this is a problem for anyone then tell me, and I'll fix it).
+ * Chrominance data is only taken from every second line; the others are ignored. FIXME: write an HQ version.
+ */
+static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
+{
+ long y;
+ const long chromWidth= width>>1;
+ for(y=0; y<height; y+=2)
+ {
+#ifdef HAVE_MMX
+ asm volatile(
+ "xorl %%eax, %%eax \n\t"
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%eax, 4) \n\t"
+ "movq (%0, %%eax, 4), %%mm0 \n\t" // UYVY UYVY(0)
+ "movq 8(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(4)
+ "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0)
+ "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4)
+ "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0)
+ "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4)
+ "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
+ "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
+ "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
+
+ MOVNTQ" %%mm2, (%1, %%eax, 2) \n\t"
+
+ "movq 16(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(8)
+ "movq 24(%0, %%eax, 4), %%mm2 \n\t" // UYVY UYVY(12)
+ "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8)
+ "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12)
+ "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8)
+ "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12)
+ "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
+ "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
+ "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm3, 8(%1, %%eax, 2) \n\t"
+
+ "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
+ "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
+ "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
+ "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
+ "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
+ "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
+ "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
+
+ MOVNTQ" %%mm0, (%3, %%eax) \n\t"
+ MOVNTQ" %%mm2, (%2, %%eax) \n\t"
+
+ "addl $8, %%eax \n\t"
+ "cmpl %4, %%eax \n\t"
+ " jb 1b \n\t"
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%eax"
+ );
+
+ ydst += lumStride;
+ src += srcStride;
+
+ asm volatile(
+ "xorl %%eax, %%eax \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%eax, 4) \n\t"
+ "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4)
+ "movq 16(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%eax, 4), %%mm3 \n\t" // YUYV YUYV(12)
+ "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
+ "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
+ "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
+ "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t"
+ MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t"
+
+ "addl $8, %%eax \n\t"
+ "cmpl %4, %%eax \n\t"
+ " jb 1b \n\t"
+
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%eax"
+ );
+#else
+ long i;
+ for(i=0; i<chromWidth; i++)
+ {
+ udst[i] = src[4*i+0];
+ ydst[2*i+0] = src[4*i+1];
+ vdst[i] = src[4*i+2];
+ ydst[2*i+1] = src[4*i+3];
+ }
+ ydst += lumStride;
+ src += srcStride;
+
+ for(i=0; i<chromWidth; i++)
+ {
+ ydst[2*i+0] = src[4*i+1];
+ ydst[2*i+1] = src[4*i+3];
+ }
+#endif
+ udst += chromStride;
+ vdst += chromStride;
+ ydst += lumStride;
+ src += srcStride;
+ }
+#ifdef HAVE_MMX
+asm volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#endif
+}
+
+/**
+ *
+ * Height should be a multiple of 2 and width should be a multiple of 2
+ * (if this is a problem for anyone then tell me, and I'll fix it).
+ * Chrominance data is only taken from every second line; the others are ignored in the C version. FIXME: write an HQ version.
+ */
+static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
+{
+ long y;
+ const long chromWidth= width>>1;
+#ifdef HAVE_MMX
+ for(y=0; y<height-2; y+=2)
+ {
+ long i;
+ for(i=0; i<2; i++)
+ {
+ asm volatile(
+ "mov %2, %%"REG_a" \n\t"
+ "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_d") \n\t"
+ "movd (%0, %%"REG_d"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "packssdw %%mm2, %%mm0 \n\t"
+ "psraw $7, %%mm0 \n\t"
+
+ "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "add $24, %%"REG_d" \n\t"
+ "packssdw %%mm2, %%mm4 \n\t"
+ "psraw $7, %%mm4 \n\t"
+
+ "packuswb %%mm4, %%mm0 \n\t"
+ "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
+
+ MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src+width*3), "r" (ydst+width), "g" (-width)
+ : "%"REG_a, "%"REG_d
+ );
+ ydst += lumStride;
+ src += srcStride;
+ }
+ src -= srcStride*2;
+ asm volatile(
+ "mov %4, %%"REG_a" \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
+ "add %%"REG_d", %%"REG_d" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_d") \n\t"
+ PREFETCH" 64(%1, %%"REG_d") \n\t"
+#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
+ "movq (%0, %%"REG_d"), %%mm0 \n\t"
+ "movq (%1, %%"REG_d"), %%mm1 \n\t"
+ "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
+ "movq 6(%1, %%"REG_d"), %%mm3 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm0 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+#else
+ "movd (%0, %%"REG_d"), %%mm0 \n\t"
+ "movd (%1, %%"REG_d"), %%mm1 \n\t"
+ "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 3(%1, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 6(%1, %%"REG_d"), %%mm1 \n\t"
+ "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 9(%1, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm4, %%mm2 \n\t"
+ "psrlw $2, %%mm0 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm0, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
+ "psraw $7, %%mm0 \n\t"
+
+#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
+ "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movq 12(%1, %%"REG_d"), %%mm1 \n\t"
+ "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
+ "movq 18(%1, %%"REG_d"), %%mm3 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm4, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm4 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+#else
+ "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 12(%1, %%"REG_d"), %%mm1 \n\t"
+ "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 15(%1, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm2, %%mm4 \n\t"
+ "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
+ "movd 18(%1, %%"REG_d"), %%mm1 \n\t"
+ "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 21(%1, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm5 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm5, %%mm2 \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm4, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "add $24, %%"REG_d" \n\t"
+ "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
+ "psraw $7, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm1 \n\t"
+ "punpckldq %%mm4, %%mm0 \n\t"
+ "punpckhdq %%mm4, %%mm1 \n\t"
+ "packsswb %%mm1, %%mm0 \n\t"
+ "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
+ "movd %%mm0, (%2, %%"REG_a") \n\t"
+ "punpckhdq %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
+ : "%"REG_a, "%"REG_d
+ );
+
+ udst += chromStride;
+ vdst += chromStride;
+ src += srcStride*2;
+ }
+
+ asm volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
+#else
+ y=0;
+#endif
+ for(; y<height; y+=2)
+ {
+ long i;
+ for(i=0; i<chromWidth; i++)
+ {
+ unsigned int b= src[6*i+0];
+ unsigned int g= src[6*i+1];
+ unsigned int r= src[6*i+2];
+
+ unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+ unsigned int V = ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128;
+ unsigned int U = ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128;
+
+ udst[i] = U;
+ vdst[i] = V;
+ ydst[2*i] = Y;
+
+ b= src[6*i+3];
+ g= src[6*i+4];
+ r= src[6*i+5];
+
+ Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+ ydst[2*i+1] = Y;
+ }
+ ydst += lumStride;
+ src += srcStride;
+
+ for(i=0; i<chromWidth; i++)
+ {
+ unsigned int b= src[6*i+0];
+ unsigned int g= src[6*i+1];
+ unsigned int r= src[6*i+2];
+
+ unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+
+ ydst[2*i] = Y;
+
+ b= src[6*i+3];
+ g= src[6*i+4];
+ r= src[6*i+5];
+
+ Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+ ydst[2*i+1] = Y;
+ }
+ udst += chromStride;
+ vdst += chromStride;
+ ydst += lumStride;
+ src += srcStride;
+ }
+}
+
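+/* Interleaves two source planes byte by byte into the destination:
+   dest[2*w+0] = src1[w], dest[2*w+1] = src2[w]. */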
+void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
+ long width, long height, long src1Stride,
+ long src2Stride, long dstStride){
+ long h;
+
+ for(h=0; h < height; h++)
+ {
+ long w;
+
+#ifdef HAVE_MMX
+#ifdef HAVE_SSE2
+ asm(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "1: \n\t"
+ PREFETCH" 64(%1, %%"REG_a") \n\t"
+ PREFETCH" 64(%2, %%"REG_a") \n\t"
+ "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
+ "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
+ "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
+ "punpcklbw %%xmm2, %%xmm0 \n\t"
+ "punpckhbw %%xmm2, %%xmm1 \n\t"
+ "movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t"
+ "movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t"
+ "add $16, %%"REG_a" \n\t"
+ "cmp %3, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
+ : "memory", "%"REG_a""
+ );
+#else
+ asm(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "1: \n\t"
+ PREFETCH" 64(%1, %%"REG_a") \n\t"
+ PREFETCH" 64(%2, %%"REG_a") \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a"), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq (%2, %%"REG_a"), %%mm4 \n\t"
+ "movq 8(%2, %%"REG_a"), %%mm5 \n\t"
+ "punpcklbw %%mm4, %%mm0 \n\t"
+ "punpckhbw %%mm4, %%mm1 \n\t"
+ "punpcklbw %%mm5, %%mm2 \n\t"
+ "punpckhbw %%mm5, %%mm3 \n\t"
+ MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t"
+ "add $16, %%"REG_a" \n\t"
+ "cmp %3, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
+ : "memory", "%"REG_a
+ );
+#endif
+ for(w= (width&(~15)); w < width; w++)
+ {
+ dest[2*w+0] = src1[w];
+ dest[2*w+1] = src2[w];
+ }
+#else
+ for(w=0; w < width; w++)
+ {
+ dest[2*w+0] = src1[w];
+ dest[2*w+1] = src2[w];
+ }
+#endif
+ dest += dstStride;
+ src1 += src1Stride;
+ src2 += src2Stride;
+ }
+#ifdef HAVE_MMX
+ asm(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
+
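+/* Upscales two chroma planes by 2x in both dimensions using simple pixel doubling. */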
+static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
+ uint8_t *dst1, uint8_t *dst2,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long dstStride1, long dstStride2)
+{
+ long y,x,w,h;
+ w=width/2; h=height/2;
+#ifdef HAVE_MMX
+ asm volatile(
+ PREFETCH" %0\n\t"
+ PREFETCH" %1\n\t"
+ ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
+#endif
+ for(y=0;y<h;y++){
+ const uint8_t* s1=src1+srcStride1*(y>>1);
+ uint8_t* d=dst1+dstStride1*y;
+ x=0;
+#ifdef HAVE_MMX
+ for(;x<w-31;x+=32)
+ {
+ asm volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq 8%1, %%mm2\n\t"
+ "movq 16%1, %%mm4\n\t"
+ "movq 24%1, %%mm6\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "movq %%mm4, %%mm5\n\t"
+ "movq %%mm6, %%mm7\n\t"
+ "punpcklbw %%mm0, %%mm0\n\t"
+ "punpckhbw %%mm1, %%mm1\n\t"
+ "punpcklbw %%mm2, %%mm2\n\t"
+ "punpckhbw %%mm3, %%mm3\n\t"
+ "punpcklbw %%mm4, %%mm4\n\t"
+ "punpckhbw %%mm5, %%mm5\n\t"
+ "punpcklbw %%mm6, %%mm6\n\t"
+ "punpckhbw %%mm7, %%mm7\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm2, 16%0\n\t"
+ MOVNTQ" %%mm3, 24%0\n\t"
+ MOVNTQ" %%mm4, 32%0\n\t"
+ MOVNTQ" %%mm5, 40%0\n\t"
+ MOVNTQ" %%mm6, 48%0\n\t"
+ MOVNTQ" %%mm7, 56%0"
+ :"=m"(d[2*x])
+ :"m"(s1[x])
+ :"memory");
+ }
+#endif
+ for(;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
+ }
+ for(y=0;y<h;y++){
+ const uint8_t* s2=src2+srcStride2*(y>>1);
+ uint8_t* d=dst2+dstStride2*y;
+ x=0;
+#ifdef HAVE_MMX
+ for(;x<w-31;x+=32)
+ {
+ asm volatile(
+ PREFETCH" 32%1\n\t"
+ "movq %1, %%mm0\n\t"
+ "movq 8%1, %%mm2\n\t"
+ "movq 16%1, %%mm4\n\t"
+ "movq 24%1, %%mm6\n\t"
+ "movq %%mm0, %%mm1\n\t"
+ "movq %%mm2, %%mm3\n\t"
+ "movq %%mm4, %%mm5\n\t"
+ "movq %%mm6, %%mm7\n\t"
+ "punpcklbw %%mm0, %%mm0\n\t"
+ "punpckhbw %%mm1, %%mm1\n\t"
+ "punpcklbw %%mm2, %%mm2\n\t"
+ "punpckhbw %%mm3, %%mm3\n\t"
+ "punpcklbw %%mm4, %%mm4\n\t"
+ "punpckhbw %%mm5, %%mm5\n\t"
+ "punpcklbw %%mm6, %%mm6\n\t"
+ "punpckhbw %%mm7, %%mm7\n\t"
+ MOVNTQ" %%mm0, %0\n\t"
+ MOVNTQ" %%mm1, 8%0\n\t"
+ MOVNTQ" %%mm2, 16%0\n\t"
+ MOVNTQ" %%mm3, 24%0\n\t"
+ MOVNTQ" %%mm4, 32%0\n\t"
+ MOVNTQ" %%mm5, 40%0\n\t"
+ MOVNTQ" %%mm6, 48%0\n\t"
+ MOVNTQ" %%mm7, 56%0"
+ :"=m"(d[2*x])
+ :"m"(s2[x])
+ :"memory");
+ }
+#endif
+ for(;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
+ }
+#ifdef HAVE_MMX
+ asm(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
+
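+/* Packs a planar source with chroma subsampled 4x in both directions into
+   interleaved YUY2; each chroma sample is reused for four horizontally
+   adjacent luma samples. */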
+static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+ uint8_t *dst,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long srcStride3, long dstStride)
+{
+ long y,x,w,h;
+ w=width/2; h=height;
+ for(y=0;y<h;y++){
+ const uint8_t* yp=src1+srcStride1*y;
+ const uint8_t* up=src2+srcStride2*(y>>2);
+ const uint8_t* vp=src3+srcStride3*(y>>2);
+ uint8_t* d=dst+dstStride*y;
+ x=0;
+#ifdef HAVE_MMX
+ for(;x<w-7;x+=8)
+ {
+ asm volatile(
+ PREFETCH" 32(%1, %0)\n\t"
+ PREFETCH" 32(%2, %0)\n\t"
+ PREFETCH" 32(%3, %0)\n\t"
+ "movq (%1, %0, 4), %%mm0\n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
+ "movq (%2, %0), %%mm1\n\t" /* U0U1U2U3U4U5U6U7 */
+ "movq (%3, %0), %%mm2\n\t" /* V0V1V2V3V4V5V6V7 */
+ "movq %%mm0, %%mm3\n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
+ "movq %%mm1, %%mm4\n\t" /* U0U1U2U3U4U5U6U7 */
+ "movq %%mm2, %%mm5\n\t" /* V0V1V2V3V4V5V6V7 */
+ "punpcklbw %%mm1, %%mm1\n\t" /* U0U0 U1U1 U2U2 U3U3 */
+ "punpcklbw %%mm2, %%mm2\n\t" /* V0V0 V1V1 V2V2 V3V3 */
+ "punpckhbw %%mm4, %%mm4\n\t" /* U4U4 U5U5 U6U6 U7U7 */
+ "punpckhbw %%mm5, %%mm5\n\t" /* V4V4 V5V5 V6V6 V7V7 */
+
+ "movq %%mm1, %%mm6\n\t"
+ "punpcklbw %%mm2, %%mm1\n\t" /* U0V0 U0V0 U1V1 U1V1*/
+ "punpcklbw %%mm1, %%mm0\n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
+ "punpckhbw %%mm1, %%mm3\n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
+ MOVNTQ" %%mm0, (%4, %0, 8)\n\t"
+ MOVNTQ" %%mm3, 8(%4, %0, 8)\n\t"
+
+ "punpckhbw %%mm2, %%mm6\n\t" /* U2V2 U2V2 U3V3 U3V3*/
+ "movq 8(%1, %0, 4), %%mm0\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "punpcklbw %%mm6, %%mm0\n\t" /* Y U2 Y V2 Y U2 Y V2*/
+ "punpckhbw %%mm6, %%mm3\n\t" /* Y U3 Y V3 Y U3 Y V3*/
+ MOVNTQ" %%mm0, 16(%4, %0, 8)\n\t"
+ MOVNTQ" %%mm3, 24(%4, %0, 8)\n\t"
+
+ "movq %%mm4, %%mm6\n\t"
+ "movq 16(%1, %0, 4), %%mm0\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "punpcklbw %%mm5, %%mm4\n\t"
+ "punpcklbw %%mm4, %%mm0\n\t" /* Y U4 Y V4 Y U4 Y V4*/
+ "punpckhbw %%mm4, %%mm3\n\t" /* Y U5 Y V5 Y U5 Y V5*/
+ MOVNTQ" %%mm0, 32(%4, %0, 8)\n\t"
+ MOVNTQ" %%mm3, 40(%4, %0, 8)\n\t"
+
+ "punpckhbw %%mm5, %%mm6\n\t"
+ "movq 24(%1, %0, 4), %%mm0\n\t"
+ "movq %%mm0, %%mm3\n\t"
+ "punpcklbw %%mm6, %%mm0\n\t" /* Y U6 Y V6 Y U6 Y V6*/
+ "punpckhbw %%mm6, %%mm3\n\t" /* Y U7 Y V7 Y U7 Y V7*/
+ MOVNTQ" %%mm0, 48(%4, %0, 8)\n\t"
+ MOVNTQ" %%mm3, 56(%4, %0, 8)\n\t"
+
+ : "+r" (x)
+ : "r"(yp), "r" (up), "r"(vp), "r"(d)
+ :"memory");
+ }
+#endif
+ for(; x<w; x++)
+ {
+ const long x2= x<<2;
+ d[8*x+0]=yp[x2];
+ d[8*x+1]=up[x];
+ d[8*x+2]=yp[x2+1];
+ d[8*x+3]=vp[x];
+ d[8*x+4]=yp[x2+2];
+ d[8*x+5]=up[x];
+ d[8*x+6]=yp[x2+3];
+ d[8*x+7]=vp[x];
+ }
+ }
+#ifdef HAVE_MMX
+ asm(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
+#endif
+}
diff --git a/contrib/ffmpeg/libswscale/swscale-example.c b/contrib/ffmpeg/libswscale/swscale-example.c
new file mode 100644
index 000000000..40f04c7d0
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/swscale-example.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdarg.h>
+
+#undef HAVE_AV_CONFIG_H
+#include "avutil.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+#include "rgb2rgb.h"
+
+static uint64_t getSSD(uint8_t *src1, uint8_t *src2, int stride1, int stride2, int w, int h){
+ int x,y;
+ uint64_t ssd=0;
+
+//printf("%d %d\n", w, h);
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int d= src1[x + y*stride1] - src2[x + y*stride2];
+ ssd+= d*d;
+//printf("%d", abs(src1[x + y*stride1] - src2[x + y*stride2])/26 );
+ }
+//printf("\n");
+ }
+ return ssd;
+}
+
+// test by ref -> src -> dst -> out & compare out against ref
+// ref & out are YV12
+static int doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcFormat, int dstFormat,
+ int srcW, int srcH, int dstW, int dstH, int flags){
+ uint8_t *src[3];
+ uint8_t *dst[3];
+ uint8_t *out[3];
+ int srcStride[3], dstStride[3];
+ int i;
+ uint64_t ssdY, ssdU, ssdV;
+ struct SwsContext *srcContext, *dstContext, *outContext;
+ int res;
+
+ res = 0;
+ for(i=0; i<3; i++){
+ // avoid stride % bpp != 0
+ if(srcFormat==PIX_FMT_RGB24 || srcFormat==PIX_FMT_BGR24)
+ srcStride[i]= srcW*3;
+ else
+ srcStride[i]= srcW*4;
+
+ if(dstFormat==PIX_FMT_RGB24 || dstFormat==PIX_FMT_BGR24)
+ dstStride[i]= dstW*3;
+ else
+ dstStride[i]= dstW*4;
+
+ src[i]= (uint8_t*) malloc(srcStride[i]*srcH);
+ dst[i]= (uint8_t*) malloc(dstStride[i]*dstH);
+ out[i]= (uint8_t*) malloc(refStride[i]*h);
+ if ((src[i] == NULL) || (dst[i] == NULL) || (out[i] == NULL)) {
+ perror("Malloc");
+ res = -1;
+
+ goto end;
+ }
+ }
+
+ dstContext = outContext = NULL;
+ srcContext= sws_getContext(w, h, PIX_FMT_YUV420P, srcW, srcH, srcFormat, flags, NULL, NULL, NULL);
+ if (srcContext == NULL) {
+ fprintf(stderr, "Failed to get %s ---> %s\n",
+ sws_format_name(PIX_FMT_YUV420P),
+ sws_format_name(srcFormat));
+ res = -1;
+
+ goto end;
+ }
+ dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL, NULL);
+ if (dstContext == NULL) {
+ fprintf(stderr, "Failed to get %s ---> %s\n",
+ sws_format_name(srcFormat),
+ sws_format_name(dstFormat));
+ res = -1;
+
+ goto end;
+ }
+ outContext= sws_getContext(dstW, dstH, dstFormat, w, h, PIX_FMT_YUV420P, flags, NULL, NULL, NULL);
+ if (outContext == NULL) {
+ fprintf(stderr, "Failed to get %s ---> %s\n",
+ sws_format_name(dstFormat),
+ sws_format_name(PIX_FMT_YUV420P));
+ res = -1;
+
+ goto end;
+ }
+// printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2],
+// (int)src[0], (int)src[1], (int)src[2]);
+
+ sws_scale(srcContext, ref, refStride, 0, h , src, srcStride);
+ sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
+ sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
+
+#if defined(ARCH_X86)
+ asm volatile ("emms\n\t");
+#endif
+
+ ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
+ ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1);
+ ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1);
+
+ if(srcFormat == PIX_FMT_GRAY8 || dstFormat==PIX_FMT_GRAY8) ssdU=ssdV=0; //FIXME check that output is really gray
+
+ ssdY/= w*h;
+ ssdU/= w*h/4;
+ ssdV/= w*h/4;
+
+ if(ssdY>100 || ssdU>100 || ssdV>100){
+ printf(" %s %dx%d -> %s %4dx%4d flags=%2d SSD=%5lld,%5lld,%5lld\n",
+ sws_format_name(srcFormat), srcW, srcH,
+ sws_format_name(dstFormat), dstW, dstH,
+ flags,
+ ssdY, ssdU, ssdV);
+ }
+
+ end:
+
+ sws_freeContext(srcContext);
+ sws_freeContext(dstContext);
+ sws_freeContext(outContext);
+
+ for(i=0; i<3; i++){
+ free(src[i]);
+ free(dst[i]);
+ free(out[i]);
+ }
+
+ return res;
+}
+
+void fast_memcpy(void *a, void *b, int s){ //FIXME
+ memcpy(a, b, s);
+}
+
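+// selfTest() exhaustively exercises doTest() over every src/dst pixel format
+// pair, three destination sizes around the reference size (2/3, 1 and 4/3 of
+// w and h) and each scaler flag 1,2,4,...,32; a failed allocation inside
+// doTest() skips the remaining sizes/flags for that format pair.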
+static void selfTest(uint8_t *src[3], int stride[3], int w, int h){
+ enum PixelFormat srcFormat, dstFormat;
+ int srcW, srcH, dstW, dstH;
+ int flags;
+
+ for(srcFormat = 0; srcFormat < PIX_FMT_NB; srcFormat++) {
+ for(dstFormat = 0; dstFormat < PIX_FMT_NB; dstFormat++) {
+ printf("%s -> %s\n",
+ sws_format_name(srcFormat),
+ sws_format_name(dstFormat));
+
+ srcW= w;
+ srcH= h;
+ for(dstW=w - w/3; dstW<= 4*w/3; dstW+= w/3){
+ for(dstH=h - h/3; dstH<= 4*h/3; dstH+= h/3){
+ for(flags=1; flags<33; flags*=2) {
+ int res;
+
+ res = doTest(src, stride, w, h, srcFormat, dstFormat,
+ srcW, srcH, dstW, dstH, flags);
+ if (res < 0) {
+ dstW = 4 * w / 3;
+ dstH = 4 * h / 3;
+ flags = 33;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+#define W 96
+#define H 96
+
+int main(int argc, char **argv){
+ uint8_t rgb_data[W*H*4];
+ uint8_t *rgb_src[3]= {rgb_data, NULL, NULL};
+ int rgb_stride[3]={4*W, 0, 0};
+ uint8_t data[3][W*H];
+ uint8_t *src[3]= {data[0], data[1], data[2]};
+ int stride[3]={W, W, W};
+ int x, y;
+ struct SwsContext *sws;
+
+ sws= sws_getContext(W/12, H/12, PIX_FMT_RGB32, W, H, PIX_FMT_YUV420P, 2, NULL, NULL, NULL);
+
+ for(y=0; y<H; y++){
+ for(x=0; x<W*4; x++){
+ rgb_data[ x + y*4*W]= random();
+ }
+ }
+#if defined(ARCH_X86)
+ sws_rgb2rgb_init(SWS_CPU_CAPS_MMX*0);
+#else
+ sws_rgb2rgb_init(0);
+#endif
+ sws_scale(sws, rgb_src, rgb_stride, 0, H , src, stride);
+
+#if defined(ARCH_X86)
+ asm volatile ("emms\n\t");
+#endif
+
+ selfTest(src, stride, W, H);
+
+ return 123;
+}
diff --git a/contrib/ffmpeg/libswscale/swscale.c b/contrib/ffmpeg/libswscale/swscale.c
new file mode 100644
index 000000000..eb9092c19
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/swscale.c
@@ -0,0 +1,2864 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * the C code (not assembly, mmx, ...) of this file can be used
+ * under the LGPL license too
+ */
+
+/*
+ supported input formats: YV12, I420/IYUV, YUY2, UYVY, BGR32, BGR24, BGR16, BGR15, RGB32, RGB24, Y8/Y800, YVU9/IF09
+ supported output formats: YV12, I420/IYUV, YUY2, UYVY, {BGR,RGB}{1,4,8,15,16,24,32}, Y8/Y800, YVU9/IF09
+ {BGR,RGB}{1,4,8,15,16} support dithering
+
+ unscaled special converters (YV12=I420=IYUV, Y800=Y8)
+ YV12 -> {BGR,RGB}{1,4,8,15,16,24,32}
+ x -> x
+ YUV9 -> YV12
+ YUV9/YV12 -> Y800
+ Y800 -> YUV9/YV12
+ BGR24 -> BGR32 & RGB24 -> RGB32
+ BGR32 -> BGR24 & RGB32 -> RGB24
+ BGR15 -> BGR16
+*/
+
+/*
+tested special converters (most are tested actually but I didn't write it down ...)
+ YV12 -> BGR16
+ YV12 -> YV12
+ BGR15 -> BGR16
+ BGR16 -> BGR16
+ YVU9 -> YV12
+
+untested special converters
+ YV12/I420 -> BGR15/BGR24/BGR32 (it's the yuv2rgb stuff, so it should be ok)
+ YV12/I420 -> YV12/I420
+ YUY2/BGR15/BGR24/BGR32/RGB24/RGB32 -> same format
+ BGR24 -> BGR32 & RGB24 -> RGB32
+ BGR32 -> BGR24 & RGB32 -> RGB24
+ BGR24 -> YV12
+*/
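+
+/*
+minimal usage sketch (illustrative only; it mirrors the test program in
+swscale-example.c above, the flag value and the formats are just examples):
+    struct SwsContext *ctx= sws_getContext(srcW, srcH, PIX_FMT_YUV420P,
+                                            dstW, dstH, PIX_FMT_BGR24,
+                                            SWS_BILINEAR, NULL, NULL, NULL);
+    sws_scale(ctx, src, srcStride, 0, srcH, dst, dstStride);
+    sws_freeContext(ctx);
+*/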
+
+#include <inttypes.h>
+#include <string.h>
+#include <math.h>
+#include <stdio.h>
+#include <unistd.h>
+#include "config.h"
+#include <assert.h>
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#else
+#include <stdlib.h>
+#endif
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#endif
+#include "swscale.h"
+#include "swscale_internal.h"
+#include "x86_cpu.h"
+#include "bswap.h"
+#include "rgb2rgb.h"
+#ifdef USE_FASTMEMCPY
+#include "libvo/fastmemcpy.h"
+#endif
+
+#undef MOVNTQ
+#undef PAVGB
+
+//#undef HAVE_MMX2
+//#define HAVE_3DNOW
+//#undef HAVE_MMX
+//#undef ARCH_X86
+//#define WORDS_BIGENDIAN
+#define DITHER1XBPP
+
+#define FAST_BGR2YV12 // use 7-bit coefficients instead of 15-bit ones
+
+#define RET 0xC3 //near return opcode for X86
+
+#ifdef MP_DEBUG
+#define ASSERT(x) assert(x);
+#else
+#define ASSERT(x) ;
+#endif
+
+#ifdef M_PI
+#define PI M_PI
+#else
+#define PI 3.14159265358979323846
+#endif
+
+#define isSupportedIn(x) ((x)==PIX_FMT_YUV420P || (x)==PIX_FMT_YUYV422 || (x)==PIX_FMT_UYVY422\
+ || (x)==PIX_FMT_RGB32|| (x)==PIX_FMT_BGR24|| (x)==PIX_FMT_BGR565|| (x)==PIX_FMT_BGR555\
+ || (x)==PIX_FMT_BGR32|| (x)==PIX_FMT_RGB24|| (x)==PIX_FMT_RGB565|| (x)==PIX_FMT_RGB555\
+ || (x)==PIX_FMT_GRAY8 || (x)==PIX_FMT_YUV410P\
+ || (x)==PIX_FMT_GRAY16BE || (x)==PIX_FMT_GRAY16LE\
+ || (x)==PIX_FMT_YUV444P || (x)==PIX_FMT_YUV422P || (x)==PIX_FMT_YUV411P)
+#define isSupportedOut(x) ((x)==PIX_FMT_YUV420P || (x)==PIX_FMT_YUYV422 || (x)==PIX_FMT_UYVY422\
+ || (x)==PIX_FMT_YUV444P || (x)==PIX_FMT_YUV422P || (x)==PIX_FMT_YUV411P\
+ || isRGB(x) || isBGR(x)\
+ || (x)==PIX_FMT_NV12 || (x)==PIX_FMT_NV21\
+ || (x)==PIX_FMT_GRAY16BE || (x)==PIX_FMT_GRAY16LE\
+ || (x)==PIX_FMT_GRAY8 || (x)==PIX_FMT_YUV410P)
+#define isPacked(x) ((x)==PIX_FMT_YUYV422 || (x)==PIX_FMT_UYVY422 ||isRGB(x) || isBGR(x))
+
+#define RGB2YUV_SHIFT 16
+#define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5))
+#define BV ((int)(-0.071*(1<<RGB2YUV_SHIFT)+0.5))
+#define BU ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define GY ((int)( 0.504*(1<<RGB2YUV_SHIFT)+0.5))
+#define GV ((int)(-0.368*(1<<RGB2YUV_SHIFT)+0.5))
+#define GU ((int)(-0.291*(1<<RGB2YUV_SHIFT)+0.5))
+#define RY ((int)( 0.257*(1<<RGB2YUV_SHIFT)+0.5))
+#define RV ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define RU ((int)(-0.148*(1<<RGB2YUV_SHIFT)+0.5))
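+
+/* The constants above are the ITU-R BT.601 studio-swing RGB->YUV coefficients
+   stored as 16.16 fixed point (coefficient * (1<<RGB2YUV_SHIFT), rounded);
+   luma, for example, ends up as Y = ((RY*R + GY*G + BY*B)>>RGB2YUV_SHIFT) + 16
+   in the RGB/BGR -> YV12 converters. */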
+
+extern const int32_t Inverse_Table_6_9[8][4];
+
+/*
+NOTES
+Special versions: fast Y 1:1 scaling (no interpolation in y direction)
+
+TODO
+more intelligent misalignment avoidance for the horizontal scaler
+write special vertical cubic upscale version
+Optimize C code (yv12 / minmax)
+add support for packed pixel yuv input & output
+add support for Y8 output
+optimize bgr24 & bgr32
+add BGR4 output support
+write special BGR->BGR scaler
+*/
+
+#if defined(ARCH_X86)
+static uint64_t attribute_used __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
+static uint64_t attribute_used __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
+static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
+static uint64_t attribute_used __attribute__((aligned(8))) w02= 0x0002000200020002LL;
+static uint64_t attribute_used __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
+static uint64_t attribute_used __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
+static uint64_t attribute_used __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;
+static uint64_t attribute_used __attribute__((aligned(8))) bm01010101=0x00FF00FF00FF00FFLL;
+
+static volatile uint64_t attribute_used __attribute__((aligned(8))) b5Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) g5Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) g6Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) r5Dither;
+
+static uint64_t __attribute__((aligned(8))) dither4[2]={
+ 0x0103010301030103LL,
+ 0x0200020002000200LL,};
+
+static uint64_t __attribute__((aligned(8))) dither8[2]={
+ 0x0602060206020602LL,
+ 0x0004000400040004LL,};
+
+static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
+static uint64_t attribute_used __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
+static uint64_t attribute_used __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
+static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
+static uint64_t attribute_used __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
+static uint64_t attribute_used __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
+
+static uint64_t attribute_used __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
+static uint64_t attribute_used __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
+static uint64_t attribute_used __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
+
+#ifdef FAST_BGR2YV12
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000000210041000DULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000FFEEFFDC0038ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00000038FFD2FFF8ULL;
+#else
+static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000020E540830C8BULL;
+static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000ED0FDAC23831ULL;
+static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00003831D0E6F6EAULL;
+#endif /* FAST_BGR2YV12 */
+static const uint64_t bgr2YOffset attribute_used __attribute__((aligned(8))) = 0x1010101010101010ULL;
+static const uint64_t bgr2UVOffset attribute_used __attribute__((aligned(8)))= 0x8080808080808080ULL;
+static const uint64_t w1111 attribute_used __attribute__((aligned(8))) = 0x0001000100010001ULL;
+#endif /* defined(ARCH_X86) */
+
+// clipping helper table for C implementations:
+static unsigned char clip_table[768];
+
+static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b);
+
+extern const uint8_t dither_2x2_4[2][8];
+extern const uint8_t dither_2x2_8[2][8];
+extern const uint8_t dither_8x8_32[8][8];
+extern const uint8_t dither_8x8_73[8][8];
+extern const uint8_t dither_8x8_220[8][8];
+
+char *sws_format_name(enum PixelFormat format)
+{
+ switch (format) {
+ case PIX_FMT_YUV420P:
+ return "yuv420p";
+ case PIX_FMT_YUYV422:
+ return "yuyv422";
+ case PIX_FMT_RGB24:
+ return "rgb24";
+ case PIX_FMT_BGR24:
+ return "bgr24";
+ case PIX_FMT_YUV422P:
+ return "yuv422p";
+ case PIX_FMT_YUV444P:
+ return "yuv444p";
+ case PIX_FMT_RGB32:
+ return "rgb32";
+ case PIX_FMT_YUV410P:
+ return "yuv410p";
+ case PIX_FMT_YUV411P:
+ return "yuv411p";
+ case PIX_FMT_RGB565:
+ return "rgb565";
+ case PIX_FMT_RGB555:
+ return "rgb555";
+ case PIX_FMT_GRAY16BE:
+ return "gray16be";
+ case PIX_FMT_GRAY16LE:
+ return "gray16le";
+ case PIX_FMT_GRAY8:
+ return "gray8";
+ case PIX_FMT_MONOWHITE:
+ return "mono white";
+ case PIX_FMT_MONOBLACK:
+ return "mono black";
+ case PIX_FMT_PAL8:
+ return "Palette";
+ case PIX_FMT_YUVJ420P:
+ return "yuvj420p";
+ case PIX_FMT_YUVJ422P:
+ return "yuvj422p";
+ case PIX_FMT_YUVJ444P:
+ return "yuvj444p";
+ case PIX_FMT_XVMC_MPEG2_MC:
+ return "xvmc_mpeg2_mc";
+ case PIX_FMT_XVMC_MPEG2_IDCT:
+ return "xvmc_mpeg2_idct";
+ case PIX_FMT_UYVY422:
+ return "uyvy422";
+ case PIX_FMT_UYYVYY411:
+ return "uyyvyy411";
+ case PIX_FMT_RGB32_1:
+ return "rgb32x";
+ case PIX_FMT_BGR32_1:
+ return "bgr32x";
+ case PIX_FMT_BGR32:
+ return "bgr32";
+ case PIX_FMT_BGR565:
+ return "bgr565";
+ case PIX_FMT_BGR555:
+ return "bgr555";
+ case PIX_FMT_BGR8:
+ return "bgr8";
+ case PIX_FMT_BGR4:
+ return "bgr4";
+ case PIX_FMT_BGR4_BYTE:
+ return "bgr4 byte";
+ case PIX_FMT_RGB8:
+ return "rgb8";
+ case PIX_FMT_RGB4:
+ return "rgb4";
+ case PIX_FMT_RGB4_BYTE:
+ return "rgb4 byte";
+ case PIX_FMT_NV12:
+ return "nv12";
+ case PIX_FMT_NV21:
+ return "nv21";
+ default:
+ return "Unknown format";
+ }
+}
+
+#if defined(ARCH_X86)
+void in_asm_used_var_warning_killer()
+{
+ volatile int i= bF8+bFC+w10+
+ bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+
+ M24A+M24B+M24C+w02 + b5Dither+g5Dither+r5Dither+g6Dither+dither4[0]+dither8[0]+bm01010101;
+ if(i) i=0;
+}
+#endif
+
+static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+{
+ //FIXME Optimize (just quickly written, not optimized)
+ int i;
+ for(i=0; i<dstW; i++)
+ {
+ int val=1<<18;
+ int j;
+ for(j=0; j<lumFilterSize; j++)
+ val += lumSrc[j][i] * lumFilter[j];
+
+ dest[i]= FFMIN(FFMAX(val>>19, 0), 255);
+ }
+
+ if(uDest != NULL)
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for(j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+
+ uDest[i]= FFMIN(FFMAX(u>>19, 0), 255);
+ vDest[i]= FFMIN(FFMAX(v>>19, 0), 255);
+ }
+}
+
+static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
+{
+ //FIXME Optimize (just quickly written, not optimized)
+ int i;
+ for(i=0; i<dstW; i++)
+ {
+ int val=1<<18;
+ int j;
+ for(j=0; j<lumFilterSize; j++)
+ val += lumSrc[j][i] * lumFilter[j];
+
+ dest[i]= FFMIN(FFMAX(val>>19, 0), 255);
+ }
+
+ if(uDest == NULL)
+ return;
+
+ if(dstFormat == PIX_FMT_NV12)
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for(j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+
+ uDest[2*i]= FFMIN(FFMAX(u>>19, 0), 255);
+ uDest[2*i+1]= FFMIN(FFMAX(v>>19, 0), 255);
+ }
+ else
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for(j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+
+ uDest[2*i]= FFMIN(FFMAX(v>>19, 0), 255);
+ uDest[2*i+1]= FFMIN(FFMAX(u>>19, 0), 255);
+ }
+}
+
+#define YSCALE_YUV_2_PACKEDX_C(type) \
+ for(i=0; i<(dstW>>1); i++){\
+ int j;\
+ int Y1=1<<18;\
+ int Y2=1<<18;\
+ int U=1<<18;\
+ int V=1<<18;\
+ type *r, *b, *g;\
+ const int i2= 2*i;\
+ \
+ for(j=0; j<lumFilterSize; j++)\
+ {\
+ Y1 += lumSrc[j][i2] * lumFilter[j];\
+ Y2 += lumSrc[j][i2+1] * lumFilter[j];\
+ }\
+ for(j=0; j<chrFilterSize; j++)\
+ {\
+ U += chrSrc[j][i] * chrFilter[j];\
+ V += chrSrc[j][i+2048] * chrFilter[j];\
+ }\
+ Y1>>=19;\
+ Y2>>=19;\
+ U >>=19;\
+ V >>=19;\
+ if((Y1|Y2|U|V)&256)\
+ {\
+ if(Y1>255) Y1=255;\
+ else if(Y1<0)Y1=0;\
+ if(Y2>255) Y2=255;\
+ else if(Y2<0)Y2=0;\
+ if(U>255) U=255;\
+ else if(U<0) U=0;\
+ if(V>255) V=255;\
+ else if(V<0) V=0;\
+ }
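+/* In YSCALE_YUV_2_PACKEDX_C the accumulators start at 1<<18, which is the 0.5
+   rounding bias for the final >>19 shift: the intermediate line buffers and
+   the vertical filter coefficients together carry 19 fractional bits, so the
+   shift returns 8-bit values, and the (Y1|Y2|U|V)&256 test cheaply detects
+   whether any of them needs clipping. */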
+
+#define YSCALE_YUV_2_RGBX_C(type) \
+ YSCALE_YUV_2_PACKEDX_C(type)\
+ r = c->table_rV[V];\
+ g = c->table_gU[U] + c->table_gV[V];\
+ b = c->table_bU[U];\
+
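+/* table_rV/table_gU/table_gV/table_bU (filled in yuv2rgb_c_init_tables()) map
+   a luma index to the red/green/blue contribution already shifted into its bit
+   position within the packed pixel, so r[Y1] + g[Y1] + b[Y1] assembles a
+   complete output pixel; the dither offsets used below are simply added to the
+   table index. */
+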
+#define YSCALE_YUV_2_PACKED2_C \
+ for(i=0; i<(dstW>>1); i++){\
+ const int i2= 2*i;\
+ int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>19;\
+ int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19;\
+ int U= (uvbuf0[i ]*uvalpha1+uvbuf1[i ]*uvalpha)>>19;\
+ int V= (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;\
+
+#define YSCALE_YUV_2_RGB2_C(type) \
+ YSCALE_YUV_2_PACKED2_C\
+ type *r, *b, *g;\
+ r = c->table_rV[V];\
+ g = c->table_gU[U] + c->table_gV[V];\
+ b = c->table_bU[U];\
+
+#define YSCALE_YUV_2_PACKED1_C \
+ for(i=0; i<(dstW>>1); i++){\
+ const int i2= 2*i;\
+ int Y1= buf0[i2 ]>>7;\
+ int Y2= buf0[i2+1]>>7;\
+ int U= (uvbuf1[i ])>>7;\
+ int V= (uvbuf1[i+2048])>>7;\
+
+#define YSCALE_YUV_2_RGB1_C(type) \
+ YSCALE_YUV_2_PACKED1_C\
+ type *r, *b, *g;\
+ r = c->table_rV[V];\
+ g = c->table_gU[U] + c->table_gV[V];\
+ b = c->table_bU[U];\
+
+#define YSCALE_YUV_2_PACKED1B_C \
+ for(i=0; i<(dstW>>1); i++){\
+ const int i2= 2*i;\
+ int Y1= buf0[i2 ]>>7;\
+ int Y2= buf0[i2+1]>>7;\
+ int U= (uvbuf0[i ] + uvbuf1[i ])>>8;\
+ int V= (uvbuf0[i+2048] + uvbuf1[i+2048])>>8;\
+
+#define YSCALE_YUV_2_RGB1B_C(type) \
+ YSCALE_YUV_2_PACKED1B_C\
+ type *r, *b, *g;\
+ r = c->table_rV[V];\
+ g = c->table_gU[U] + c->table_gV[V];\
+ b = c->table_bU[U];\
+
+#define YSCALE_YUV_2_ANYRGB_C(func, func2)\
+ switch(c->dstFormat)\
+ {\
+ case PIX_FMT_RGB32:\
+ case PIX_FMT_BGR32:\
+ func(uint32_t)\
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
+ } \
+ break;\
+ case PIX_FMT_RGB24:\
+ func(uint8_t)\
+ ((uint8_t*)dest)[0]= r[Y1];\
+ ((uint8_t*)dest)[1]= g[Y1];\
+ ((uint8_t*)dest)[2]= b[Y1];\
+ ((uint8_t*)dest)[3]= r[Y2];\
+ ((uint8_t*)dest)[4]= g[Y2];\
+ ((uint8_t*)dest)[5]= b[Y2];\
+ dest+=6;\
+ }\
+ break;\
+ case PIX_FMT_BGR24:\
+ func(uint8_t)\
+ ((uint8_t*)dest)[0]= b[Y1];\
+ ((uint8_t*)dest)[1]= g[Y1];\
+ ((uint8_t*)dest)[2]= r[Y1];\
+ ((uint8_t*)dest)[3]= b[Y2];\
+ ((uint8_t*)dest)[4]= g[Y2];\
+ ((uint8_t*)dest)[5]= r[Y2];\
+ dest+=6;\
+ }\
+ break;\
+ case PIX_FMT_RGB565:\
+ case PIX_FMT_BGR565:\
+ {\
+ const int dr1= dither_2x2_8[y&1 ][0];\
+ const int dg1= dither_2x2_4[y&1 ][0];\
+ const int db1= dither_2x2_8[(y&1)^1][0];\
+ const int dr2= dither_2x2_8[y&1 ][1];\
+ const int dg2= dither_2x2_4[y&1 ][1];\
+ const int db2= dither_2x2_8[(y&1)^1][1];\
+ func(uint16_t)\
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
+ }\
+ }\
+ break;\
+ case PIX_FMT_RGB555:\
+ case PIX_FMT_BGR555:\
+ {\
+ const int dr1= dither_2x2_8[y&1 ][0];\
+ const int dg1= dither_2x2_8[y&1 ][1];\
+ const int db1= dither_2x2_8[(y&1)^1][0];\
+ const int dr2= dither_2x2_8[y&1 ][1];\
+ const int dg2= dither_2x2_8[y&1 ][0];\
+ const int db2= dither_2x2_8[(y&1)^1][1];\
+ func(uint16_t)\
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
+ }\
+ }\
+ break;\
+ case PIX_FMT_RGB8:\
+ case PIX_FMT_BGR8:\
+ {\
+ const uint8_t * const d64= dither_8x8_73[y&7];\
+ const uint8_t * const d32= dither_8x8_32[y&7];\
+ func(uint8_t)\
+ ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];\
+ ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];\
+ }\
+ }\
+ break;\
+ case PIX_FMT_RGB4:\
+ case PIX_FMT_BGR4:\
+ {\
+ const uint8_t * const d64= dither_8x8_73 [y&7];\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ func(uint8_t)\
+ ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]\
+ + ((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);\
+ }\
+ }\
+ break;\
+ case PIX_FMT_RGB4_BYTE:\
+ case PIX_FMT_BGR4_BYTE:\
+ {\
+ const uint8_t * const d64= dither_8x8_73 [y&7];\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ func(uint8_t)\
+ ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];\
+ ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];\
+ }\
+ }\
+ break;\
+ case PIX_FMT_MONOBLACK:\
+ {\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ uint8_t *g= c->table_gU[128] + c->table_gV[128];\
+ for(i=0; i<dstW-7; i+=8){\
+ int acc;\
+ acc = g[((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19) + d128[0]];\
+ acc+= acc + g[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19) + d128[1]];\
+ acc+= acc + g[((buf0[i+2]*yalpha1+buf1[i+2]*yalpha)>>19) + d128[2]];\
+ acc+= acc + g[((buf0[i+3]*yalpha1+buf1[i+3]*yalpha)>>19) + d128[3]];\
+ acc+= acc + g[((buf0[i+4]*yalpha1+buf1[i+4]*yalpha)>>19) + d128[4]];\
+ acc+= acc + g[((buf0[i+5]*yalpha1+buf1[i+5]*yalpha)>>19) + d128[5]];\
+ acc+= acc + g[((buf0[i+6]*yalpha1+buf1[i+6]*yalpha)>>19) + d128[6]];\
+ acc+= acc + g[((buf0[i+7]*yalpha1+buf1[i+7]*yalpha)>>19) + d128[7]];\
+ ((uint8_t*)dest)[0]= acc;\
+ dest++;\
+ }\
+\
+/*\
+((uint8_t*)dest)-= dstW>>4;\
+{\
+ int acc=0;\
+ int left=0;\
+ static int top[1024];\
+ static int last_new[1024][1024];\
+ static int last_in3[1024][1024];\
+ static int drift[1024][1024];\
+ int topLeft=0;\
+ int shift=0;\
+ int count=0;\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ int error_new=0;\
+ int error_in3=0;\
+ int f=0;\
+ \
+ for(i=dstW>>1; i<dstW; i++){\
+ int in= ((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19);\
+ int in2 = (76309 * (in - 16) + 32768) >> 16;\
+ int in3 = (in2 < 0) ? 0 : ((in2 > 255) ? 255 : in2);\
+ int old= (left*7 + topLeft + top[i]*5 + top[i+1]*3)/20 + in3\
+ + (last_new[y][i] - in3)*f/256;\
+ int new= old> 128 ? 255 : 0;\
+\
+ error_new+= FFABS(last_new[y][i] - new);\
+ error_in3+= FFABS(last_in3[y][i] - in3);\
+ f= error_new - error_in3*4;\
+ if(f<0) f=0;\
+ if(f>256) f=256;\
+\
+ topLeft= top[i];\
+ left= top[i]= old - new;\
+ last_new[y][i]= new;\
+ last_in3[y][i]= in3;\
+\
+ acc+= acc + (new&1);\
+ if((i&7)==6){\
+ ((uint8_t*)dest)[0]= acc;\
+ ((uint8_t*)dest)++;\
+ }\
+ }\
+}\
+*/\
+ }\
+ break;\
+ case PIX_FMT_YUYV422:\
+ func2\
+ ((uint8_t*)dest)[2*i2+0]= Y1;\
+ ((uint8_t*)dest)[2*i2+1]= U;\
+ ((uint8_t*)dest)[2*i2+2]= Y2;\
+ ((uint8_t*)dest)[2*i2+3]= V;\
+ } \
+ break;\
+ case PIX_FMT_UYVY422:\
+ func2\
+ ((uint8_t*)dest)[2*i2+0]= U;\
+ ((uint8_t*)dest)[2*i2+1]= Y1;\
+ ((uint8_t*)dest)[2*i2+2]= V;\
+ ((uint8_t*)dest)[2*i2+3]= Y2;\
+ } \
+ break;\
+ }\
+
+
+static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, int dstW, int y)
+{
+ int i;
+ switch(c->dstFormat)
+ {
+ case PIX_FMT_BGR32:
+ case PIX_FMT_RGB32:
+ YSCALE_YUV_2_RGBX_C(uint32_t)
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];
+ }
+ break;
+ case PIX_FMT_RGB24:
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[0]= r[Y1];
+ ((uint8_t*)dest)[1]= g[Y1];
+ ((uint8_t*)dest)[2]= b[Y1];
+ ((uint8_t*)dest)[3]= r[Y2];
+ ((uint8_t*)dest)[4]= g[Y2];
+ ((uint8_t*)dest)[5]= b[Y2];
+ dest+=6;
+ }
+ break;
+ case PIX_FMT_BGR24:
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[0]= b[Y1];
+ ((uint8_t*)dest)[1]= g[Y1];
+ ((uint8_t*)dest)[2]= r[Y1];
+ ((uint8_t*)dest)[3]= b[Y2];
+ ((uint8_t*)dest)[4]= g[Y2];
+ ((uint8_t*)dest)[5]= r[Y2];
+ dest+=6;
+ }
+ break;
+ case PIX_FMT_RGB565:
+ case PIX_FMT_BGR565:
+ {
+ const int dr1= dither_2x2_8[y&1 ][0];
+ const int dg1= dither_2x2_4[y&1 ][0];
+ const int db1= dither_2x2_8[(y&1)^1][0];
+ const int dr2= dither_2x2_8[y&1 ][1];
+ const int dg2= dither_2x2_4[y&1 ][1];
+ const int db2= dither_2x2_8[(y&1)^1][1];
+ YSCALE_YUV_2_RGBX_C(uint16_t)
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];
+ }
+ }
+ break;
+ case PIX_FMT_RGB555:
+ case PIX_FMT_BGR555:
+ {
+ const int dr1= dither_2x2_8[y&1 ][0];
+ const int dg1= dither_2x2_8[y&1 ][1];
+ const int db1= dither_2x2_8[(y&1)^1][0];
+ const int dr2= dither_2x2_8[y&1 ][1];
+ const int dg2= dither_2x2_8[y&1 ][0];
+ const int db2= dither_2x2_8[(y&1)^1][1];
+ YSCALE_YUV_2_RGBX_C(uint16_t)
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];
+ }
+ }
+ break;
+ case PIX_FMT_RGB8:
+ case PIX_FMT_BGR8:
+ {
+ const uint8_t * const d64= dither_8x8_73[y&7];
+ const uint8_t * const d32= dither_8x8_32[y&7];
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];
+ ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];
+ }
+ }
+ break;
+ case PIX_FMT_RGB4:
+ case PIX_FMT_BGR4:
+ {
+ const uint8_t * const d64= dither_8x8_73 [y&7];
+ const uint8_t * const d128=dither_8x8_220[y&7];
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]
+ +((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);
+ }
+ }
+ break;
+ case PIX_FMT_RGB4_BYTE:
+ case PIX_FMT_BGR4_BYTE:
+ {
+ const uint8_t * const d64= dither_8x8_73 [y&7];
+ const uint8_t * const d128=dither_8x8_220[y&7];
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];
+ ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];
+ }
+ }
+ break;
+ case PIX_FMT_MONOBLACK:
+ {
+ const uint8_t * const d128=dither_8x8_220[y&7];
+ uint8_t *g= c->table_gU[128] + c->table_gV[128];
+ int acc=0;
+ for(i=0; i<dstW-1; i+=2){
+ int j;
+ int Y1=1<<18;
+ int Y2=1<<18;
+
+ for(j=0; j<lumFilterSize; j++)
+ {
+ Y1 += lumSrc[j][i] * lumFilter[j];
+ Y2 += lumSrc[j][i+1] * lumFilter[j];
+ }
+ Y1>>=19;
+ Y2>>=19;
+ if((Y1|Y2)&256)
+ {
+ if(Y1>255) Y1=255;
+ else if(Y1<0)Y1=0;
+ if(Y2>255) Y2=255;
+ else if(Y2<0)Y2=0;
+ }
+ acc+= acc + g[Y1+d128[(i+0)&7]];
+ acc+= acc + g[Y2+d128[(i+1)&7]];
+ if((i&7)==6){
+ ((uint8_t*)dest)[0]= acc;
+ dest++;
+ }
+ }
+ }
+ break;
+ case PIX_FMT_YUYV422:
+ YSCALE_YUV_2_PACKEDX_C(void)
+ ((uint8_t*)dest)[2*i2+0]= Y1;
+ ((uint8_t*)dest)[2*i2+1]= U;
+ ((uint8_t*)dest)[2*i2+2]= Y2;
+ ((uint8_t*)dest)[2*i2+3]= V;
+ }
+ break;
+ case PIX_FMT_UYVY422:
+ YSCALE_YUV_2_PACKEDX_C(void)
+ ((uint8_t*)dest)[2*i2+0]= U;
+ ((uint8_t*)dest)[2*i2+1]= Y1;
+ ((uint8_t*)dest)[2*i2+2]= V;
+ ((uint8_t*)dest)[2*i2+3]= Y2;
+ }
+ break;
+ }
+}
+
+
+//Note: we have C, X86, MMX, MMX2 and 3DNOW versions; there is no 3DNOW+MMX2 one
+//Plain C versions
+#if !defined (HAVE_MMX) || defined (RUNTIME_CPUDETECT)
+#define COMPILE_C
+#endif
+
+#ifdef ARCH_POWERPC
+#if defined (HAVE_ALTIVEC) || defined (RUNTIME_CPUDETECT)
+#define COMPILE_ALTIVEC
+#endif //HAVE_ALTIVEC
+#endif //ARCH_POWERPC
+
+#if defined(ARCH_X86)
+
+#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
+#define COMPILE_MMX
+#endif
+
+#if defined (HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
+#define COMPILE_MMX2
+#endif
+
+#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
+#define COMPILE_3DNOW
+#endif
+#endif //ARCH_X86 || ARCH_X86_64
+
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_3DNOW
+
+#ifdef COMPILE_C
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_3DNOW
+#undef HAVE_ALTIVEC
+#define RENAME(a) a ## _C
+#include "swscale_template.c"
+#endif
+
+#ifdef ARCH_POWERPC
+#ifdef COMPILE_ALTIVEC
+#undef RENAME
+#define HAVE_ALTIVEC
+#define RENAME(a) a ## _altivec
+#include "swscale_template.c"
+#endif
+#endif //ARCH_POWERPC
+
+#if defined(ARCH_X86)
+
+//X86 versions
+/*
+#undef RENAME
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_3DNOW
+#define ARCH_X86
+#define RENAME(a) a ## _X86
+#include "swscale_template.c"
+*/
+//MMX versions
+#ifdef COMPILE_MMX
+#undef RENAME
+#define HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_3DNOW
+#define RENAME(a) a ## _MMX
+#include "swscale_template.c"
+#endif
+
+//MMX2 versions
+#ifdef COMPILE_MMX2
+#undef RENAME
+#define HAVE_MMX
+#define HAVE_MMX2
+#undef HAVE_3DNOW
+#define RENAME(a) a ## _MMX2
+#include "swscale_template.c"
+#endif
+
+//3DNOW versions
+#ifdef COMPILE_3DNOW
+#undef RENAME
+#define HAVE_MMX
+#undef HAVE_MMX2
+#define HAVE_3DNOW
+#define RENAME(a) a ## _3DNow
+#include "swscale_template.c"
+#endif
+
+#endif //ARCH_X86 || ARCH_X86_64
+
+// minor note: the HAVE_xyz defines are messed up after this line, so don't use them
+
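+// getSplineCoeff() evaluates the cubic a + b*dist + c*dist^2 + d*dist^3 for
+// dist <= 1; for larger distances it recurses with the coefficients of the
+// neighbouring spline segment until dist falls into [0..1].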
+static double getSplineCoeff(double a, double b, double c, double d, double dist)
+{
+// printf("%f %f %f %f %f\n", a,b,c,d,dist);
+ if(dist<=1.0) return ((d*dist + c)*dist + b)*dist +a;
+ else return getSplineCoeff( 0.0,
+ b+ 2.0*c + 3.0*d,
+ c + 3.0*d,
+ -b- 3.0*c - 6.0*d,
+ dist-1.0);
+}
+
+static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSize, int xInc,
+ int srcW, int dstW, int filterAlign, int one, int flags,
+ SwsVector *srcFilter, SwsVector *dstFilter, double param[2])
+{
+ int i;
+ int filterSize;
+ int filter2Size;
+ int minFilterSize;
+ double *filter=NULL;
+ double *filter2=NULL;
+#if defined(ARCH_X86)
+ if(flags & SWS_CPU_CAPS_MMX)
+ asm volatile("emms\n\t"::: "memory"); //FIXME this shouldn't be required but it IS (even for non-MMX versions)
+#endif
+
+ // Note the +1 is for the MMX scaler which reads over the end
+ *filterPos = av_malloc((dstW+1)*sizeof(int16_t));
+
+ if(FFABS(xInc - 0x10000) <10) // unscaled
+ {
+ int i;
+ filterSize= 1;
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+ for(i=0; i<dstW*filterSize; i++) filter[i]=0;
+
+ for(i=0; i<dstW; i++)
+ {
+ filter[i*filterSize]=1;
+ (*filterPos)[i]=i;
+ }
+
+ }
+ else if(flags&SWS_POINT) // lame looking point sampling mode
+ {
+ int i;
+ int xDstInSrc;
+ filterSize= 1;
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+
+ xDstInSrc= xInc/2 - 0x8000;
+ for(i=0; i<dstW; i++)
+ {
+ int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
+
+ (*filterPos)[i]= xx;
+ filter[i]= 1.0;
+ xDstInSrc+= xInc;
+ }
+ }
+ else if((xInc <= (1<<16) && (flags&SWS_AREA)) || (flags&SWS_FAST_BILINEAR)) // bilinear upscale
+ {
+ int i;
+ int xDstInSrc;
+ if (flags&SWS_BICUBIC) filterSize= 4;
+ else if(flags&SWS_X ) filterSize= 4;
+ else filterSize= 2; // SWS_BILINEAR / SWS_AREA
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+
+ xDstInSrc= xInc/2 - 0x8000;
+ for(i=0; i<dstW; i++)
+ {
+ int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
+ int j;
+
+ (*filterPos)[i]= xx;
+ //Bilinear upscale / linear interpolate / Area averaging
+ for(j=0; j<filterSize; j++)
+ {
+ double d= FFABS((xx<<16) - xDstInSrc)/(double)(1<<16);
+ double coeff= 1.0 - d;
+ if(coeff<0) coeff=0;
+ filter[i*filterSize + j]= coeff;
+ xx++;
+ }
+ xDstInSrc+= xInc;
+ }
+ }
+ else
+ {
+ double xDstInSrc;
+ double sizeFactor, filterSizeInSrc;
+ const double xInc1= (double)xInc / (double)(1<<16);
+
+ if (flags&SWS_BICUBIC) sizeFactor= 4.0;
+ else if(flags&SWS_X) sizeFactor= 8.0;
+ else if(flags&SWS_AREA) sizeFactor= 1.0; //downscale only, for upscale it is bilinear
+ else if(flags&SWS_GAUSS) sizeFactor= 8.0; // infinite ;)
+ else if(flags&SWS_LANCZOS) sizeFactor= param[0] != SWS_PARAM_DEFAULT ? 2.0*param[0] : 6.0;
+ else if(flags&SWS_SINC) sizeFactor= 20.0; // infinite ;)
+ else if(flags&SWS_SPLINE) sizeFactor= 20.0; // infinite ;)
+ else if(flags&SWS_BILINEAR) sizeFactor= 2.0;
+ else {
+ sizeFactor= 0.0; //GCC warning killer
+ ASSERT(0)
+ }
+
+ if(xInc1 <= 1.0) filterSizeInSrc= sizeFactor; // upscale
+ else filterSizeInSrc= sizeFactor*srcW / (double)dstW;
+
+ filterSize= (int)ceil(1 + filterSizeInSrc); // will be reduced later if possible
+ if(filterSize > srcW-2) filterSize=srcW-2;
+
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+
+ xDstInSrc= xInc1 / 2.0 - 0.5;
+ for(i=0; i<dstW; i++)
+ {
+ int xx= (int)(xDstInSrc - (filterSize-1)*0.5 + 0.5);
+ int j;
+ (*filterPos)[i]= xx;
+ for(j=0; j<filterSize; j++)
+ {
+ double d= FFABS(xx - xDstInSrc)/filterSizeInSrc*sizeFactor;
+ double coeff;
+ if(flags & SWS_BICUBIC)
+ {
+ double B= param[0] != SWS_PARAM_DEFAULT ? param[0] : 0.0;
+ double C= param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6;
+
+ if(d<1.0)
+ coeff = (12-9*B-6*C)*d*d*d + (-18+12*B+6*C)*d*d + 6-2*B;
+ else if(d<2.0)
+ coeff = (-B-6*C)*d*d*d + (6*B+30*C)*d*d + (-12*B-48*C)*d +8*B+24*C;
+ else
+ coeff=0.0;
+ }
+/* else if(flags & SWS_X)
+ {
+ double p= param ? param*0.01 : 0.3;
+ coeff = d ? sin(d*PI)/(d*PI) : 1.0;
+ coeff*= pow(2.0, - p*d*d);
+ }*/
+ else if(flags & SWS_X)
+ {
+ double A= param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0;
+
+ if(d<1.0)
+ coeff = cos(d*PI);
+ else
+ coeff=-1.0;
+ if(coeff<0.0) coeff= -pow(-coeff, A);
+ else coeff= pow( coeff, A);
+ coeff= coeff*0.5 + 0.5;
+ }
+ else if(flags & SWS_AREA)
+ {
+ double srcPixelSize= 1.0/xInc1;
+ if(d + srcPixelSize/2 < 0.5) coeff= 1.0;
+ else if(d - srcPixelSize/2 < 0.5) coeff= (0.5-d)/srcPixelSize + 0.5;
+ else coeff=0.0;
+ }
+ else if(flags & SWS_GAUSS)
+ {
+ double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
+ coeff = pow(2.0, - p*d*d);
+ }
+ else if(flags & SWS_SINC)
+ {
+ coeff = d ? sin(d*PI)/(d*PI) : 1.0;
+ }
+ else if(flags & SWS_LANCZOS)
+ {
+ double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
+ coeff = d ? sin(d*PI)*sin(d*PI/p)/(d*d*PI*PI/p) : 1.0;
+ if(d>p) coeff=0;
+ }
+ else if(flags & SWS_BILINEAR)
+ {
+ coeff= 1.0 - d;
+ if(coeff<0) coeff=0;
+ }
+ else if(flags & SWS_SPLINE)
+ {
+ double p=-2.196152422706632;
+ coeff = getSplineCoeff(1.0, 0.0, p, -p-1.0, d);
+ }
+ else {
+ coeff= 0.0; //GCC warning killer
+ ASSERT(0)
+ }
+
+ filter[i*filterSize + j]= coeff;
+ xx++;
+ }
+ xDstInSrc+= xInc1;
+ }
+ }
+
+ /* apply src & dst Filter to filter -> filter2
+ av_free(filter);
+ */
+ ASSERT(filterSize>0)
+ filter2Size= filterSize;
+ if(srcFilter) filter2Size+= srcFilter->length - 1;
+ if(dstFilter) filter2Size+= dstFilter->length - 1;
+ ASSERT(filter2Size>0)
+ filter2= av_malloc(filter2Size*dstW*sizeof(double));
+
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+ SwsVector scaleFilter;
+ SwsVector *outVec;
+
+ scaleFilter.coeff= filter + i*filterSize;
+ scaleFilter.length= filterSize;
+
+ if(srcFilter) outVec= sws_getConvVec(srcFilter, &scaleFilter);
+ else outVec= &scaleFilter;
+
+ ASSERT(outVec->length == filter2Size)
+ //FIXME dstFilter
+
+ for(j=0; j<outVec->length; j++)
+ {
+ filter2[i*filter2Size + j]= outVec->coeff[j];
+ }
+
+ (*filterPos)[i]+= (filterSize-1)/2 - (filter2Size-1)/2;
+
+ if(outVec != &scaleFilter) sws_freeVec(outVec);
+ }
+ av_free(filter); filter=NULL;
+
+ /* try to reduce the filter-size (step1 find size and shift left) */
+ // Assume it's near-normalized (*0.5 or *2.0 is ok but *0.001 is not)
+ minFilterSize= 0;
+ for(i=dstW-1; i>=0; i--)
+ {
+ int min= filter2Size;
+ int j;
+ double cutOff=0.0;
+
+ /* get rid of near-zero elements on the left by shifting left */
+ for(j=0; j<filter2Size; j++)
+ {
+ int k;
+ cutOff += FFABS(filter2[i*filter2Size]);
+
+ if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
+
+ /* preserve Monotonicity because the core can't handle the filter otherwise */
+ if(i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break;
+
+ // Move filter coeffs left
+ for(k=1; k<filter2Size; k++)
+ filter2[i*filter2Size + k - 1]= filter2[i*filter2Size + k];
+ filter2[i*filter2Size + k - 1]= 0.0;
+ (*filterPos)[i]++;
+ }
+
+ cutOff=0.0;
+ /* count near zeros on the right */
+ for(j=filter2Size-1; j>0; j--)
+ {
+ cutOff += FFABS(filter2[i*filter2Size + j]);
+
+ if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
+ min--;
+ }
+
+ if(min>minFilterSize) minFilterSize= min;
+ }
+
+ if (flags & SWS_CPU_CAPS_ALTIVEC) {
+ // we can handle the special case 4,
+ // so we don't want to go to the full 8
+ if (minFilterSize < 5)
+ filterAlign = 4;
+
+ // we really don't want to waste our time
+ // doing useless computation, so fall back on
+ // the scalar C code for very small filters.
+ // Vectorizing is worth it only if you have
+ // a decent-sized vector.
+ if (minFilterSize < 3)
+ filterAlign = 1;
+ }
+
+ if (flags & SWS_CPU_CAPS_MMX) {
+ // special case for unscaled vertical filtering
+ if(minFilterSize == 1 && filterAlign == 2)
+ filterAlign= 1;
+ }
+
+ ASSERT(minFilterSize > 0)
+ filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1));
+ ASSERT(filterSize > 0)
+ filter= av_malloc(filterSize*dstW*sizeof(double));
+ if(filterSize >= MAX_FILTER_SIZE)
+ return -1;
+ *outFilterSize= filterSize;
+
+ if(flags&SWS_PRINT_INFO)
+ MSG_V("SwScaler: reducing / aligning filtersize %d -> %d\n", filter2Size, filterSize);
+ /* try to reduce the filter-size (step2 reduce it) */
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+
+ for(j=0; j<filterSize; j++)
+ {
+ if(j>=filter2Size) filter[i*filterSize + j]= 0.0;
+ else filter[i*filterSize + j]= filter2[i*filter2Size + j];
+ }
+ }
+ av_free(filter2); filter2=NULL;
+
+
+ //FIXME try to align filterpos if possible
+
+ //fix borders
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+ if((*filterPos)[i] < 0)
+ {
+ // Move filter coeffs left to compensate for filterPos
+ for(j=1; j<filterSize; j++)
+ {
+ int left= FFMAX(j + (*filterPos)[i], 0);
+ filter[i*filterSize + left] += filter[i*filterSize + j];
+ filter[i*filterSize + j]=0;
+ }
+ (*filterPos)[i]= 0;
+ }
+
+ if((*filterPos)[i] + filterSize > srcW)
+ {
+ int shift= (*filterPos)[i] + filterSize - srcW;
+ // Move filter coeffs right to compensate for filterPos
+ for(j=filterSize-2; j>=0; j--)
+ {
+ int right= FFMIN(j + shift, filterSize-1);
+ filter[i*filterSize +right] += filter[i*filterSize +j];
+ filter[i*filterSize +j]=0;
+ }
+ (*filterPos)[i]= srcW - filterSize;
+ }
+ }
+
+ // Note the +1 is for the MMX scaler which reads over the end
+ /* align at 16 for AltiVec (needed by hScale_altivec_real) */
+ *outFilter= av_malloc(*outFilterSize*(dstW+1)*sizeof(int16_t));
+ memset(*outFilter, 0, *outFilterSize*(dstW+1)*sizeof(int16_t));
+
+ /* Normalize & Store in outFilter */
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+ double error=0;
+ double sum=0;
+ double scale= one;
+
+ for(j=0; j<filterSize; j++)
+ {
+ sum+= filter[i*filterSize + j];
+ }
+ scale/= sum;
+ for(j=0; j<*outFilterSize; j++)
+ {
+ double v= filter[i*filterSize + j]*scale + error;
+ int intV= floor(v + 0.5);
+ (*outFilter)[i*(*outFilterSize) + j]= intV;
+ error = v - intV;
+ }
+ }
+
+ (*filterPos)[dstW]= (*filterPos)[dstW-1]; // the MMX scaler will read over the end
+ for(i=0; i<*outFilterSize; i++)
+ {
+ int j= dstW*(*outFilterSize);
+ (*outFilter)[j + i]= (*outFilter)[j + i - (*outFilterSize)];
+ }
+
+ av_free(filter);
+ return 0;
+}
+
+#ifdef COMPILE_MMX2
+static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits)
+{
+ uint8_t *fragmentA;
+ long imm8OfPShufW1A;
+ long imm8OfPShufW2A;
+ long fragmentLengthA;
+ uint8_t *fragmentB;
+ long imm8OfPShufW1B;
+ long imm8OfPShufW2B;
+ long fragmentLengthB;
+ int fragmentPos;
+
+ int xpos, i;
+
+ // create an optimized horizontal scaling routine
+
+ //code fragment
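+ // The two asm blocks below never run as scalers themselves, they only serve
+ // as templates: each one computes the start address and length of the code
+ // between its labels 0 and 9 plus the byte offsets of the two pshufw
+ // immediates. The loop further down copies those fragments into funnyCode,
+ // patches the pshufw immediates so that every copy fetches the right source
+ // pixels, and terminates the generated routine with a RET opcode.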
+
+ asm volatile(
+ "jmp 9f \n\t"
+ // Begin
+ "0: \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
+ "movd 1(%%"REG_c", %%"REG_S"), %%mm1\n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "pshufw $0xFF, %%mm1, %%mm1 \n\t"
+ "1: \n\t"
+ "pshufw $0xFF, %%mm0, %%mm0 \n\t"
+ "2: \n\t"
+ "psubw %%mm1, %%mm0 \n\t"
+ "movl 8(%%"REG_b", %%"REG_a"), %%esi\n\t"
+ "pmullw %%mm3, %%mm0 \n\t"
+ "psllw $7, %%mm1 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+
+ "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ // End
+ "9: \n\t"
+// "int $3\n\t"
+ "lea 0b, %0 \n\t"
+ "lea 1b, %1 \n\t"
+ "lea 2b, %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea 9b, %3 \n\t"
+ "sub %0, %3 \n\t"
+
+
+ :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
+ "=r" (fragmentLengthA)
+ );
+
+ asm volatile(
+ "jmp 9f \n\t"
+ // Begin
+ "0: \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "pshufw $0xFF, %%mm0, %%mm1 \n\t"
+ "1: \n\t"
+ "pshufw $0xFF, %%mm0, %%mm0 \n\t"
+ "2: \n\t"
+ "psubw %%mm1, %%mm0 \n\t"
+ "movl 8(%%"REG_b", %%"REG_a"), %%esi\n\t"
+ "pmullw %%mm3, %%mm0 \n\t"
+ "psllw $7, %%mm1 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+
+ "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ // End
+ "9: \n\t"
+// "int $3\n\t"
+ "lea 0b, %0 \n\t"
+ "lea 1b, %1 \n\t"
+ "lea 2b, %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea 9b, %3 \n\t"
+ "sub %0, %3 \n\t"
+
+
+ :"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
+ "=r" (fragmentLengthB)
+ );
+
+ xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers
+ fragmentPos=0;
+
+ for(i=0; i<dstW/numSplits; i++)
+ {
+ int xx=xpos>>16;
+
+ if((i&3) == 0)
+ {
+ int a=0;
+ int b=((xpos+xInc)>>16) - xx;
+ int c=((xpos+xInc*2)>>16) - xx;
+ int d=((xpos+xInc*3)>>16) - xx;
+
+ filter[i ] = (( xpos & 0xFFFF) ^ 0xFFFF)>>9;
+ filter[i+1] = (((xpos+xInc ) & 0xFFFF) ^ 0xFFFF)>>9;
+ filter[i+2] = (((xpos+xInc*2) & 0xFFFF) ^ 0xFFFF)>>9;
+ filter[i+3] = (((xpos+xInc*3) & 0xFFFF) ^ 0xFFFF)>>9;
+ filterPos[i/2]= xx;
+
+ if(d+1<4)
+ {
+ int maxShift= 3-(d+1);
+ int shift=0;
+
+ memcpy(funnyCode + fragmentPos, fragmentB, fragmentLengthB);
+
+ funnyCode[fragmentPos + imm8OfPShufW1B]=
+ (a+1) | ((b+1)<<2) | ((c+1)<<4) | ((d+1)<<6);
+ funnyCode[fragmentPos + imm8OfPShufW2B]=
+ a | (b<<2) | (c<<4) | (d<<6);
+
+ if(i+3>=dstW) shift=maxShift; //avoid overread
+ else if((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //Align
+
+ if(shift && i>=shift)
+ {
+ funnyCode[fragmentPos + imm8OfPShufW1B]+= 0x55*shift;
+ funnyCode[fragmentPos + imm8OfPShufW2B]+= 0x55*shift;
+ filterPos[i/2]-=shift;
+ }
+
+ fragmentPos+= fragmentLengthB;
+ }
+ else
+ {
+ int maxShift= 3-d;
+ int shift=0;
+
+ memcpy(funnyCode + fragmentPos, fragmentA, fragmentLengthA);
+
+ funnyCode[fragmentPos + imm8OfPShufW1A]=
+ funnyCode[fragmentPos + imm8OfPShufW2A]=
+ a | (b<<2) | (c<<4) | (d<<6);
+
+ if(i+4>=dstW) shift=maxShift; //avoid overread
+ else if((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //partial align
+
+ if(shift && i>=shift)
+ {
+ funnyCode[fragmentPos + imm8OfPShufW1A]+= 0x55*shift;
+ funnyCode[fragmentPos + imm8OfPShufW2A]+= 0x55*shift;
+ filterPos[i/2]-=shift;
+ }
+
+ fragmentPos+= fragmentLengthA;
+ }
+
+ funnyCode[fragmentPos]= RET;
+ }
+ xpos+=xInc;
+ }
+ filterPos[i/2]= xpos>>16; // needed to jump to the next part
+}
+#endif /* COMPILE_MMX2 */
+
+static void globalInit(void){
+ // generating tables:
+ int i;
+ for(i=0; i<768; i++){
+ int c= FFMIN(FFMAX(i-256, 0), 255);
+ clip_table[i]=c;
+ }
+}
+
+static SwsFunc getSwsFunc(int flags){
+
+#ifdef RUNTIME_CPUDETECT
+#if defined(ARCH_X86)
+ // ordered by speed, fastest first
+ if(flags & SWS_CPU_CAPS_MMX2)
+ return swScale_MMX2;
+ else if(flags & SWS_CPU_CAPS_3DNOW)
+ return swScale_3DNow;
+ else if(flags & SWS_CPU_CAPS_MMX)
+ return swScale_MMX;
+ else
+ return swScale_C;
+
+#else
+#ifdef ARCH_POWERPC
+ if(flags & SWS_CPU_CAPS_ALTIVEC)
+ return swScale_altivec;
+ else
+ return swScale_C;
+#endif
+ return swScale_C;
+#endif /* defined(ARCH_X86) */
+#else //RUNTIME_CPUDETECT
+#ifdef HAVE_MMX2
+ return swScale_MMX2;
+#elif defined (HAVE_3DNOW)
+ return swScale_3DNow;
+#elif defined (HAVE_MMX)
+ return swScale_MMX;
+#elif defined (HAVE_ALTIVEC)
+ return swScale_altivec;
+#else
+ return swScale_C;
+#endif
+#endif //!RUNTIME_CPUDETECT
+}
+
+static int PlanarToNV12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+ /* Copy Y plane */
+ if(dstStride[0]==srcStride[0] && srcStride[0] > 0)
+ memcpy(dst, src[0], srcSliceH*dstStride[0]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst;
+ for(i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, c->srcW);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ dst = dstParam[1] + dstStride[1]*srcSliceY/2;
+ if (c->dstFormat == PIX_FMT_NV12)
+ interleaveBytes( src[1],src[2],dst,c->srcW/2,srcSliceH/2,srcStride[1],srcStride[2],dstStride[0] );
+ else
+ interleaveBytes( src[2],src[1],dst,c->srcW/2,srcSliceH/2,srcStride[2],srcStride[1],dstStride[0] );
+
+ return srcSliceH;
+}
+
+static int PlanarToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+ yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+
+ return srcSliceH;
+}
+
+static int PlanarToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+ yv12touyvy( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+
+ return srcSliceH;
+}
+
+/* {RGB,BGR}{15,16,24,32} -> {RGB,BGR}{15,16,24,32} */
+static int rgb2rgbWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ const int srcFormat= c->srcFormat;
+ const int dstFormat= c->dstFormat;
+ const int srcBpp= (fmt_depth(srcFormat) + 7) >> 3;
+ const int dstBpp= (fmt_depth(dstFormat) + 7) >> 3;
+ const int srcId= fmt_depth(srcFormat) >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */
+ const int dstId= fmt_depth(dstFormat) >> 2;
+ void (*conv)(const uint8_t *src, uint8_t *dst, long src_size)=NULL;
+
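+ /* The switch keys below are srcId | (dstId<<4): the low nibble is the source
+    depth id and the high nibble the destination depth id, e.g. 0x46 selects
+    the 24 bpp (id 6) -> 16 bpp (id 4) converter rgb24to16. */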
+ /* BGR -> BGR */
+ if( (isBGR(srcFormat) && isBGR(dstFormat))
+ || (isRGB(srcFormat) && isRGB(dstFormat))){
+ switch(srcId | (dstId<<4)){
+ case 0x34: conv= rgb16to15; break;
+ case 0x36: conv= rgb24to15; break;
+ case 0x38: conv= rgb32to15; break;
+ case 0x43: conv= rgb15to16; break;
+ case 0x46: conv= rgb24to16; break;
+ case 0x48: conv= rgb32to16; break;
+ case 0x63: conv= rgb15to24; break;
+ case 0x64: conv= rgb16to24; break;
+ case 0x68: conv= rgb32to24; break;
+ case 0x83: conv= rgb15to32; break;
+ case 0x84: conv= rgb16to32; break;
+ case 0x86: conv= rgb24to32; break;
+ default: MSG_ERR("swScaler: internal error %s -> %s converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
+ }
+ }else if( (isBGR(srcFormat) && isRGB(dstFormat))
+ || (isRGB(srcFormat) && isBGR(dstFormat))){
+ switch(srcId | (dstId<<4)){
+ case 0x33: conv= rgb15tobgr15; break;
+ case 0x34: conv= rgb16tobgr15; break;
+ case 0x36: conv= rgb24tobgr15; break;
+ case 0x38: conv= rgb32tobgr15; break;
+ case 0x43: conv= rgb15tobgr16; break;
+ case 0x44: conv= rgb16tobgr16; break;
+ case 0x46: conv= rgb24tobgr16; break;
+ case 0x48: conv= rgb32tobgr16; break;
+ case 0x63: conv= rgb15tobgr24; break;
+ case 0x64: conv= rgb16tobgr24; break;
+ case 0x66: conv= rgb24tobgr24; break;
+ case 0x68: conv= rgb32tobgr24; break;
+ case 0x83: conv= rgb15tobgr32; break;
+ case 0x84: conv= rgb16tobgr32; break;
+ case 0x86: conv= rgb24tobgr32; break;
+ case 0x88: conv= rgb32tobgr32; break;
+ default: MSG_ERR("swScaler: internal error %s -> %s converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
+ }
+ }else{
+ MSG_ERR("swScaler: internal error %s -> %s converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat));
+ }
+
+ if(dstStride[0]*srcBpp == srcStride[0]*dstBpp)
+ conv(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+
+ for(i=0; i<srcSliceH; i++)
+ {
+ conv(srcPtr, dstPtr, c->srcW*srcBpp);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ return srcSliceH;
+}
+
+static int bgr24toyv12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ rgb24toyv12(
+ src[0],
+ dst[0]+ srcSliceY *dstStride[0],
+ dst[1]+(srcSliceY>>1)*dstStride[1],
+ dst[2]+(srcSliceY>>1)*dstStride[2],
+ c->srcW, srcSliceH,
+ dstStride[0], dstStride[1], srcStride[0]);
+ return srcSliceH;
+}
+
+static int yvu9toyv12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int i;
+
+ /* copy Y */
+ if(srcStride[0]==dstStride[0] && srcStride[0] > 0)
+ memcpy(dst[0]+ srcSliceY*dstStride[0], src[0], srcStride[0]*srcSliceH);
+ else{
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+
+ for(i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, c->srcW);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+
+ if(c->dstFormat==PIX_FMT_YUV420P){
+ planar2x(src[1], dst[1], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[1]);
+ planar2x(src[2], dst[2], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[2]);
+ }else{
+ planar2x(src[1], dst[2], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[2]);
+ planar2x(src[2], dst[1], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[1]);
+ }
+ return srcSliceH;
+}
+
+/* unscaled copy-like stuff (assumes nearly identical formats) */
+static int simpleCopy(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ if(isPacked(c->srcFormat))
+ {
+ if(dstStride[0]==srcStride[0] && srcStride[0] > 0)
+ memcpy(dst[0] + dstStride[0]*srcSliceY, src[0], srcSliceH*dstStride[0]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+ int length=0;
+
+ /* universal length finder */
+ while(length+c->srcW <= FFABS(dstStride[0])
+ && length+c->srcW <= FFABS(srcStride[0])) length+= c->srcW;
+ ASSERT(length!=0);
+
+ for(i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, length);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ }
+ else
+ { /* Planar YUV or gray */
+ int plane;
+ for(plane=0; plane<3; plane++)
+ {
+ int length= plane==0 ? c->srcW : -((-c->srcW )>>c->chrDstHSubSample);
+ int y= plane==0 ? srcSliceY: -((-srcSliceY)>>c->chrDstVSubSample);
+ int height= plane==0 ? srcSliceH: -((-srcSliceH)>>c->chrDstVSubSample);
+
+ if((isGray(c->srcFormat) || isGray(c->dstFormat)) && plane>0)
+ {
+ if(!isGray(c->dstFormat))
+ memset(dst[plane], 128, dstStride[plane]*height);
+ }
+ else
+ {
+ if(dstStride[plane]==srcStride[plane] && srcStride[plane] > 0)
+ memcpy(dst[plane] + dstStride[plane]*y, src[plane], height*dstStride[plane]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[plane];
+ uint8_t *dstPtr= dst[plane] + dstStride[plane]*y;
+ for(i=0; i<height; i++)
+ {
+ memcpy(dstPtr, srcPtr, length);
+ srcPtr+= srcStride[plane];
+ dstPtr+= dstStride[plane];
+ }
+ }
+ }
+ }
+ }
+ return srcSliceH;
+}
+
+static int gray16togray(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ int length= c->srcW;
+ int y= srcSliceY;
+ int height= srcSliceH;
+ int i, j;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*y;
+
+ if(!isGray(c->dstFormat)){
+ int height= -((-srcSliceH)>>c->chrDstVSubSample);
+ memset(dst[1], 128, dstStride[1]*height);
+ memset(dst[2], 128, dstStride[2]*height);
+ }
+ if(c->srcFormat == PIX_FMT_GRAY16LE) srcPtr++;
+ for(i=0; i<height; i++)
+ {
+ for(j=0; j<length; j++) dstPtr[j] = srcPtr[j<<1];
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ return srcSliceH;
+}
+
+static int graytogray16(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ int length= c->srcW;
+ int y= srcSliceY;
+ int height= srcSliceH;
+ int i, j;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*y;
+ for(i=0; i<height; i++)
+ {
+ for(j=0; j<length; j++)
+ {
+ dstPtr[j<<1] = srcPtr[j];
+ dstPtr[(j<<1)+1] = srcPtr[j];
+ }
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ return srcSliceH;
+}
+
+static int gray16swap(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ int length= c->srcW;
+ int y= srcSliceY;
+ int height= srcSliceH;
+ int i, j;
+ uint16_t *srcPtr= src[0];
+ uint16_t *dstPtr= dst[0] + dstStride[0]*y/2;
+ for(i=0; i<height; i++)
+ {
+ for(j=0; j<length; j++) dstPtr[j] = bswap_16(srcPtr[j]);
+ srcPtr+= srcStride[0]/2;
+ dstPtr+= dstStride[0]/2;
+ }
+ return srcSliceH;
+}
+
+
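+// Returns the chroma subsampling as log2 factors: e.g. YUV420P gives h=1, v=1
+// (chroma planes are half width and half height), YUV410P gives h=2, v=2 and
+// YUV422P gives h=1, v=0.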
+static void getSubSampleFactors(int *h, int *v, int format){
+ switch(format){
+ case PIX_FMT_UYVY422:
+ case PIX_FMT_YUYV422:
+ *h=1;
+ *v=0;
+ break;
+ case PIX_FMT_YUV420P:
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
+ case PIX_FMT_GRAY8: //FIXME remove after different subsamplings are fully implemented
+ case PIX_FMT_NV12:
+ case PIX_FMT_NV21:
+ *h=1;
+ *v=1;
+ break;
+ case PIX_FMT_YUV410P:
+ *h=2;
+ *v=2;
+ break;
+ case PIX_FMT_YUV444P:
+ *h=0;
+ *v=0;
+ break;
+ case PIX_FMT_YUV422P:
+ *h=1;
+ *v=0;
+ break;
+ case PIX_FMT_YUV411P:
+ *h=2;
+ *v=0;
+ break;
+ default:
+ *h=0;
+ *v=0;
+ break;
+ }
+}
+
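+// Rounds a 16.16 fixed-point value to the nearest integer and saturates it to
+// the signed 16-bit range (returned as the corresponding uint16_t bit pattern).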
+static uint16_t roundToInt16(int64_t f){
+ int r= (f + (1<<15))>>16;
+ if(r<-0x7FFF) return 0x8000;
+ else if(r> 0x7FFF) return 0x7FFF;
+ else return r;
+}
+
+/**
+ * @param inv_table the yuv2rgb coeffs, normally Inverse_Table_6_9[x]
+ * @param fullRange if 1 then the luma range is 0..255, if 0 it is 16..235
+ * @return -1 if not supported
+ */
+int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation){
+ int64_t crv = inv_table[0];
+ int64_t cbu = inv_table[1];
+ int64_t cgu = -inv_table[2];
+ int64_t cgv = -inv_table[3];
+ int64_t cy = 1<<16;
+ int64_t oy = 0;
+
+ if(isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
+ memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4);
+ memcpy(c->dstColorspaceTable, table, sizeof(int)*4);
+
+ c->brightness= brightness;
+ c->contrast = contrast;
+ c->saturation= saturation;
+ c->srcRange = srcRange;
+ c->dstRange = dstRange;
+
+ c->uOffset= 0x0400040004000400LL;
+ c->vOffset= 0x0400040004000400LL;
+
+ if(!srcRange){
+ cy= (cy*255) / 219;
+ oy= 16<<16;
+ }
+
+ cy = (cy *contrast )>>16;
+ crv= (crv*contrast * saturation)>>32;
+ cbu= (cbu*contrast * saturation)>>32;
+ cgu= (cgu*contrast * saturation)>>32;
+ cgv= (cgv*contrast * saturation)>>32;
+
+ oy -= 256*brightness;
+
+ c->yCoeff= roundToInt16(cy *8192) * 0x0001000100010001ULL;
+ c->vrCoeff= roundToInt16(crv*8192) * 0x0001000100010001ULL;
+ c->ubCoeff= roundToInt16(cbu*8192) * 0x0001000100010001ULL;
+ c->vgCoeff= roundToInt16(cgv*8192) * 0x0001000100010001ULL;
+ c->ugCoeff= roundToInt16(cgu*8192) * 0x0001000100010001ULL;
+ c->yOffset= roundToInt16(oy * 8) * 0x0001000100010001ULL;
+
+ yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness, contrast, saturation);
+ //FIXME factorize
+
+#ifdef COMPILE_ALTIVEC
+ if (c->flags & SWS_CPU_CAPS_ALTIVEC)
+ yuv2rgb_altivec_init_tables (c, inv_table, brightness, contrast, saturation);
+#endif
+ return 0;
+}
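+
+/* illustrative sketch only: resetting an existing context to the default
+   tables with neutral brightness/contrast/saturation (contrast and saturation
+   are 16.16 fixed point, so 1<<16 means 1.0):
+    sws_setColorspaceDetails(c, Inverse_Table_6_9[SWS_CS_DEFAULT], srcRange,
+                             Inverse_Table_6_9[SWS_CS_DEFAULT], dstRange,
+                             0, 1<<16, 1<<16);
+*/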
+
+/**
+ * @return -1 if not supported
+ */
+int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation){
+ if(isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
+
+ *inv_table = c->srcColorspaceTable;
+ *table = c->dstColorspaceTable;
+ *srcRange = c->srcRange;
+ *dstRange = c->dstRange;
+ *brightness= c->brightness;
+ *contrast = c->contrast;
+ *saturation= c->saturation;
+
+ return 0;
+}
+
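+// handle_jpeg() maps the full-range JPEG pixel formats onto their limited-range
+// counterparts and returns 1 so that the caller can remember that the plane
+// data actually uses the full 0..255 range.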
+static int handle_jpeg(int *format)
+{
+ switch (*format) {
+ case PIX_FMT_YUVJ420P:
+ *format = PIX_FMT_YUV420P;
+ return 1;
+ case PIX_FMT_YUVJ422P:
+ *format = PIX_FMT_YUV422P;
+ return 1;
+ case PIX_FMT_YUVJ444P:
+ *format = PIX_FMT_YUV444P;
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+SwsContext *sws_getContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, int flags,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param){
+
+ SwsContext *c;
+ int i;
+ int usesVFilter, usesHFilter;
+ int unscaled, needsDither;
+ int srcRange, dstRange;
+ SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
+#if defined(ARCH_X86)
+ if(flags & SWS_CPU_CAPS_MMX)
+ asm volatile("emms\n\t"::: "memory");
+#endif
+
+#ifndef RUNTIME_CPUDETECT //ensure that the flags match the compiled variant if cpudetect is off
+ flags &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC);
+#ifdef HAVE_MMX2
+ flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2;
+#elif defined (HAVE_3DNOW)
+ flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_3DNOW;
+#elif defined (HAVE_MMX)
+ flags |= SWS_CPU_CAPS_MMX;
+#elif defined (HAVE_ALTIVEC)
+ flags |= SWS_CPU_CAPS_ALTIVEC;
+#endif
+#endif /* RUNTIME_CPUDETECT */
+ if(clip_table[512] != 255) globalInit();
+ if(rgb15to16 == NULL) sws_rgb2rgb_init(flags);
+
+ unscaled = (srcW == dstW && srcH == dstH);
+ needsDither= (isBGR(dstFormat) || isRGB(dstFormat))
+ && (fmt_depth(dstFormat))<24
+ && ((fmt_depth(dstFormat))<(fmt_depth(srcFormat)) || (!(isRGB(srcFormat) || isBGR(srcFormat))));
+
+ srcRange = handle_jpeg(&srcFormat);
+ dstRange = handle_jpeg(&dstFormat);
+
+ if(!isSupportedIn(srcFormat))
+ {
+ MSG_ERR("swScaler: %s is not supported as input format\n", sws_format_name(srcFormat));
+ return NULL;
+ }
+ if(!isSupportedOut(dstFormat))
+ {
+ MSG_ERR("swScaler: %s is not supported as output format\n", sws_format_name(dstFormat));
+ return NULL;
+ }
+
+ /* sanity check */
+ if(srcW<4 || srcH<1 || dstW<8 || dstH<1) //FIXME check if these are enough and try to lower them after fixing the relevant parts of the code
+ {
+ MSG_ERR("swScaler: %dx%d -> %dx%d is invalid scaling dimension\n",
+ srcW, srcH, dstW, dstH);
+ return NULL;
+ }
+
+ if(!dstFilter) dstFilter= &dummyFilter;
+ if(!srcFilter) srcFilter= &dummyFilter;
+
+ c= av_malloc(sizeof(SwsContext));
+ memset(c, 0, sizeof(SwsContext));
+
+ c->srcW= srcW;
+ c->srcH= srcH;
+ c->dstW= dstW;
+ c->dstH= dstH;
+ c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW;
+ c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH;
+ c->flags= flags;
+ c->dstFormat= dstFormat;
+ c->srcFormat= srcFormat;
+ c->vRounder= 4* 0x0001000100010001ULL;
+
+ usesHFilter= usesVFilter= 0;
+ if(dstFilter->lumV!=NULL && dstFilter->lumV->length>1) usesVFilter=1;
+ if(dstFilter->lumH!=NULL && dstFilter->lumH->length>1) usesHFilter=1;
+ if(dstFilter->chrV!=NULL && dstFilter->chrV->length>1) usesVFilter=1;
+ if(dstFilter->chrH!=NULL && dstFilter->chrH->length>1) usesHFilter=1;
+ if(srcFilter->lumV!=NULL && srcFilter->lumV->length>1) usesVFilter=1;
+ if(srcFilter->lumH!=NULL && srcFilter->lumH->length>1) usesHFilter=1;
+ if(srcFilter->chrV!=NULL && srcFilter->chrV->length>1) usesVFilter=1;
+ if(srcFilter->chrH!=NULL && srcFilter->chrH->length>1) usesHFilter=1;
+
+ getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat);
+ getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat);
+
+ // reuse chroma for 2 pixels rgb/bgr unless user wants full chroma interpolation
+ if((isBGR(dstFormat) || isRGB(dstFormat)) && !(flags&SWS_FULL_CHR_H_INT)) c->chrDstHSubSample=1;
+
+ // drop some chroma lines if the user wants it
+ c->vChrDrop= (flags&SWS_SRC_V_CHR_DROP_MASK)>>SWS_SRC_V_CHR_DROP_SHIFT;
+ c->chrSrcVSubSample+= c->vChrDrop;
+
+ // drop every 2nd pixel for chroma calculation unless user wants full chroma
+ if((isBGR(srcFormat) || isRGB(srcFormat)) && !(flags&SWS_FULL_CHR_H_INP))
+ c->chrSrcHSubSample=1;
+
+ if(param){
+ c->param[0] = param[0];
+ c->param[1] = param[1];
+ }else{
+ c->param[0] =
+ c->param[1] = SWS_PARAM_DEFAULT;
+ }
+
+ c->chrIntHSubSample= c->chrDstHSubSample;
+ c->chrIntVSubSample= c->chrSrcVSubSample;
+
+ // note the -((-x)>>y) is so that we always round toward +inf
+ c->chrSrcW= -((-srcW) >> c->chrSrcHSubSample);
+ c->chrSrcH= -((-srcH) >> c->chrSrcVSubSample);
+ c->chrDstW= -((-dstW) >> c->chrDstHSubSample);
+ c->chrDstH= -((-dstH) >> c->chrDstVSubSample);
+
+ sws_setColorspaceDetails(c, Inverse_Table_6_9[SWS_CS_DEFAULT], srcRange, Inverse_Table_6_9[SWS_CS_DEFAULT] /* FIXME*/, dstRange, 0, 1<<16, 1<<16);
+
+ /* unscaled special Cases */
+ if(unscaled && !usesHFilter && !usesVFilter)
+ {
+ /* yv12_to_nv12 */
+ if(srcFormat == PIX_FMT_YUV420P && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21))
+ {
+ c->swScale= PlanarToNV12Wrapper;
+ }
+ /* yuv2bgr */
+ if((srcFormat==PIX_FMT_YUV420P || srcFormat==PIX_FMT_YUV422P) && (isBGR(dstFormat) || isRGB(dstFormat)))
+ {
+ c->swScale= yuv2rgb_get_func_ptr(c);
+ }
+
+ if( srcFormat==PIX_FMT_YUV410P && dstFormat==PIX_FMT_YUV420P )
+ {
+ c->swScale= yvu9toyv12Wrapper;
+ }
+
+ /* bgr24toYV12 */
+ if(srcFormat==PIX_FMT_BGR24 && dstFormat==PIX_FMT_YUV420P)
+ c->swScale= bgr24toyv12Wrapper;
+
+ /* rgb/bgr -> rgb/bgr (no dither needed forms) */
+ if( (isBGR(srcFormat) || isRGB(srcFormat))
+ && (isBGR(dstFormat) || isRGB(dstFormat))
+ && !needsDither)
+ c->swScale= rgb2rgbWrapper;
+
+ /* LQ converters if -sws 0 or -sws 4*/
+ if(c->flags&(SWS_FAST_BILINEAR|SWS_POINT)){
+ /* rgb/bgr -> rgb/bgr (dither needed forms) */
+ if( (isBGR(srcFormat) || isRGB(srcFormat))
+ && (isBGR(dstFormat) || isRGB(dstFormat))
+ && needsDither)
+ c->swScale= rgb2rgbWrapper;
+
+ /* yv12_to_yuy2 */
+ if(srcFormat == PIX_FMT_YUV420P &&
+ (dstFormat == PIX_FMT_YUYV422 || dstFormat == PIX_FMT_UYVY422))
+ {
+ if (dstFormat == PIX_FMT_YUYV422)
+ c->swScale= PlanarToYuy2Wrapper;
+ else
+ c->swScale= PlanarToUyvyWrapper;
+ }
+ }
+
+#ifdef COMPILE_ALTIVEC
+ if ((c->flags & SWS_CPU_CAPS_ALTIVEC) &&
+ ((srcFormat == PIX_FMT_YUV420P &&
+ (dstFormat == PIX_FMT_YUYV422 || dstFormat == PIX_FMT_UYVY422)))) {
+ // unscaled YV12 -> packed YUV, we want speed
+ if (dstFormat == PIX_FMT_YUYV422)
+ c->swScale= yv12toyuy2_unscaled_altivec;
+ else
+ c->swScale= yv12touyvy_unscaled_altivec;
+ }
+#endif
+
+ /* simple copy */
+ if( srcFormat == dstFormat
+ || (isPlanarYUV(srcFormat) && isGray(dstFormat))
+ || (isPlanarYUV(dstFormat) && isGray(srcFormat))
+ )
+ {
+ c->swScale= simpleCopy;
+ }
+
+ /* gray16{le,be} conversions */
+ if(isGray16(srcFormat) && (isPlanarYUV(dstFormat) || (dstFormat == PIX_FMT_GRAY8)))
+ {
+ c->swScale= gray16togray;
+ }
+ if((isPlanarYUV(srcFormat) || (srcFormat == PIX_FMT_GRAY8)) && isGray16(dstFormat))
+ {
+ c->swScale= graytogray16;
+ }
+ if(srcFormat != dstFormat && isGray16(srcFormat) && isGray16(dstFormat))
+ {
+ c->swScale= gray16swap;
+ }
+
+ if(c->swScale){
+ if(flags&SWS_PRINT_INFO)
+ MSG_INFO("SwScaler: using unscaled %s -> %s special converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat));
+ return c;
+ }
+ }
+
+ if(flags & SWS_CPU_CAPS_MMX2)
+ {
+ c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0;
+ if(!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR))
+ {
+ if(flags&SWS_PRINT_INFO)
+ MSG_INFO("SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\n");
+ }
+ if(usesHFilter) c->canMMX2BeUsed=0;
+ }
+ else
+ c->canMMX2BeUsed=0;
+
+ c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW;
+ c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH;
+
+ // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
+ // but only for the FAST_BILINEAR mode otherwise do correct scaling
+ // n-2 is the last chrominance sample available
+ // this is not perfect, but no one should notice the difference, the more correct variant
+ // would be like the vertical one, but that would require some special code for the
+ // first and last pixel
+ if(flags&SWS_FAST_BILINEAR)
+ {
+ if(c->canMMX2BeUsed)
+ {
+ c->lumXInc+= 20;
+ c->chrXInc+= 20;
+ }
+ //we don't use the x86asm scaler if mmx is available
+ else if(flags & SWS_CPU_CAPS_MMX)
+ {
+ c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;
+ c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20;
+ }
+ }
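+
+ /*
+  * Worked example with assumed sizes: for srcW=720 and dstW=360 the 16.16
+  * fixed-point step computed above is lumXInc = ((720<<16) + 180) / 360
+  * = 131072, i.e. exactly 2.0 source pixels per output pixel.  The
+  * +20 / -20 adjustments above only bias that step by 20/65536 (about
+  * 0.0003 of a source pixel) for the FAST_BILINEAR code paths.
+  */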
+
+ /* precalculate horizontal scaler filter coefficients */
+ {
+ const int filterAlign=
+ (flags & SWS_CPU_CAPS_MMX) ? 4 :
+ (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
+
+ initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
+ srcW , dstW, filterAlign, 1<<14,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
+ srcFilter->lumH, dstFilter->lumH, c->param);
+ initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc,
+ c->chrSrcW, c->chrDstW, filterAlign, 1<<14,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
+ srcFilter->chrH, dstFilter->chrH, c->param);
+
+#define MAX_FUNNY_CODE_SIZE 10000
+#if defined(COMPILE_MMX2)
+// can't downscale !!!
+ if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
+ {
+#ifdef MAP_ANONYMOUS
+ c->funnyYCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ c->funnyUVCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+#else
+ c->funnyYCode = av_malloc(MAX_FUNNY_CODE_SIZE);
+ c->funnyUVCode = av_malloc(MAX_FUNNY_CODE_SIZE);
+#endif
+
+ c->lumMmx2Filter = av_malloc((dstW /8+8)*sizeof(int16_t));
+ c->chrMmx2Filter = av_malloc((c->chrDstW /4+8)*sizeof(int16_t));
+ c->lumMmx2FilterPos= av_malloc((dstW /2/8+8)*sizeof(int32_t));
+ c->chrMmx2FilterPos= av_malloc((c->chrDstW/2/4+8)*sizeof(int32_t));
+
+ initMMX2HScaler( dstW, c->lumXInc, c->funnyYCode , c->lumMmx2Filter, c->lumMmx2FilterPos, 8);
+ initMMX2HScaler(c->chrDstW, c->chrXInc, c->funnyUVCode, c->chrMmx2Filter, c->chrMmx2FilterPos, 4);
+ }
+#endif /* defined(COMPILE_MMX2) */
+ } // Init Horizontal stuff
+
+
+
+ /* precalculate vertical scaler filter coefficients */
+ {
+ const int filterAlign=
+ (flags & SWS_CPU_CAPS_MMX) && (flags & SWS_ACCURATE_RND) ? 2 :
+ (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
+
+ initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
+ srcH , dstH, filterAlign, (1<<12)-4,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
+ srcFilter->lumV, dstFilter->lumV, c->param);
+ initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
+ c->chrSrcH, c->chrDstH, filterAlign, (1<<12)-4,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
+ srcFilter->chrV, dstFilter->chrV, c->param);
+
+#ifdef HAVE_ALTIVEC
+ c->vYCoeffsBank = av_malloc(sizeof (vector signed short)*c->vLumFilterSize*c->dstH);
+ c->vCCoeffsBank = av_malloc(sizeof (vector signed short)*c->vChrFilterSize*c->chrDstH);
+
+ for (i=0;i<c->vLumFilterSize*c->dstH;i++) {
+ int j;
+ short *p = (short *)&c->vYCoeffsBank[i];
+ for (j=0;j<8;j++)
+ p[j] = c->vLumFilter[i];
+ }
+
+ for (i=0;i<c->vChrFilterSize*c->chrDstH;i++) {
+ int j;
+ short *p = (short *)&c->vCCoeffsBank[i];
+ for (j=0;j<8;j++)
+ p[j] = c->vChrFilter[i];
+ }
+#endif
+ }
+
+ // Calculate Buffer Sizes so that they won't run out while handling these damn slices
+ c->vLumBufSize= c->vLumFilterSize;
+ c->vChrBufSize= c->vChrFilterSize;
+ for(i=0; i<dstH; i++)
+ {
+ int chrI= i*c->chrDstH / dstH;
+ int nextSlice= FFMAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1,
+ ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample));
+
+ nextSlice>>= c->chrSrcVSubSample;
+ nextSlice<<= c->chrSrcVSubSample;
+ if(c->vLumFilterPos[i ] + c->vLumBufSize < nextSlice)
+ c->vLumBufSize= nextSlice - c->vLumFilterPos[i ];
+ if(c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>c->chrSrcVSubSample))
+ c->vChrBufSize= (nextSlice>>c->chrSrcVSubSample) - c->vChrFilterPos[chrI];
+ }
+
+ // allocate pixbufs (we use dynamic allocation because otherwise we would need to
+ c->lumPixBuf= av_malloc(c->vLumBufSize*2*sizeof(int16_t*));
+ c->chrPixBuf= av_malloc(c->vChrBufSize*2*sizeof(int16_t*));
+ //Note we need at least one pixel more at the end because of the mmx code (just in case someone wants to replace the 4000/8000)
+ /* align at 16 bytes for AltiVec */
+ for(i=0; i<c->vLumBufSize; i++)
+ c->lumPixBuf[i]= c->lumPixBuf[i+c->vLumBufSize]= av_malloc(4000);
+ for(i=0; i<c->vChrBufSize; i++)
+ c->chrPixBuf[i]= c->chrPixBuf[i+c->vChrBufSize]= av_malloc(8000);
+
+ //try to avoid drawing green stuff between the right end and the stride end
+ for(i=0; i<c->vLumBufSize; i++) memset(c->lumPixBuf[i], 0, 4000);
+ for(i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, 8000);
+
+ ASSERT(c->chrDstH <= dstH)
+
+ if(flags&SWS_PRINT_INFO)
+ {
+#ifdef DITHER1XBPP
+ char *dither= " dithered";
+#else
+ char *dither= "";
+#endif
+ if(flags&SWS_FAST_BILINEAR)
+ MSG_INFO("SwScaler: FAST_BILINEAR scaler, ");
+ else if(flags&SWS_BILINEAR)
+ MSG_INFO("SwScaler: BILINEAR scaler, ");
+ else if(flags&SWS_BICUBIC)
+ MSG_INFO("SwScaler: BICUBIC scaler, ");
+ else if(flags&SWS_X)
+ MSG_INFO("SwScaler: Experimental scaler, ");
+ else if(flags&SWS_POINT)
+ MSG_INFO("SwScaler: Nearest Neighbor / POINT scaler, ");
+ else if(flags&SWS_AREA)
+ MSG_INFO("SwScaler: Area Averageing scaler, ");
+ else if(flags&SWS_BICUBLIN)
+ MSG_INFO("SwScaler: luma BICUBIC / chroma BILINEAR scaler, ");
+ else if(flags&SWS_GAUSS)
+ MSG_INFO("SwScaler: Gaussian scaler, ");
+ else if(flags&SWS_SINC)
+ MSG_INFO("SwScaler: Sinc scaler, ");
+ else if(flags&SWS_LANCZOS)
+ MSG_INFO("SwScaler: Lanczos scaler, ");
+ else if(flags&SWS_SPLINE)
+ MSG_INFO("SwScaler: Bicubic spline scaler, ");
+ else
+ MSG_INFO("SwScaler: ehh flags invalid?! ");
+
+ if(dstFormat==PIX_FMT_BGR555 || dstFormat==PIX_FMT_BGR565)
+ MSG_INFO("from %s to%s %s ",
+ sws_format_name(srcFormat), dither, sws_format_name(dstFormat));
+ else
+ MSG_INFO("from %s to %s ",
+ sws_format_name(srcFormat), sws_format_name(dstFormat));
+
+ if(flags & SWS_CPU_CAPS_MMX2)
+ MSG_INFO("using MMX2\n");
+ else if(flags & SWS_CPU_CAPS_3DNOW)
+ MSG_INFO("using 3DNOW\n");
+ else if(flags & SWS_CPU_CAPS_MMX)
+ MSG_INFO("using MMX\n");
+ else if(flags & SWS_CPU_CAPS_ALTIVEC)
+ MSG_INFO("using AltiVec\n");
+ else
+ MSG_INFO("using C\n");
+ }
+
+ if(flags & SWS_PRINT_INFO)
+ {
+ if(flags & SWS_CPU_CAPS_MMX)
+ {
+ if(c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR))
+ MSG_V("SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n");
+ else
+ {
+ if(c->hLumFilterSize==4)
+ MSG_V("SwScaler: using 4-tap MMX scaler for horizontal luminance scaling\n");
+ else if(c->hLumFilterSize==8)
+ MSG_V("SwScaler: using 8-tap MMX scaler for horizontal luminance scaling\n");
+ else
+ MSG_V("SwScaler: using n-tap MMX scaler for horizontal luminance scaling\n");
+
+ if(c->hChrFilterSize==4)
+ MSG_V("SwScaler: using 4-tap MMX scaler for horizontal chrominance scaling\n");
+ else if(c->hChrFilterSize==8)
+ MSG_V("SwScaler: using 8-tap MMX scaler for horizontal chrominance scaling\n");
+ else
+ MSG_V("SwScaler: using n-tap MMX scaler for horizontal chrominance scaling\n");
+ }
+ }
+ else
+ {
+#if defined(ARCH_X86)
+ MSG_V("SwScaler: using X86-Asm scaler for horizontal scaling\n");
+#else
+ if(flags & SWS_FAST_BILINEAR)
+ MSG_V("SwScaler: using FAST_BILINEAR C scaler for horizontal scaling\n");
+ else
+ MSG_V("SwScaler: using C scaler for horizontal scaling\n");
+#endif
+ }
+ if(isPlanarYUV(dstFormat))
+ {
+ if(c->vLumFilterSize==1)
+ MSG_V("SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else
+ MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ }
+ else
+ {
+ if(c->vLumFilterSize==1 && c->vChrFilterSize==2)
+ MSG_V("SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
+ "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",(flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else if(c->vLumFilterSize==2 && c->vChrFilterSize==2)
+ MSG_V("SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else
+ MSG_V("SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ }
+
+ if(dstFormat==PIX_FMT_BGR24)
+ MSG_V("SwScaler: using %s YV12->BGR24 Converter\n",
+ (flags & SWS_CPU_CAPS_MMX2) ? "MMX2" : ((flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"));
+ else if(dstFormat==PIX_FMT_RGB32)
+ MSG_V("SwScaler: using %s YV12->BGR32 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else if(dstFormat==PIX_FMT_BGR565)
+ MSG_V("SwScaler: using %s YV12->BGR16 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else if(dstFormat==PIX_FMT_BGR555)
+ MSG_V("SwScaler: using %s YV12->BGR15 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+
+ MSG_V("SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
+ }
+ if(flags & SWS_PRINT_INFO)
+ {
+ MSG_DBG2("SwScaler:Lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
+ c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
+ MSG_DBG2("SwScaler:Chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
+ c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc);
+ }
+
+ c->swScale= getSwsFunc(flags);
+ return c;
+}
+
+/**
+ * swscale wrapper, so we don't need to export the SwsContext.
+ * assumes the planar YUV data is in YUV order rather than YVU
+ */
+int sws_scale_ordered(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
+ MSG_ERR("swScaler: slices start in the middle!\n");
+ return 0;
+ }
+ if (c->sliceDir == 0) {
+ if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1;
+ }
+
+ // copy strides, so they can safely be modified
+ if (c->sliceDir == 1) {
+ // slices go from top to bottom
+ int srcStride2[3]= {srcStride[0], srcStride[1], srcStride[2]};
+ int dstStride2[3]= {dstStride[0], dstStride[1], dstStride[2]};
+ return c->swScale(c, src, srcStride2, srcSliceY, srcSliceH, dst, dstStride2);
+ } else {
+ // slices go from bottom to top => we flip the image internally
+ uint8_t* src2[3]= {src[0] + (srcSliceH-1)*srcStride[0],
+ src[1] + ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[1],
+ src[2] + ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[2]
+ };
+ uint8_t* dst2[3]= {dst[0] + (c->dstH-1)*dstStride[0],
+ dst[1] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[1],
+ dst[2] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[2]};
+ int srcStride2[3]= {-srcStride[0], -srcStride[1], -srcStride[2]};
+ int dstStride2[3]= {-dstStride[0], -dstStride[1], -dstStride[2]};
+
+ return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2);
+ }
+}
+
+/**
+ * swscale wrapper, so we don't need to export the SwsContext
+ */
+int sws_scale(SwsContext *c, uint8_t* srcParam[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ uint8_t *src[3];
+ uint8_t *dst[3];
+ src[0] = srcParam[0]; src[1] = srcParam[1]; src[2] = srcParam[2];
+ dst[0] = dstParam[0]; dst[1] = dstParam[1]; dst[2] = dstParam[2];
+//printf("sws: slice %d %d\n", srcSliceY, srcSliceH);
+
+ return c->swScale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
+}
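+
+/*
+ * A minimal whole-frame usage sketch (buffer layout, sizes and names are
+ * assumptions for illustration): convert one YV12 frame to BGR24 with
+ * bicubic scaling in a single call covering all source lines.
+ */
+#if 0
+static int example_scale_frame(uint8_t *yv12[3], int yv12Stride[3],
+                               uint8_t *bgr, int bgrStride,
+                               int inW, int inH, int outW, int outH)
+{
+    uint8_t *dst[3]       = { bgr, NULL, NULL };
+    int      dstStride[3] = { bgrStride, 0, 0 };
+    int      ret          = -1;
+    SwsContext *ctx = sws_getContext(inW, inH, PIX_FMT_YUV420P,
+                                     outW, outH, PIX_FMT_BGR24,
+                                     SWS_BICUBIC, NULL, NULL, NULL);
+    if (ctx) {
+        /* srcSliceY=0 and srcSliceH=inH: the whole frame as one slice */
+        ret = sws_scale(ctx, yv12, yv12Stride, 0, inH, dst, dstStride);
+        sws_freeContext(ctx);
+    }
+    return ret;
+}
+#endif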
+
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+ float lumaSharpen, float chromaSharpen,
+ float chromaHShift, float chromaVShift,
+ int verbose)
+{
+ SwsFilter *filter= av_malloc(sizeof(SwsFilter));
+
+ if(lumaGBlur!=0.0){
+ filter->lumH= sws_getGaussianVec(lumaGBlur, 3.0);
+ filter->lumV= sws_getGaussianVec(lumaGBlur, 3.0);
+ }else{
+ filter->lumH= sws_getIdentityVec();
+ filter->lumV= sws_getIdentityVec();
+ }
+
+ if(chromaGBlur!=0.0){
+ filter->chrH= sws_getGaussianVec(chromaGBlur, 3.0);
+ filter->chrV= sws_getGaussianVec(chromaGBlur, 3.0);
+ }else{
+ filter->chrH= sws_getIdentityVec();
+ filter->chrV= sws_getIdentityVec();
+ }
+
+ if(chromaSharpen!=0.0){
+ SwsVector *id= sws_getIdentityVec();
+ sws_scaleVec(filter->chrH, -chromaSharpen);
+ sws_scaleVec(filter->chrV, -chromaSharpen);
+ sws_addVec(filter->chrH, id);
+ sws_addVec(filter->chrV, id);
+ sws_freeVec(id);
+ }
+
+ if(lumaSharpen!=0.0){
+ SwsVector *id= sws_getIdentityVec();
+ sws_scaleVec(filter->lumH, -lumaSharpen);
+ sws_scaleVec(filter->lumV, -lumaSharpen);
+ sws_addVec(filter->lumH, id);
+ sws_addVec(filter->lumV, id);
+ sws_freeVec(id);
+ }
+
+ if(chromaHShift != 0.0)
+ sws_shiftVec(filter->chrH, (int)(chromaHShift+0.5));
+
+ if(chromaVShift != 0.0)
+ sws_shiftVec(filter->chrV, (int)(chromaVShift+0.5));
+
+ sws_normalizeVec(filter->chrH, 1.0);
+ sws_normalizeVec(filter->chrV, 1.0);
+ sws_normalizeVec(filter->lumH, 1.0);
+ sws_normalizeVec(filter->lumV, 1.0);
+
+ if(verbose) sws_printVec(filter->chrH);
+ if(verbose) sws_printVec(filter->lumH);
+
+ return filter;
+}
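+
+/*
+ * A small sketch of the default filter (all numeric values are illustrative
+ * assumptions): a light Gaussian blur on luma and chroma, handed to
+ * sws_getContext() as the source filter.  The filter can be freed right
+ * away, since the context builds its own filter tables during setup.
+ */
+#if 0
+static SwsContext *example_filtered_context(int w, int h)
+{
+    SwsFilter  *f   = sws_getDefaultFilter(0.5, 0.5, /* luma / chroma blur */
+                                           0.0, 0.0, /* no sharpening */
+                                           0.0, 0.0, /* no chroma shift */
+                                           0);
+    SwsContext *ctx = sws_getContext(w, h, PIX_FMT_YUV420P,
+                                     w, h, PIX_FMT_YUV420P,
+                                     SWS_BICUBIC, f, NULL, NULL);
+    sws_freeFilter(f);
+    return ctx;
+}
+#endif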
+
+/**
+ * returns a normalized Gaussian curve used to filter stuff
+ * quality=3 is high quality, lower is lower quality
+ */
+SwsVector *sws_getGaussianVec(double variance, double quality){
+ const int length= (int)(variance*quality + 0.5) | 1;
+ int i;
+ double *coeff= av_malloc(length*sizeof(double));
+ double middle= (length-1)*0.5;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for(i=0; i<length; i++)
+ {
+ double dist= i-middle;
+ coeff[i]= exp( -dist*dist/(2*variance*variance) ) / sqrt(2*variance*PI);
+ }
+
+ sws_normalizeVec(vec, 1.0);
+
+ return vec;
+}
+
+SwsVector *sws_getConstVec(double c, int length){
+ int i;
+ double *coeff= av_malloc(length*sizeof(double));
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for(i=0; i<length; i++)
+ coeff[i]= c;
+
+ return vec;
+}
+
+
+SwsVector *sws_getIdentityVec(void){
+ return sws_getConstVec(1.0, 1);
+}
+
+double sws_dcVec(SwsVector *a){
+ int i;
+ double sum=0;
+
+ for(i=0; i<a->length; i++)
+ sum+= a->coeff[i];
+
+ return sum;
+}
+
+void sws_scaleVec(SwsVector *a, double scalar){
+ int i;
+
+ for(i=0; i<a->length; i++)
+ a->coeff[i]*= scalar;
+}
+
+void sws_normalizeVec(SwsVector *a, double height){
+ sws_scaleVec(a, height/sws_dcVec(a));
+}
+
+static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b){
+ int length= a->length + b->length - 1;
+ double *coeff= av_malloc(length*sizeof(double));
+ int i, j;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for(i=0; i<length; i++) coeff[i]= 0.0;
+
+ for(i=0; i<a->length; i++)
+ {
+ for(j=0; j<b->length; j++)
+ {
+ coeff[i+j]+= a->coeff[i]*b->coeff[j];
+ }
+ }
+
+ return vec;
+}
+
+static SwsVector *sws_sumVec(SwsVector *a, SwsVector *b){
+ int length= FFMAX(a->length, b->length);
+ double *coeff= av_malloc(length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for(i=0; i<length; i++) coeff[i]= 0.0;
+
+ for(i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
+ for(i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]+= b->coeff[i];
+
+ return vec;
+}
+
+static SwsVector *sws_diffVec(SwsVector *a, SwsVector *b){
+ int length= FFMAX(a->length, b->length);
+ double *coeff= av_malloc(length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for(i=0; i<length; i++) coeff[i]= 0.0;
+
+ for(i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
+ for(i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]-= b->coeff[i];
+
+ return vec;
+}
+
+/* shifts the vector left, or right if "shift" is negative */
+static SwsVector *sws_getShiftedVec(SwsVector *a, int shift){
+ int length= a->length + FFABS(shift)*2;
+ double *coeff= av_malloc(length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= length;
+
+ for(i=0; i<length; i++) coeff[i]= 0.0;
+
+ for(i=0; i<a->length; i++)
+ {
+ coeff[i + (length-1)/2 - (a->length-1)/2 - shift]= a->coeff[i];
+ }
+
+ return vec;
+}
+
+void sws_shiftVec(SwsVector *a, int shift){
+ SwsVector *shifted= sws_getShiftedVec(a, shift);
+ av_free(a->coeff);
+ a->coeff= shifted->coeff;
+ a->length= shifted->length;
+ av_free(shifted);
+}
+
+void sws_addVec(SwsVector *a, SwsVector *b){
+ SwsVector *sum= sws_sumVec(a, b);
+ av_free(a->coeff);
+ a->coeff= sum->coeff;
+ a->length= sum->length;
+ av_free(sum);
+}
+
+void sws_subVec(SwsVector *a, SwsVector *b){
+ SwsVector *diff= sws_diffVec(a, b);
+ av_free(a->coeff);
+ a->coeff= diff->coeff;
+ a->length= diff->length;
+ av_free(diff);
+}
+
+void sws_convVec(SwsVector *a, SwsVector *b){
+ SwsVector *conv= sws_getConvVec(a, b);
+ av_free(a->coeff);
+ a->coeff= conv->coeff;
+ a->length= conv->length;
+ av_free(conv);
+}
+
+SwsVector *sws_cloneVec(SwsVector *a){
+ double *coeff= av_malloc(a->length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
+
+ vec->coeff= coeff;
+ vec->length= a->length;
+
+ for(i=0; i<a->length; i++) coeff[i]= a->coeff[i];
+
+ return vec;
+}
+
+void sws_printVec(SwsVector *a){
+ int i;
+ double max=0;
+ double min=0;
+ double range;
+
+ for(i=0; i<a->length; i++)
+ if(a->coeff[i]>max) max= a->coeff[i];
+
+ for(i=0; i<a->length; i++)
+ if(a->coeff[i]<min) min= a->coeff[i];
+
+ range= max - min;
+
+ for(i=0; i<a->length; i++)
+ {
+ int x= (int)((a->coeff[i]-min)*60.0/range +0.5);
+ MSG_DBG2("%1.3f ", a->coeff[i]);
+ for(;x>0; x--) MSG_DBG2(" ");
+ MSG_DBG2("|\n");
+ }
+}
+
+void sws_freeVec(SwsVector *a){
+ if(!a) return;
+ av_free(a->coeff);
+ a->coeff=NULL;
+ a->length=0;
+ av_free(a);
+}
+
+void sws_freeFilter(SwsFilter *filter){
+ if(!filter) return;
+
+ if(filter->lumH) sws_freeVec(filter->lumH);
+ if(filter->lumV) sws_freeVec(filter->lumV);
+ if(filter->chrH) sws_freeVec(filter->chrH);
+ if(filter->chrV) sws_freeVec(filter->chrV);
+ av_free(filter);
+}
+
+
+void sws_freeContext(SwsContext *c){
+ int i;
+ if(!c) return;
+
+ if(c->lumPixBuf)
+ {
+ for(i=0; i<c->vLumBufSize; i++)
+ {
+ av_free(c->lumPixBuf[i]);
+ c->lumPixBuf[i]=NULL;
+ }
+ av_free(c->lumPixBuf);
+ c->lumPixBuf=NULL;
+ }
+
+ if(c->chrPixBuf)
+ {
+ for(i=0; i<c->vChrBufSize; i++)
+ {
+ av_free(c->chrPixBuf[i]);
+ c->chrPixBuf[i]=NULL;
+ }
+ av_free(c->chrPixBuf);
+ c->chrPixBuf=NULL;
+ }
+
+ av_free(c->vLumFilter);
+ c->vLumFilter = NULL;
+ av_free(c->vChrFilter);
+ c->vChrFilter = NULL;
+ av_free(c->hLumFilter);
+ c->hLumFilter = NULL;
+ av_free(c->hChrFilter);
+ c->hChrFilter = NULL;
+#ifdef HAVE_ALTIVEC
+ av_free(c->vYCoeffsBank);
+ c->vYCoeffsBank = NULL;
+ av_free(c->vCCoeffsBank);
+ c->vCCoeffsBank = NULL;
+#endif
+
+ av_free(c->vLumFilterPos);
+ c->vLumFilterPos = NULL;
+ av_free(c->vChrFilterPos);
+ c->vChrFilterPos = NULL;
+ av_free(c->hLumFilterPos);
+ c->hLumFilterPos = NULL;
+ av_free(c->hChrFilterPos);
+ c->hChrFilterPos = NULL;
+
+#if defined(ARCH_X86)
+#ifdef MAP_ANONYMOUS
+ if(c->funnyYCode) munmap(c->funnyYCode, MAX_FUNNY_CODE_SIZE);
+ if(c->funnyUVCode) munmap(c->funnyUVCode, MAX_FUNNY_CODE_SIZE);
+#else
+ av_free(c->funnyYCode);
+ av_free(c->funnyUVCode);
+#endif
+ c->funnyYCode=NULL;
+ c->funnyUVCode=NULL;
+#endif /* defined(ARCH_X86) */
+
+ av_free(c->lumMmx2Filter);
+ c->lumMmx2Filter=NULL;
+ av_free(c->chrMmx2Filter);
+ c->chrMmx2Filter=NULL;
+ av_free(c->lumMmx2FilterPos);
+ c->lumMmx2FilterPos=NULL;
+ av_free(c->chrMmx2FilterPos);
+ c->chrMmx2FilterPos=NULL;
+ av_free(c->yuvTable);
+ c->yuvTable=NULL;
+
+ av_free(c);
+}
+
+/**
+ * Checks if the context can be reused, otherwise reallocates a new one.
+ * If context is NULL, just calls sws_getContext() to get a new one.
+ * Otherwise, checks if the parameters are the same as those already saved
+ * in the context. If that is the case, returns the current context.
+ * Otherwise, frees the context and gets a new one.
+ *
+ * Be warned that srcFilter and dstFilter are not checked; they are
+ * assumed to remain valid.
+ */
+struct SwsContext *sws_getCachedContext(struct SwsContext *context,
+ int srcW, int srcH, int srcFormat,
+ int dstW, int dstH, int dstFormat, int flags,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param)
+{
+ if (context != NULL) {
+ if ((context->srcW != srcW) || (context->srcH != srcH) ||
+ (context->srcFormat != srcFormat) ||
+ (context->dstW != dstW) || (context->dstH != dstH) ||
+ (context->dstFormat != dstFormat) || (context->flags != flags) ||
+ (param && (context->param[0] != param[0] || context->param[1] != param[1])))
+ {
+ sws_freeContext(context);
+ context = NULL;
+ }
+ }
+ if (context == NULL) {
+ return sws_getContext(srcW, srcH, srcFormat,
+ dstW, dstH, dstFormat, flags,
+ srcFilter, dstFilter, param);
+ }
+ return context;
+}
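+
+/*
+ * Per-frame sketch of the caching pattern (the calling loop and variable
+ * names are assumptions): the previous pointer is passed back in, and a
+ * fresh context is only built when the geometry or flags change.
+ */
+#if 0
+static SwsContext *cached_ctx = NULL;
+
+static void example_rescale_frame(int w, int h, int outW, int outH,
+                                  uint8_t *src[3], int srcStride[3],
+                                  uint8_t *dst[3], int dstStride[3])
+{
+    cached_ctx = sws_getCachedContext(cached_ctx, w, h, PIX_FMT_YUV420P,
+                                      outW, outH, PIX_FMT_YUV420P,
+                                      SWS_BILINEAR, NULL, NULL, NULL);
+    if (cached_ctx)
+        sws_scale(cached_ctx, src, srcStride, 0, h, dst, dstStride);
+}
+#endif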
+
diff --git a/contrib/ffmpeg/libswscale/swscale.h b/contrib/ffmpeg/libswscale/swscale.h
new file mode 100644
index 000000000..06088b8e4
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/swscale.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWSCALE_H
+#define SWSCALE_H
+
+/**
+ * @file swscale.h
+ * @brief
+ * external API for the swscale stuff
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define LIBSWSCALE_VERSION_INT ((0<<16)+(5<<8)+0)
+#define LIBSWSCALE_VERSION 0.5.0
+#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT
+
+#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION)
+
+/* values for the flags, the stuff on the command line is different */
+#define SWS_FAST_BILINEAR 1
+#define SWS_BILINEAR 2
+#define SWS_BICUBIC 4
+#define SWS_X 8
+#define SWS_POINT 0x10
+#define SWS_AREA 0x20
+#define SWS_BICUBLIN 0x40
+#define SWS_GAUSS 0x80
+#define SWS_SINC 0x100
+#define SWS_LANCZOS 0x200
+#define SWS_SPLINE 0x400
+
+#define SWS_SRC_V_CHR_DROP_MASK 0x30000
+#define SWS_SRC_V_CHR_DROP_SHIFT 16
+
+#define SWS_PARAM_DEFAULT 123456
+
+#define SWS_PRINT_INFO 0x1000
+
+//the following 3 flags are not completely implemented
+//internal chrominance subsampling info
+#define SWS_FULL_CHR_H_INT 0x2000
+//input subsampling info
+#define SWS_FULL_CHR_H_INP 0x4000
+#define SWS_DIRECT_BGR 0x8000
+#define SWS_ACCURATE_RND 0x40000
+
+#define SWS_CPU_CAPS_MMX 0x80000000
+#define SWS_CPU_CAPS_MMX2 0x20000000
+#define SWS_CPU_CAPS_3DNOW 0x40000000
+#define SWS_CPU_CAPS_ALTIVEC 0x10000000
+
+#define SWS_MAX_REDUCE_CUTOFF 0.002
+
+#define SWS_CS_ITU709 1
+#define SWS_CS_FCC 4
+#define SWS_CS_ITU601 5
+#define SWS_CS_ITU624 5
+#define SWS_CS_SMPTE170M 5
+#define SWS_CS_SMPTE240M 7
+#define SWS_CS_DEFAULT 5
+
+
+
+// when used for filters they must have an odd number of elements
+// coeffs cannot be shared between vectors
+typedef struct {
+ double *coeff;
+ int length;
+} SwsVector;
+
+// vectors can be shared
+typedef struct {
+ SwsVector *lumH;
+ SwsVector *lumV;
+ SwsVector *chrH;
+ SwsVector *chrV;
+} SwsFilter;
+
+struct SwsContext;
+
+void sws_freeContext(struct SwsContext *swsContext);
+
+struct SwsContext *sws_getContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, int flags,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param);
+int sws_scale(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]);
+int sws_scale_ordered(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]);
+
+
+int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation);
+int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation);
+SwsVector *sws_getGaussianVec(double variance, double quality);
+SwsVector *sws_getConstVec(double c, int length);
+SwsVector *sws_getIdentityVec(void);
+void sws_scaleVec(SwsVector *a, double scalar);
+void sws_normalizeVec(SwsVector *a, double height);
+void sws_convVec(SwsVector *a, SwsVector *b);
+void sws_addVec(SwsVector *a, SwsVector *b);
+void sws_subVec(SwsVector *a, SwsVector *b);
+void sws_shiftVec(SwsVector *a, int shift);
+SwsVector *sws_cloneVec(SwsVector *a);
+
+void sws_printVec(SwsVector *a);
+void sws_freeVec(SwsVector *a);
+
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+ float lumaSharpen, float chromaSharpen,
+ float chromaHShift, float chromaVShift,
+ int verbose);
+void sws_freeFilter(SwsFilter *filter);
+
+struct SwsContext *sws_getCachedContext(struct SwsContext *context,
+ int srcW, int srcH, int srcFormat,
+ int dstW, int dstH, int dstFormat, int flags,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/contrib/ffmpeg/libswscale/swscale_altivec_template.c b/contrib/ffmpeg/libswscale/swscale_altivec_template.c
new file mode 100644
index 000000000..d65c28538
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/swscale_altivec_template.c
@@ -0,0 +1,548 @@
+/*
+ * AltiVec-enhanced yuv2yuvX
+ *
+ * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
+ * based on the equivalent C code in "postproc/swscale.c"
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifdef CONFIG_DARWIN
+#define AVV(x...) (x)
+#else
+#define AVV(x...) {x}
+#endif
+
+#define vzero vec_splat_s32(0)
+
+static inline void
+altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) {
+ register int i;
+ vector unsigned int altivec_vectorShiftInt19 =
+ vec_add(vec_splat_u32(10),vec_splat_u32(9));
+ if ((unsigned long)dest % 16) {
+ /* badly aligned store, we force store alignment */
+ /* and will handle load misalignment on val w/ vec_perm */
+ vector unsigned char perm1;
+ vector signed int v1;
+ for (i = 0 ; (i < dstW) &&
+ (((unsigned long)dest + i) % 16) ; i++) {
+ int t = val[i] >> 19;
+ dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
+ }
+ perm1 = vec_lvsl(i << 2, val);
+ v1 = vec_ld(i << 2, val);
+ for ( ; i < (dstW - 15); i+=16) {
+ int offset = i << 2;
+ vector signed int v2 = vec_ld(offset + 16, val);
+ vector signed int v3 = vec_ld(offset + 32, val);
+ vector signed int v4 = vec_ld(offset + 48, val);
+ vector signed int v5 = vec_ld(offset + 64, val);
+ vector signed int v12 = vec_perm(v1,v2,perm1);
+ vector signed int v23 = vec_perm(v2,v3,perm1);
+ vector signed int v34 = vec_perm(v3,v4,perm1);
+ vector signed int v45 = vec_perm(v4,v5,perm1);
+
+ vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
+ vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
+ vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
+ vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
+ vector unsigned short vs1 = vec_packsu(vA, vB);
+ vector unsigned short vs2 = vec_packsu(vC, vD);
+ vector unsigned char vf = vec_packsu(vs1, vs2);
+ vec_st(vf, i, dest);
+ v1 = v5;
+ }
+ } else { // dest is properly aligned, great
+ for (i = 0; i < (dstW - 15); i+=16) {
+ int offset = i << 2;
+ vector signed int v1 = vec_ld(offset, val);
+ vector signed int v2 = vec_ld(offset + 16, val);
+ vector signed int v3 = vec_ld(offset + 32, val);
+ vector signed int v4 = vec_ld(offset + 48, val);
+ vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
+ vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
+ vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
+ vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
+ vector unsigned short vs1 = vec_packsu(v5, v6);
+ vector unsigned short vs2 = vec_packsu(v7, v8);
+ vector unsigned char vf = vec_packsu(vs1, vs2);
+ vec_st(vf, i, dest);
+ }
+ }
+ for ( ; i < dstW ; i++) {
+ int t = val[i] >> 19;
+ dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
+ }
+}
+
+static inline void
+yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+{
+ const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)};
+ register int i, j;
+ {
+ int __attribute__ ((aligned (16))) val[dstW];
+
+ for (i = 0; i < (dstW -7); i+=4) {
+ vec_st(vini, i << 2, val);
+ }
+ for (; i < dstW; i++) {
+ val[i] = (1 << 18);
+ }
+
+ for (j = 0; j < lumFilterSize; j++) {
+ vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter);
+ vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter);
+ vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
+ vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter
+
+ perm = vec_lvsl(0, lumSrc[j]);
+ l1 = vec_ld(0, lumSrc[j]);
+
+ for (i = 0; i < (dstW - 7); i+=8) {
+ int offset = i << 2;
+ vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);
+
+ vector signed int v1 = vec_ld(offset, val);
+ vector signed int v2 = vec_ld(offset + 16, val);
+
+ vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]
+
+ vector signed int i1 = vec_mule(vLumFilter, ls);
+ vector signed int i2 = vec_mulo(vLumFilter, ls);
+
+ vector signed int vf1 = vec_mergeh(i1, i2);
+ vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]
+
+ vector signed int vo1 = vec_add(v1, vf1);
+ vector signed int vo2 = vec_add(v2, vf2);
+
+ vec_st(vo1, offset, val);
+ vec_st(vo2, offset + 16, val);
+
+ l1 = l2;
+ }
+ for ( ; i < dstW; i++) {
+ val[i] += lumSrc[j][i] * lumFilter[j];
+ }
+ }
+ altivec_packIntArrayToCharArray(val,dest,dstW);
+ }
+ if (uDest != 0) {
+ int __attribute__ ((aligned (16))) u[chrDstW];
+ int __attribute__ ((aligned (16))) v[chrDstW];
+
+ for (i = 0; i < (chrDstW -7); i+=4) {
+ vec_st(vini, i << 2, u);
+ vec_st(vini, i << 2, v);
+ }
+ for (; i < chrDstW; i++) {
+ u[i] = (1 << 18);
+ v[i] = (1 << 18);
+ }
+
+ for (j = 0; j < chrFilterSize; j++) {
+ vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter);
+ vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter);
+ vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
+ vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter
+
+ perm = vec_lvsl(0, chrSrc[j]);
+ l1 = vec_ld(0, chrSrc[j]);
+ l1_V = vec_ld(2048 << 1, chrSrc[j]);
+
+ for (i = 0; i < (chrDstW - 7); i+=8) {
+ int offset = i << 2;
+ vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]);
+ vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]);
+
+ vector signed int v1 = vec_ld(offset, u);
+ vector signed int v2 = vec_ld(offset + 16, u);
+ vector signed int v1_V = vec_ld(offset, v);
+ vector signed int v2_V = vec_ld(offset + 16, v);
+
+ vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7]
+ vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055]
+
+ vector signed int i1 = vec_mule(vChrFilter, ls);
+ vector signed int i2 = vec_mulo(vChrFilter, ls);
+ vector signed int i1_V = vec_mule(vChrFilter, ls_V);
+ vector signed int i2_V = vec_mulo(vChrFilter, ls_V);
+
+ vector signed int vf1 = vec_mergeh(i1, i2);
+ vector signed int vf2 = vec_mergel(i1, i2); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
+ vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
+ vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
+
+ vector signed int vo1 = vec_add(v1, vf1);
+ vector signed int vo2 = vec_add(v2, vf2);
+ vector signed int vo1_V = vec_add(v1_V, vf1_V);
+ vector signed int vo2_V = vec_add(v2_V, vf2_V);
+
+ vec_st(vo1, offset, u);
+ vec_st(vo2, offset + 16, u);
+ vec_st(vo1_V, offset, v);
+ vec_st(vo2_V, offset + 16, v);
+
+ l1 = l2;
+ l1_V = l2_V;
+ }
+ for ( ; i < chrDstW; i++) {
+ u[i] += chrSrc[j][i] * chrFilter[j];
+ v[i] += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+ }
+ altivec_packIntArrayToCharArray(u,uDest,chrDstW);
+ altivec_packIntArrayToCharArray(v,vDest,chrDstW);
+ }
+}
+
+static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, int16_t *filter, int16_t *filterPos, int filterSize) {
+ register int i;
+ int __attribute__ ((aligned (16))) tempo[4];
+
+ if (filterSize % 4) {
+ for(i=0; i<dstW; i++) {
+ register int j;
+ register int srcPos = filterPos[i];
+ register int val = 0;
+ for(j=0; j<filterSize; j++) {
+ val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+ }
+ dst[i] = FFMIN(FFMAX(0, val>>7), (1<<15)-1);
+ }
+ }
+ else
+ switch (filterSize) {
+ case 4:
+ {
+ for(i=0; i<dstW; i++) {
+ register int srcPos = filterPos[i];
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char src_v1, src_vF;
+ vector signed short src_v, filter_v;
+ vector signed int val_vEven, val_s;
+ if ((((int)src + srcPos)% 16) > 12) {
+ src_v1 = vec_ld(srcPos + 16, src);
+ }
+ src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+ src_v = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ // now put our elements in the even slots
+ src_v = vec_mergeh(src_v, (vector signed short)vzero);
+
+ filter_v = vec_ld(i << 3, filter);
+ // the 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2)
+
+ // the neat trick : we only care for half the elements,
+ // high or low depending on (i<<3)%16 (it's 0 or 8 here),
+ // and we're going to use vec_mule, so we chose
+ // carefully how to "unpack" the elements into the even slots
+ if ((i << 3) % 16)
+ filter_v = vec_mergel(filter_v,(vector signed short)vzero);
+ else
+ filter_v = vec_mergeh(filter_v,(vector signed short)vzero);
+
+ val_vEven = vec_mule(src_v, filter_v);
+ val_s = vec_sums(val_vEven, vzero);
+ vec_st(val_s, 0, tempo);
+ dst[i] = FFMIN(FFMAX(0, tempo[3]>>7), (1<<15)-1);
+ }
+ }
+ break;
+
+ case 8:
+ {
+ for(i=0; i<dstW; i++) {
+ register int srcPos = filterPos[i];
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char src_v1, src_vF;
+ vector signed short src_v, filter_v;
+ vector signed int val_v, val_s;
+ if ((((int)src + srcPos)% 16) > 8) {
+ src_v1 = vec_ld(srcPos + 16, src);
+ }
+ src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+ src_v = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ filter_v = vec_ld(i << 4, filter);
+ // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)
+
+ val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
+ val_s = vec_sums(val_v, vzero);
+ vec_st(val_s, 0, tempo);
+ dst[i] = FFMIN(FFMAX(0, tempo[3]>>7), (1<<15)-1);
+ }
+ }
+ break;
+
+ case 16:
+ {
+ for(i=0; i<dstW; i++) {
+ register int srcPos = filterPos[i];
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
+ vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+ vector signed short src_vA = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ vector signed short src_vB = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+ vector signed short filter_v0 = vec_ld(i << 5, filter);
+ vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
+ // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)
+
+ vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
+ vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+ vector signed int val_s = vec_sums(val_v, vzero);
+
+ vec_st(val_s, 0, tempo);
+ dst[i] = FFMIN(FFMAX(0, tempo[3]>>7), (1<<15)-1);
+ }
+ }
+ break;
+
+ default:
+ {
+ for(i=0; i<dstW; i++) {
+ register int j;
+ register int srcPos = filterPos[i];
+
+ vector signed int val_s, val_v = (vector signed int)vzero;
+ vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
+ vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char permS = vec_lvsl(srcPos, src);
+
+ for (j = 0 ; j < filterSize - 15; j += 16) {
+ vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
+ vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+
+ vector signed short src_vA = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ vector signed short src_vB = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+ vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+ vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
+ vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
+ vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);
+
+ vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
+ val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+ filter_v0R = filter_v2R;
+ src_v0 = src_v1;
+ }
+
+ if (j < (filterSize-7)) {
+ // loading src_v0 is useless, it's already done above
+ //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
+ vector unsigned char src_v1, src_vF;
+ vector signed short src_v, filter_v1R, filter_v;
+ if ((((int)src + srcPos)% 16) > 8) {
+ src_v1 = vec_ld(srcPos + j + 16, src);
+ }
+ src_vF = vec_perm(src_v0, src_v1, permS);
+
+ src_v = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ // loading filter_v0R is useless, it's already done above
+ //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
+ filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+ filter_v = vec_perm(filter_v0R, filter_v1R, permF);
+
+ val_v = vec_msums(src_v, filter_v, val_v);
+ }
+
+ val_s = vec_sums(val_v, vzero);
+
+ vec_st(val_s, 0, tempo);
+ dst[i] = FFMIN(FFMAX(0, tempo[3]>>7), (1<<15)-1);
+ }
+
+ }
+ }
+}
+
+static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
+ uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
+ // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+ uint8_t *ysrc = src[0];
+ uint8_t *usrc = src[1];
+ uint8_t *vsrc = src[2];
+ const int width = c->srcW;
+ const int height = srcSliceH;
+ const int lumStride = srcStride[0];
+ const int chromStride = srcStride[1];
+ const int dstStride = dstStride_a[0];
+ const vector unsigned char yperm = vec_lvsl(0, ysrc);
+ const int vertLumPerChroma = 2;
+ register unsigned int y;
+
+ if(width&15){
+ yv12toyuy2( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
+ return srcSliceH;
+ }
+
+ /* this code assumes:
+
+ 1) dst is 16-byte aligned
+ 2) dstStride is a multiple of 16
+ 3) width is a multiple of 16
+ 4) lum & chrom strides are multiples of 8
+ */
+
+ for(y=0; y<height; y++)
+ {
+ int i;
+ for (i = 0; i < width - 31; i+= 32) {
+ const unsigned int j = i >> 1;
+ vector unsigned char v_yA = vec_ld(i, ysrc);
+ vector unsigned char v_yB = vec_ld(i + 16, ysrc);
+ vector unsigned char v_yC = vec_ld(i + 32, ysrc);
+ vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
+ vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
+ vector unsigned char v_uA = vec_ld(j, usrc);
+ vector unsigned char v_uB = vec_ld(j + 16, usrc);
+ vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
+ vector unsigned char v_vA = vec_ld(j, vsrc);
+ vector unsigned char v_vB = vec_ld(j + 16, vsrc);
+ vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
+ vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+ vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
+ vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
+ vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
+ vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
+ vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
+ vec_st(v_yuy2_0, (i << 1), dst);
+ vec_st(v_yuy2_1, (i << 1) + 16, dst);
+ vec_st(v_yuy2_2, (i << 1) + 32, dst);
+ vec_st(v_yuy2_3, (i << 1) + 48, dst);
+ }
+ if (i < width) {
+ const unsigned int j = i >> 1;
+ vector unsigned char v_y1 = vec_ld(i, ysrc);
+ vector unsigned char v_u = vec_ld(j, usrc);
+ vector unsigned char v_v = vec_ld(j, vsrc);
+ vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+ vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
+ vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
+ vec_st(v_yuy2_0, (i << 1), dst);
+ vec_st(v_yuy2_1, (i << 1) + 16, dst);
+ }
+ if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
+
+ return srcSliceH;
+}
+
+static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
+ uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
+ // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+ uint8_t *ysrc = src[0];
+ uint8_t *usrc = src[1];
+ uint8_t *vsrc = src[2];
+ const int width = c->srcW;
+ const int height = srcSliceH;
+ const int lumStride = srcStride[0];
+ const int chromStride = srcStride[1];
+ const int dstStride = dstStride_a[0];
+ const int vertLumPerChroma = 2;
+ const vector unsigned char yperm = vec_lvsl(0, ysrc);
+ register unsigned int y;
+
+ if(width&15){
+ yv12touyvy( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
+ return srcSliceH;
+ }
+
+ /* this code assumes:
+
+ 1) dst is 16-byte aligned
+ 2) dstStride is a multiple of 16
+ 3) width is a multiple of 16
+ 4) lum & chrom strides are multiples of 8
+ */
+
+ for(y=0; y<height; y++)
+ {
+ int i;
+ for (i = 0; i < width - 31; i+= 32) {
+ const unsigned int j = i >> 1;
+ vector unsigned char v_yA = vec_ld(i, ysrc);
+ vector unsigned char v_yB = vec_ld(i + 16, ysrc);
+ vector unsigned char v_yC = vec_ld(i + 32, ysrc);
+ vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
+ vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
+ vector unsigned char v_uA = vec_ld(j, usrc);
+ vector unsigned char v_uB = vec_ld(j + 16, usrc);
+ vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
+ vector unsigned char v_vA = vec_ld(j, vsrc);
+ vector unsigned char v_vB = vec_ld(j + 16, vsrc);
+ vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
+ vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+ vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
+ vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
+ vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
+ vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2);
+ vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2);
+ vec_st(v_uyvy_0, (i << 1), dst);
+ vec_st(v_uyvy_1, (i << 1) + 16, dst);
+ vec_st(v_uyvy_2, (i << 1) + 32, dst);
+ vec_st(v_uyvy_3, (i << 1) + 48, dst);
+ }
+ if (i < width) {
+ const unsigned int j = i >> 1;
+ vector unsigned char v_y1 = vec_ld(i, ysrc);
+ vector unsigned char v_u = vec_ld(j, usrc);
+ vector unsigned char v_v = vec_ld(j, vsrc);
+ vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+ vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
+ vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
+ vec_st(v_uyvy_0, (i << 1), dst);
+ vec_st(v_uyvy_1, (i << 1) + 16, dst);
+ }
+ if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
+ return srcSliceH;
+}
diff --git a/contrib/ffmpeg/libswscale/swscale_internal.h b/contrib/ffmpeg/libswscale/swscale_internal.h
new file mode 100644
index 000000000..837b6eaf5
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/swscale_internal.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWSCALE_INTERNAL_H
+#define SWSCALE_INTERNAL_H
+
+#ifdef HAVE_ALTIVEC_H
+#include <altivec.h>
+#endif
+
+#include "avutil.h"
+
+#ifdef CONFIG_DARWIN
+#define AVV(x...) (x)
+#else
+#define AVV(x...) {x}
+#endif
+
+#define MSG_WARN(args...) av_log(NULL, AV_LOG_DEBUG, ##args )
+#define MSG_FATAL(args...) av_log(NULL, AV_LOG_ERROR, ##args )
+#define MSG_ERR(args...) av_log(NULL, AV_LOG_ERROR, ##args )
+#define MSG_V(args...) av_log(NULL, AV_LOG_INFO, ##args )
+#define MSG_DBG2(args...) av_log(NULL, AV_LOG_DEBUG, ##args )
+#define MSG_INFO(args...) av_log(NULL, AV_LOG_INFO, ##args )
+
+#define MAX_FILTER_SIZE 256
+
+typedef int (*SwsFunc)(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]);
+
+/* this struct should be aligned on at least a 32-byte boundary */
+typedef struct SwsContext{
+ /**
+ *
+ * Note that src, dst, srcStride and dstStride will be copied in the sws_scale() wrapper, so they can freely be modified here
+ */
+ SwsFunc swScale;
+ int srcW, srcH, dstH;
+ int chrSrcW, chrSrcH, chrDstW, chrDstH;
+ int lumXInc, chrXInc;
+ int lumYInc, chrYInc;
+ int dstFormat, srcFormat; ///< format 4:2:0 type is always YV12
+ int origDstFormat, origSrcFormat; ///< format
+ int chrSrcHSubSample, chrSrcVSubSample;
+ int chrIntHSubSample, chrIntVSubSample;
+ int chrDstHSubSample, chrDstVSubSample;
+ int vChrDrop;
+ int sliceDir;
+ double param[2];
+
+ int16_t **lumPixBuf;
+ int16_t **chrPixBuf;
+ int16_t *hLumFilter;
+ int16_t *hLumFilterPos;
+ int16_t *hChrFilter;
+ int16_t *hChrFilterPos;
+ int16_t *vLumFilter;
+ int16_t *vLumFilterPos;
+ int16_t *vChrFilter;
+ int16_t *vChrFilterPos;
+
+ uint8_t formatConvBuffer[4000]; //FIXME dynamic alloc, but we have to change a lot of code for this to be useful
+
+ int hLumFilterSize;
+ int hChrFilterSize;
+ int vLumFilterSize;
+ int vChrFilterSize;
+ int vLumBufSize;
+ int vChrBufSize;
+
+ uint8_t *funnyYCode;
+ uint8_t *funnyUVCode;
+ int32_t *lumMmx2FilterPos;
+ int32_t *chrMmx2FilterPos;
+ int16_t *lumMmx2Filter;
+ int16_t *chrMmx2Filter;
+
+ int canMMX2BeUsed;
+
+ int lastInLumBuf;
+ int lastInChrBuf;
+ int lumBufIndex;
+ int chrBufIndex;
+ int dstY;
+ int flags;
+ void * yuvTable; // pointer to the yuv->rgb table start so it can be freed()
+ void * table_rV[256];
+ void * table_gU[256];
+ int table_gV[256];
+ void * table_bU[256];
+
+ //Colorspace stuff
+ int contrast, brightness, saturation; // for sws_getColorspaceDetails
+ int srcColorspaceTable[4];
+ int dstColorspaceTable[4];
+ int srcRange, dstRange;
+
+#define RED_DITHER "0*8"
+#define GREEN_DITHER "1*8"
+#define BLUE_DITHER "2*8"
+#define Y_COEFF "3*8"
+#define VR_COEFF "4*8"
+#define UB_COEFF "5*8"
+#define VG_COEFF "6*8"
+#define UG_COEFF "7*8"
+#define Y_OFFSET "8*8"
+#define U_OFFSET "9*8"
+#define V_OFFSET "10*8"
+#define LUM_MMX_FILTER_OFFSET "11*8"
+#define CHR_MMX_FILTER_OFFSET "11*8+4*4*256"
+#define DSTW_OFFSET "11*8+4*4*256*2" //do not change, it's hardcoded in the asm
+#define ESP_OFFSET "11*8+4*4*256*2+8"
+#define VROUNDER_OFFSET "11*8+4*4*256*2+16"
+#define U_TEMP "11*8+4*4*256*2+24"
+#define V_TEMP "11*8+4*4*256*2+32"
+
+ uint64_t redDither __attribute__((aligned(8)));
+ uint64_t greenDither __attribute__((aligned(8)));
+ uint64_t blueDither __attribute__((aligned(8)));
+
+ uint64_t yCoeff __attribute__((aligned(8)));
+ uint64_t vrCoeff __attribute__((aligned(8)));
+ uint64_t ubCoeff __attribute__((aligned(8)));
+ uint64_t vgCoeff __attribute__((aligned(8)));
+ uint64_t ugCoeff __attribute__((aligned(8)));
+ uint64_t yOffset __attribute__((aligned(8)));
+ uint64_t uOffset __attribute__((aligned(8)));
+ uint64_t vOffset __attribute__((aligned(8)));
+ int32_t lumMmxFilter[4*MAX_FILTER_SIZE];
+ int32_t chrMmxFilter[4*MAX_FILTER_SIZE];
+ int dstW;
+ uint64_t esp __attribute__((aligned(8)));
+ uint64_t vRounder __attribute__((aligned(8)));
+ uint64_t u_temp __attribute__((aligned(8)));
+ uint64_t v_temp __attribute__((aligned(8)));
+
+#ifdef HAVE_ALTIVEC
+
+ vector signed short CY;
+ vector signed short CRV;
+ vector signed short CBU;
+ vector signed short CGU;
+ vector signed short CGV;
+ vector signed short OY;
+ vector unsigned short CSHIFT;
+ vector signed short *vYCoeffsBank, *vCCoeffsBank;
+
+#endif
+
+} SwsContext;
+//FIXME check init (where 0)
+
+SwsFunc yuv2rgb_get_func_ptr (SwsContext *c);
+int yuv2rgb_c_init_tables (SwsContext *c, const int inv_table[4], int fullRange, int brightness, int contrast, int saturation);
+
+char *sws_format_name(int format);
+
+//FIXME replace this with something faster
+#define isPlanarYUV(x) ((x)==PIX_FMT_YUV410P || (x)==PIX_FMT_YUV420P \
+ || (x)==PIX_FMT_YUV411P || (x)==PIX_FMT_YUV422P \
+ || (x)==PIX_FMT_YUV444P || (x)==PIX_FMT_NV12 \
+ || (x)==PIX_FMT_NV21)
+#define isYUV(x) ((x)==PIX_FMT_UYVY422 || (x)==PIX_FMT_YUYV422 || isPlanarYUV(x))
+#define isGray(x) ((x)==PIX_FMT_GRAY8 || (x)==PIX_FMT_GRAY16BE || (x)==PIX_FMT_GRAY16LE)
+#define isGray16(x) ((x)==PIX_FMT_GRAY16BE || (x)==PIX_FMT_GRAY16LE)
+#define isRGB(x) ((x)==PIX_FMT_BGR32 || (x)==PIX_FMT_RGB24 \
+ || (x)==PIX_FMT_RGB565 || (x)==PIX_FMT_RGB555 \
+ || (x)==PIX_FMT_RGB8 || (x)==PIX_FMT_RGB4 \
+ || (x)==PIX_FMT_MONOBLACK)
+#define isBGR(x) ((x)==PIX_FMT_RGB32 || (x)==PIX_FMT_BGR24 \
+ || (x)==PIX_FMT_BGR565 || (x)==PIX_FMT_BGR555 \
+ || (x)==PIX_FMT_BGR8 || (x)==PIX_FMT_BGR4 \
+ || (x)==PIX_FMT_MONOBLACK)
+
+static inline int fmt_depth(int fmt)
+{
+ switch(fmt) {
+ case PIX_FMT_BGRA:
+ case PIX_FMT_ABGR:
+ case PIX_FMT_RGBA:
+ case PIX_FMT_ARGB:
+ return 32;
+ case PIX_FMT_BGR24:
+ case PIX_FMT_RGB24:
+ return 24;
+ case PIX_FMT_BGR565:
+ case PIX_FMT_RGB565:
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
+ return 16;
+ case PIX_FMT_BGR555:
+ case PIX_FMT_RGB555:
+ return 15;
+ case PIX_FMT_BGR8:
+ case PIX_FMT_RGB8:
+ return 8;
+ case PIX_FMT_BGR4:
+ case PIX_FMT_RGB4:
+ case PIX_FMT_BGR4_BYTE:
+ case PIX_FMT_RGB4_BYTE:
+ return 4;
+ case PIX_FMT_MONOBLACK:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+#endif
diff --git a/contrib/ffmpeg/libswscale/swscale_template.c b/contrib/ffmpeg/libswscale/swscale_template.c
new file mode 100644
index 000000000..e725a3bb0
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/swscale_template.c
@@ -0,0 +1,3241 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * the C code (not assembly, mmx, ...) of this file can be used
+ * under the LGPL license too
+ */
+
+#undef REAL_MOVNTQ
+#undef MOVNTQ
+#undef PAVGB
+#undef PREFETCH
+#undef PREFETCHW
+#undef EMMS
+#undef SFENCE
+
+#ifdef HAVE_3DNOW
+/* On K6, femms is faster than emms. On K7, femms maps directly to emms. */
+#define EMMS "femms"
+#else
+#define EMMS "emms"
+#endif
+
+#ifdef HAVE_3DNOW
+#define PREFETCH "prefetch"
+#define PREFETCHW "prefetchw"
+#elif defined ( HAVE_MMX2 )
+#define PREFETCH "prefetchnta"
+#define PREFETCHW "prefetcht0"
+#else
+#define PREFETCH " # nop"
+#define PREFETCHW " # nop"
+#endif
+
+#ifdef HAVE_MMX2
+#define SFENCE "sfence"
+#else
+#define SFENCE " # nop"
+#endif
+
+#ifdef HAVE_MMX2
+#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
+#elif defined (HAVE_3DNOW)
+#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
+#endif
+
+#ifdef HAVE_MMX2
+#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
+#else
+#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
+#endif
+#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
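+/* With MMX2, MOVNTQ expands to a non-temporal store (movntq) so the written pixels bypass the cache; without it the macro degrades to a plain movq. */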
+
+#ifdef HAVE_ALTIVEC
+#include "swscale_altivec_template.c"
+#endif
+
+#define YSCALEYUV2YV12X(x, offset, dest, width) \
+ asm volatile(\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ ASMALIGN(4) /* FIXME Unroll? */\
+ "1: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm3 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ " jnz 1b \n\t"\
+ "psraw $3, %%mm3 \n\t"\
+ "psraw $3, %%mm4 \n\t"\
+ "packuswb %%mm4, %%mm3 \n\t"\
+ MOVNTQ(%%mm3, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "cmp %2, %%"REG_a" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "jb 1b \n\t"\
+ :: "r" (&c->redDither),\
+ "r" (dest), "g" (width)\
+ : "%"REG_a, "%"REG_d, "%"REG_S\
+ );
+
+#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
+ asm volatile(\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ "pxor %%mm4, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ ASMALIGN(4) \
+ "1: \n\t"\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm0\n\t" /* srcData */\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
+ "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm1\n\t" /* srcData */\
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklwd %%mm1, %%mm0 \n\t"\
+ "punpckhwd %%mm1, %%mm3 \n\t"\
+ "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm3 \n\t"\
+ "paddd %%mm0, %%mm4 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm3\n\t" /* srcData */\
+ "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
+ "add $16, %%"REG_d" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklwd %%mm3, %%mm2 \n\t"\
+ "punpckhwd %%mm3, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm2 \n\t"\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "paddd %%mm2, %%mm6 \n\t"\
+ "paddd %%mm0, %%mm7 \n\t"\
+ " jnz 1b \n\t"\
+ "psrad $16, %%mm4 \n\t"\
+ "psrad $16, %%mm5 \n\t"\
+ "psrad $16, %%mm6 \n\t"\
+ "psrad $16, %%mm7 \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
+ "packssdw %%mm5, %%mm4 \n\t"\
+ "packssdw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm0, %%mm4 \n\t"\
+ "paddw %%mm0, %%mm6 \n\t"\
+ "psraw $3, %%mm4 \n\t"\
+ "psraw $3, %%mm6 \n\t"\
+ "packuswb %%mm6, %%mm4 \n\t"\
+ MOVNTQ(%%mm4, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "cmp %2, %%"REG_a" \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "pxor %%mm4, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "jb 1b \n\t"\
+ :: "r" (&c->redDither),\
+ "r" (dest), "g" (width)\
+ : "%"REG_a, "%"REG_d, "%"REG_S\
+ );
+
+#define YSCALEYUV2YV121 \
+ "mov %2, %%"REG_a" \n\t"\
+ ASMALIGN(4) /* FIXME Unroll? */\
+ "1: \n\t"\
+ "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
+ "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
+ "psraw $7, %%mm0 \n\t"\
+ "psraw $7, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ MOVNTQ(%%mm0, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "jnc 1b \n\t"
+
+/*
+ :: "m" (-lumFilterSize), "m" (-chrFilterSize),
+ "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
+ "r" (dest), "m" (dstW),
+ "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
+ : "%eax", "%ebx", "%ecx", "%edx", "%esi"
+*/
+#define YSCALEYUV2PACKEDX \
+ asm volatile(\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN(4)\
+ "nop \n\t"\
+ "1: \n\t"\
+ "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ ASMALIGN(4)\
+ "2: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm3 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ " jnz 2b \n\t"\
+\
+ "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
+ "movq %%mm1, %%mm7 \n\t"\
+ ASMALIGN(4)\
+ "2: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm1 \n\t"\
+ "paddw %%mm5, %%mm7 \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ " jnz 2b \n\t"\
+
+#define YSCALEYUV2PACKEDX_END\
+ :: "r" (&c->redDither), \
+ "m" (dummy), "m" (dummy), "m" (dummy),\
+ "r" (dest), "m" (dstW)\
+ : "%"REG_a, "%"REG_d, "%"REG_S\
+ );
+
+#define YSCALEYUV2PACKEDX_ACCURATE \
+ asm volatile(\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN(4)\
+ "nop \n\t"\
+ "1: \n\t"\
+ "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pxor %%mm4, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ ASMALIGN(4)\
+ "2: \n\t"\
+ "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
+ "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
+ "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklwd %%mm1, %%mm0 \n\t"\
+ "punpckhwd %%mm1, %%mm3 \n\t"\
+ "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm3 \n\t"\
+ "paddd %%mm0, %%mm4 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
+ "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
+ "add $16, %%"REG_d" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklwd %%mm3, %%mm2 \n\t"\
+ "punpckhwd %%mm3, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm2 \n\t"\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "paddd %%mm2, %%mm6 \n\t"\
+ "paddd %%mm0, %%mm7 \n\t"\
+ " jnz 2b \n\t"\
+ "psrad $16, %%mm4 \n\t"\
+ "psrad $16, %%mm5 \n\t"\
+ "psrad $16, %%mm6 \n\t"\
+ "psrad $16, %%mm7 \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
+ "packssdw %%mm5, %%mm4 \n\t"\
+ "packssdw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm0, %%mm4 \n\t"\
+ "paddw %%mm0, %%mm6 \n\t"\
+ "movq %%mm4, "U_TEMP"(%0) \n\t"\
+ "movq %%mm6, "V_TEMP"(%0) \n\t"\
+\
+ "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pxor %%mm1, %%mm1 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ ASMALIGN(4)\
+ "2: \n\t"\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
+ "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklwd %%mm4, %%mm0 \n\t"\
+ "punpckhwd %%mm4, %%mm3 \n\t"\
+ "movq 8(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
+ "pmaddwd %%mm4, %%mm0 \n\t"\
+ "pmaddwd %%mm4, %%mm3 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
+ "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
+ "add $16, %%"REG_d" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklwd %%mm3, %%mm2 \n\t"\
+ "punpckhwd %%mm3, %%mm0 \n\t"\
+ "pmaddwd %%mm4, %%mm2 \n\t"\
+ "pmaddwd %%mm4, %%mm0 \n\t"\
+ "paddd %%mm2, %%mm7 \n\t"\
+ "paddd %%mm0, %%mm6 \n\t"\
+ " jnz 2b \n\t"\
+ "psrad $16, %%mm1 \n\t"\
+ "psrad $16, %%mm5 \n\t"\
+ "psrad $16, %%mm7 \n\t"\
+ "psrad $16, %%mm6 \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
+ "packssdw %%mm5, %%mm1 \n\t"\
+ "packssdw %%mm6, %%mm7 \n\t"\
+ "paddw %%mm0, %%mm1 \n\t"\
+ "paddw %%mm0, %%mm7 \n\t"\
+ "movq "U_TEMP"(%0), %%mm3 \n\t"\
+ "movq "V_TEMP"(%0), %%mm4 \n\t"\
+
+#define YSCALEYUV2RGBX \
+ "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
+ "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
+ "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
+ "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#if 0
+#define FULL_YSCALEYUV2RGB \
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movd %6, %%mm6 \n\t" /*yalpha1*/\
+ "punpcklwd %%mm6, %%mm6 \n\t"\
+ "punpcklwd %%mm6, %%mm6 \n\t"\
+ "movd %7, %%mm5 \n\t" /*uvalpha1*/\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
+ "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
+\
+\
+ "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
+ "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
+ "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
+\
+\
+ "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
+ "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
+ "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
+ "paddw %%mm1, %%mm3 \n\t" /* B*/\
+ "paddw %%mm1, %%mm0 \n\t" /* R*/\
+ "packuswb %%mm3, %%mm3 \n\t"\
+\
+ "packuswb %%mm0, %%mm0 \n\t"\
+ "paddw %%mm4, %%mm2 \n\t"\
+ "paddw %%mm2, %%mm1 \n\t" /* G*/\
+\
+ "packuswb %%mm1, %%mm1 \n\t"
+#endif
+
+#define REAL_YSCALEYUV2PACKED(index, c) \
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
+ "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
+ "psraw $3, %%mm0 \n\t"\
+ "psraw $3, %%mm1 \n\t"\
+ "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
+ "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
+ "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+
+#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
+
+#define REAL_YSCALEYUV2RGB(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
+ "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
+
+#define REAL_YSCALEYUV2PACKED1(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
+ "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $7, %%mm3 \n\t" \
+ "psraw $7, %%mm4 \n\t" \
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $7, %%mm1 \n\t" \
+ "psraw $7, %%mm7 \n\t" \
+
+#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
+
+#define REAL_YSCALEYUV2RGB1(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
+ "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
+
+#define REAL_YSCALEYUV2PACKED1b(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+ "psrlw $8, %%mm3 \n\t" \
+ "psrlw $8, %%mm4 \n\t" \
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $7, %%mm1 \n\t" \
+ "psraw $7, %%mm7 \n\t"
+#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
+
+// do vertical chrominance interpolation
+#define REAL_YSCALEYUV2RGB1b(index, c) \
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+ "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
+ "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
+
+#define REAL_WRITEBGR32(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+\
+ MOVNTQ(%%mm0, (dst, index, 4))\
+ MOVNTQ(%%mm2, 8(dst, index, 4))\
+ MOVNTQ(%%mm1, 16(dst, index, 4))\
+ MOVNTQ(%%mm3, 24(dst, index, 4))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
+
+#define REAL_WRITEBGR16(dst, dstw, index) \
+ "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
+ "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
+ "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
+ "psrlq $3, %%mm2 \n\t"\
+\
+ "movq %%mm2, %%mm1 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm5, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm5, %%mm1 \n\t"\
+\
+ "psllq $3, %%mm3 \n\t"\
+ "psllq $3, %%mm4 \n\t"\
+\
+ "por %%mm3, %%mm2 \n\t"\
+ "por %%mm4, %%mm1 \n\t"\
+\
+ MOVNTQ(%%mm2, (dst, index, 2))\
+ MOVNTQ(%%mm1, 8(dst, index, 2))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
+
+#define REAL_WRITEBGR15(dst, dstw, index) \
+ "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
+ "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
+ "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
+ "psrlq $3, %%mm2 \n\t"\
+ "psrlq $1, %%mm5 \n\t"\
+\
+ "movq %%mm2, %%mm1 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm5, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm5, %%mm1 \n\t"\
+\
+ "psllq $2, %%mm3 \n\t"\
+ "psllq $2, %%mm4 \n\t"\
+\
+ "por %%mm3, %%mm2 \n\t"\
+ "por %%mm4, %%mm1 \n\t"\
+\
+ MOVNTQ(%%mm2, (dst, index, 2))\
+ MOVNTQ(%%mm1, 8(dst, index, 2))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
+
+#define WRITEBGR24OLD(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+\
+ "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
+ "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
+ "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
+ "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
+ "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
+ "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
+ "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
+ "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
+\
+ "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
+ "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
+ "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
+ "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
+ "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
+ "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
+ "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
+ "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
+ "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
+ "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
+ "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
+ "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
+\
+ "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
+ "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
+ "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
+ "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
+ "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
+ "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
+ "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
+ "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
+\
+ MOVNTQ(%%mm0, (dst))\
+ MOVNTQ(%%mm2, 8(dst))\
+ MOVNTQ(%%mm3, 16(dst))\
+ "add $24, "#dst" \n\t"\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+
+#define WRITEBGR24MMX(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+\
+ "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
+ "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
+ "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
+ "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
+\
+ "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
+ "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
+ "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
+ "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
+\
+ "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
+ "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
+ "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
+ "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
+\
+ "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
+ "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
+ "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
+ "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
+ MOVNTQ(%%mm0, (dst))\
+\
+ "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
+ "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
+ "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
+ "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
+ MOVNTQ(%%mm6, 8(dst))\
+\
+ "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
+ "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
+ "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
+ MOVNTQ(%%mm5, 16(dst))\
+\
+ "add $24, "#dst" \n\t"\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+
+#define WRITEBGR24MMX2(dst, dstw, index) \
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq "MANGLE(M24A)", %%mm0 \n\t"\
+ "movq "MANGLE(M24C)", %%mm7 \n\t"\
+ "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
+ "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
+ "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
+\
+ "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
+ "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
+ "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
+\
+ "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
+ "por %%mm1, %%mm6 \n\t"\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, (dst))\
+\
+ "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
+ "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
+ "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
+ "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
+\
+ "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
+ "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
+ "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
+\
+ "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, 8(dst))\
+\
+ "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
+ "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
+ "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
+\
+ "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
+ "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
+ "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
+\
+ "por %%mm1, %%mm3 \n\t"\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, 16(dst))\
+\
+ "add $24, "#dst" \n\t"\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+
+#ifdef HAVE_MMX2
+#undef WRITEBGR24
+#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
+#else
+#undef WRITEBGR24
+#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
+#endif
+
+#define REAL_WRITEYUY2(dst, dstw, index) \
+ "packuswb %%mm3, %%mm3 \n\t"\
+ "packuswb %%mm4, %%mm4 \n\t"\
+ "packuswb %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm4, %%mm3 \n\t"\
+ "movq %%mm1, %%mm7 \n\t"\
+ "punpcklbw %%mm3, %%mm1 \n\t"\
+ "punpckhbw %%mm3, %%mm7 \n\t"\
+\
+ MOVNTQ(%%mm1, (dst, index, 2))\
+ MOVNTQ(%%mm7, 8(dst, index, 2))\
+\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
+#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
+
+
+static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
+{
+#ifdef HAVE_MMX
+ if(c->flags & SWS_ACCURATE_RND){
+ if(uDest){
+ YSCALEYUV2YV12X_ACCURATE( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
+ YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
+ }
+
+ YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
+ }else{
+ if(uDest){
+ YSCALEYUV2YV12X( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
+ YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
+ }
+
+ YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
+ }
+#else
+#ifdef HAVE_ALTIVEC
+yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+#else //HAVE_ALTIVEC
+yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+#endif //!HAVE_ALTIVEC
+#endif
+}
+
+static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
+{
+yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, dstW, chrDstW, dstFormat);
+}
+
+static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
+{
+#ifdef HAVE_MMX
+ if(uDest != NULL)
+ {
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
+ "g" (-chrDstW)
+ : "%"REG_a
+ );
+
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
+ "g" (-chrDstW)
+ : "%"REG_a
+ );
+ }
+
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (lumSrc + dstW), "r" (dest + dstW),
+ "g" (-dstW)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<dstW; i++)
+ {
+ int val= lumSrc[i]>>7;
+
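+ // the &256 test cheaply flags values outside 0..255, which are then clamped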
+ if(val&256){
+ if(val<0) val=0;
+ else val=255;
+ }
+
+ dest[i]= val;
+ }
+
+ if(uDest != NULL)
+ for(i=0; i<chrDstW; i++)
+ {
+ int u=chrSrc[i]>>7;
+ int v=chrSrc[i + 2048]>>7;
+
+ if((u|v)&256){
+ if(u<0) u=0;
+ else if (u>255) u=255;
+ if(v<0) v=0;
+ else if (v>255) v=255;
+ }
+
+ uDest[i]= u;
+ vDest[i]= v;
+ }
+#endif
+}
+
+
+/**
+ * vertical scale YV12 to RGB
+ */
+static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, long dstW, long dstY)
+{
+#ifdef HAVE_MMX
+ long dummy=0;
+ if(c->flags & SWS_ACCURATE_RND){
+ switch(c->dstFormat){
+ case PIX_FMT_RGB32:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ WRITEBGR32(%4, %5, %%REGa)
+
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_BGR24:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
+ "add %4, %%"REG_c" \n\t"
+ WRITEBGR24(%%REGc, %5, %%REGa)
+
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
+ );
+ return;
+ case PIX_FMT_BGR555:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR15(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_BGR565:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_YUYV422:
+ YSCALEYUV2PACKEDX_ACCURATE
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+
+ "psraw $3, %%mm3 \n\t"
+ "psraw $3, %%mm4 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm7 \n\t"
+ WRITEYUY2(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ }
+ }else{
+ switch(c->dstFormat)
+ {
+ case PIX_FMT_RGB32:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ WRITEBGR32(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_BGR24:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
+ "add %4, %%"REG_c" \n\t"
+ WRITEBGR24(%%REGc, %5, %%REGa)
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
+ );
+ return;
+ case PIX_FMT_BGR555:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR15(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_BGR565:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_YUYV422:
+ YSCALEYUV2PACKEDX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+
+ "psraw $3, %%mm3 \n\t"
+ "psraw $3, %%mm4 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm7 \n\t"
+ WRITEYUY2(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ }
+ }
+#endif
+#ifdef HAVE_ALTIVEC
+ /* The following list of supported dstFormat values should
+ match what's found in the body of altivec_yuv2packedX() */
+ if(c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
+ c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
+ c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)
+ altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, dstW, dstY);
+ else
+#endif
+ yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, dstW, dstY);
+}
+
+/**
+ * vertical bilinear scale YV12 to RGB
+ */
+static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
+ uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
+{
+ int yalpha1=yalpha^4095;
+ int uvalpha1=uvalpha^4095;
+ int i;
+
+#if 0 //isn't used
+ if(flags&SWS_FULL_CHR_H_INT)
+ {
+ switch(dstFormat)
+ {
+#ifdef HAVE_MMX
+ case PIX_FMT_RGB32:
+ asm volatile(
+
+
+FULL_YSCALEYUV2RGB
+ "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
+ "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
+
+ "movq %%mm3, %%mm1 \n\t"
+ "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
+ "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
+
+ MOVNTQ(%%mm3, (%4, %%REGa, 4))
+ MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
+
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a
+ );
+ break;
+ case PIX_FMT_BGR24:
+ asm volatile(
+
+FULL_YSCALEYUV2RGB
+
+ // lsb ... msb
+ "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
+ "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
+
+ "movq %%mm3, %%mm1 \n\t"
+ "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
+ "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
+
+ "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
+ "psrlq $8, %%mm3 \n\t" // GR0BGR00
+ "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
+ "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
+ "por %%mm2, %%mm3 \n\t" // BGRBGR00
+ "movq %%mm1, %%mm2 \n\t"
+ "psllq $48, %%mm1 \n\t" // 000000BG
+ "por %%mm1, %%mm3 \n\t" // BGRBGRBG
+
+ "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
+ "psrld $16, %%mm2 \n\t" // R000R000
+ "psrlq $24, %%mm1 \n\t" // 0BGR0000
+ "por %%mm2, %%mm1 \n\t" // RBGRR000
+
+ "mov %4, %%"REG_b" \n\t"
+ "add %%"REG_a", %%"REG_b" \n\t"
+
+#ifdef HAVE_MMX2
+ //FIXME Alignment
+ "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
+ "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
+#else
+ "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
+ "psrlq $32, %%mm3 \n\t"
+ "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
+ "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
+#endif
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a, "%"REG_b
+ );
+ break;
+ case PIX_FMT_BGR555:
+ asm volatile(
+
+FULL_YSCALEYUV2RGB
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
+ "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
+#endif
+ "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
+ "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
+ "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
+
+ "psrlw $3, %%mm3 \n\t"
+ "psllw $2, %%mm1 \n\t"
+ "psllw $7, %%mm0 \n\t"
+ "pand "MANGLE(g15Mask)", %%mm1 \n\t"
+ "pand "MANGLE(r15Mask)", %%mm0 \n\t"
+
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+
+ MOVNTQ(%%mm0, (%4, %%REGa, 2))
+
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a
+ );
+ break;
+ case PIX_FMT_BGR565:
+ asm volatile(
+
+FULL_YSCALEYUV2RGB
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
+ "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
+#endif
+ "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
+ "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
+ "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
+
+ "psrlw $3, %%mm3 \n\t"
+ "psllw $3, %%mm1 \n\t"
+ "psllw $8, %%mm0 \n\t"
+ "pand "MANGLE(g16Mask)", %%mm1 \n\t"
+ "pand "MANGLE(r16Mask)", %%mm0 \n\t"
+
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+
+ MOVNTQ(%%mm0, (%4, %%REGa, 2))
+
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a
+ );
+ break;
+#endif
+ case PIX_FMT_BGR32:
+#ifndef HAVE_MMX
+ case PIX_FMT_RGB32:
+#endif
+ if(dstFormat==PIX_FMT_RGB32)
+ {
+ int i;
+#ifdef WORDS_BIGENDIAN
+ dest++;
+#endif
+ for(i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+ dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
+ dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
+ dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
+ dest+= 4;
+ }
+ }
+ else if(dstFormat==PIX_FMT_BGR24)
+ {
+ int i;
+ for(i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+ dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
+ dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
+ dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
+ dest+= 3;
+ }
+ }
+ else if(dstFormat==PIX_FMT_BGR565)
+ {
+ int i;
+ for(i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+
+ ((uint16_t*)dest)[i] =
+ clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
+ clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
+ clip_table16r[(Y + yuvtab_3343[V]) >>13];
+ }
+ }
+ else if(dstFormat==PIX_FMT_BGR555)
+ {
+ int i;
+ for(i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+
+ ((uint16_t*)dest)[i] =
+ clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
+ clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
+ clip_table15r[(Y + yuvtab_3343[V]) >>13];
+ }
+ }
+ }//FULL_UV_IPOL
+ else
+ {
+#endif // if 0
+#ifdef HAVE_MMX
+ switch(c->dstFormat)
+ {
+//Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
+ case PIX_FMT_RGB32:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR24:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR555:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR565:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_YUYV422:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2PACKED(%%REGBP, %5)
+ WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ default: break;
+ }
+#endif //HAVE_MMX
+YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
+}
+
+/**
+ * YV12 to RGB without scaling or interpolating
+ */
+static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
+ uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
+{
+ const int yalpha1=0;
+ int i;
+
+ uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
+ const int yalpha= 4096; //FIXME ...
+
+ if(flags&SWS_FULL_CHR_H_INT)
+ {
+ RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
+ return;
+ }
+
+#ifdef HAVE_MMX
+ if( uvalpha < 2048 ) // note this is not correct (it shifts chrominance by 0.5 pixels) but it's a bit faster
+ {
+ switch(dstFormat)
+ {
+ case PIX_FMT_RGB32:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR24:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR555:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+ WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR565:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_YUYV422:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2PACKED1(%%REGBP, %5)
+ WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ }
+ }
+ else
+ {
+ switch(dstFormat)
+ {
+ case PIX_FMT_RGB32:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR24:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR555:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+ WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR565:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_YUYV422:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2PACKED1b(%%REGBP, %5)
+ WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ }
+ }
+#endif
+ if( uvalpha < 2048 )
+ {
+ YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
+ }else{
+ YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
+ }
+}
+
+//FIXME yuy2* can read up to 7 samples too many
+
+static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
+{
+#ifdef HAVE_MMX
+ asm volatile(
+ "movq "MANGLE(bm01010101)", %%mm2\n\t"
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",2), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
+ "pand %%mm2, %%mm0 \n\t"
+ "pand %%mm2, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" (-width), "r" (src+width*2), "r" (dst+width)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ dst[i]= src[2*i];
+#endif
+}
+
+static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
+{
+ assert(src1 == src2);
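+ /* YUYV pixels are packed as Y0 U Y1 V, so the chroma samples live in the odd bytes; they are de-interleaved into the separate U and V planes here. */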
+#ifdef HAVE_MMX
+ asm volatile(
+ "movq "MANGLE(bm01010101)", %%mm4\n\t"
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",4), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "pand %%mm4, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "movd %%mm1, (%2, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ {
+ dstU[i]= src1[4*i + 1];
+ dstV[i]= src1[4*i + 3];
+ }
+#endif
+}
+
+//this is almost identical to the previous one and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
+static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
+{
+#ifdef HAVE_MMX
+ asm volatile(
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",2), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" (-width), "r" (src+width*2), "r" (dst+width)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ dst[i]= src[2*i+1];
+#endif
+}
+
+static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
+{
+ assert(src1 == src2);
+#ifdef HAVE_MMX
+ asm volatile(
+ "movq "MANGLE(bm01010101)", %%mm4\n\t"
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",4), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+ "pand %%mm4, %%mm0 \n\t"
+ "pand %%mm4, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "pand %%mm4, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "movd %%mm1, (%2, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
+ : "%"REG_a
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ {
+ dstU[i]= src1[4*i + 0];
+ dstV[i]= src1[4*i + 2];
+ }
+#endif
+}
+
+static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int b= ((uint32_t*)src)[i]&0xFF;
+ int g= (((uint32_t*)src)[i]>>8)&0xFF;
+ int r= (((uint32_t*)src)[i]>>16)&0xFF;
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
+ }
+}
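
The luma expression above is plain fixed-point arithmetic: RY, GY and BY are the RGB-to-Y weights scaled by 1<<RGB2YUV_SHIFT, and the 33<<(RGB2YUV_SHIFT-1) bias equals 16.5 in that scale, i.e. the +16 studio-range offset plus half an LSB of rounding. A minimal standalone sketch of the same idea, using illustrative BT.601-style weights rather than the actual swscale constants:

    #include <stdint.h>

    /* Illustrative stand-ins for swscale's RY/GY/BY and RGB2YUV_SHIFT;
     * the values below are BT.601 weights scaled by 1<<8, not the real constants. */
    enum { SHIFT = 8, KR = 66, KG = 129, KB = 25 };

    static uint8_t rgb_to_y(int r, int g, int b)
    {
        /* 33 << (SHIFT-1) == 16.5 * (1<<SHIFT): the +16 offset plus 0.5 for rounding */
        return (uint8_t)((KR*r + KG*g + KB*b + (33 << (SHIFT-1))) >> SHIFT);
    }
    /* rgb_to_y(0,0,0) == 16 and rgb_to_y(255,255,255) == 235, the studio-range limits. */
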
+
+static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ assert(src1 == src2);
+ for(i=0; i<width; i++)
+ {
+ const int a= ((uint32_t*)src1)[2*i+0];
+ const int e= ((uint32_t*)src1)[2*i+1];
+ const int l= (a&0xFF00FF) + (e&0xFF00FF);
+ const int h= (a&0x00FF00) + (e&0x00FF00);
+ const int b= l&0x3FF;
+ const int g= h>>8;
+ const int r= l>>16;
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ }
+}
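
The two 32-bit loads and masked adds above are a small SWAR trick: l sums the blue and red bytes of two adjacent pixels in a single addition (the blue sums land in bits 0-9, the red sums in bits 16-25) and h sums the green bytes. Worked example: with a = 0x112233 and e = 0x445566, l = 0x110033 + 0x440066 = 0x550099, so b = l&0x3FF = 0x99 = 0x33+0x66 and r = l>>16 = 0x55 = 0x11+0x44, while h = 0x2200 + 0x5500 = 0x7700 gives g = 0x77 = 0x22+0x55. Each per-channel sum of two 8-bit values fits in at most 9 bits, so the fields never carry into one another, and the extra +1 in the final shift divides by two, averaging the pixel pair for the 2:1 horizontal chroma subsampling.
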
+
+static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
+{
+#ifdef HAVE_MMX
+ asm volatile(
+ "mov %2, %%"REG_a" \n\t"
+ "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_d") \n\t"
+ "movd (%0, %%"REG_d"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "packssdw %%mm2, %%mm0 \n\t"
+ "psraw $7, %%mm0 \n\t"
+
+ "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "add $24, %%"REG_d" \n\t"
+ "packssdw %%mm2, %%mm4 \n\t"
+ "psraw $7, %%mm4 \n\t"
+
+ "packuswb %%mm4, %%mm0 \n\t"
+ "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
+
+ "movq %%mm0, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src+width*3), "r" (dst+width), "g" (-width)
+ : "%"REG_a, "%"REG_d
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int b= src[i*3+0];
+ int g= src[i*3+1];
+ int r= src[i*3+2];
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
+ }
+#endif
+}
+
+static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
+{
+ assert(src1 == src2);
+#ifdef HAVE_MMX
+ asm volatile(
+ "mov %3, %%"REG_a" \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
+ "add %%"REG_d", %%"REG_d" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_d") \n\t"
+#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
+ "movq (%0, %%"REG_d"), %%mm0 \n\t"
+ "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm0 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB(%%mm1, %%mm0)
+ PAVGB(%%mm3, %%mm2)
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+#else
+ "movd (%0, %%"REG_d"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "paddw %%mm4, %%mm2 \n\t"
+ "psrlw $1, %%mm0 \n\t"
+ "psrlw $1, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm0, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
+ "psraw $7, %%mm0 \n\t"
+
+#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
+ "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
+ "movq %%mm4, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm4 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB(%%mm1, %%mm4)
+ PAVGB(%%mm3, %%mm2)
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+#else
+ "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "paddw %%mm2, %%mm4 \n\t"
+ "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
+ "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "paddw %%mm5, %%mm2 \n\t"
+ "movq "MANGLE(w1111)", %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm4, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+#ifndef FAST_BGR2YV12
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "add $24, %%"REG_d" \n\t"
+ "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
+ "psraw $7, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm1 \n\t"
+ "punpckldq %%mm4, %%mm0 \n\t"
+ "punpckhdq %%mm4, %%mm1 \n\t"
+ "packsswb %%mm1, %%mm0 \n\t"
+ "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
+
+ "movd %%mm0, (%1, %%"REG_a") \n\t"
+ "punpckhdq %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%2, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src1+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
+ : "%"REG_a, "%"REG_d
+ );
+#else
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int b= src1[6*i + 0] + src1[6*i + 3];
+ int g= src1[6*i + 1] + src1[6*i + 4];
+ int r= src1[6*i + 2] + src1[6*i + 5];
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ }
+#endif
+}
+
+static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int b= d&0x1F;
+ int g= (d>>5)&0x3F;
+ int r= (d>>11)&0x1F;
+
+ dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
+ }
+}
+
+static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ assert(src1==src2);
+ for(i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+
+ int dl= (d0&0x07E0F81F);
+ int dh= ((d0>>5)&0x07C0F83F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int b= d&0x7F;
+ int r= (d>>11)&0x7F;
+ int g= d>>21;
+ dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
+ dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
+ }
+}
+
+static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int b= d&0x1F;
+ int g= (d>>5)&0x1F;
+ int r= (d>>10)&0x1F;
+
+ dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
+ }
+}
+
+static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ assert(src1==src2);
+ for(i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+
+ int dl= (d0&0x03E07C1F);
+ int dh= ((d0>>5)&0x03E0F81F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int b= d&0x7F;
+ int r= (d>>10)&0x7F;
+ int g= d>>21;
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
+ }
+}
+
+
+static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int r= ((uint32_t*)src)[i]&0xFF;
+ int g= (((uint32_t*)src)[i]>>8)&0xFF;
+ int b= (((uint32_t*)src)[i]>>16)&0xFF;
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
+ }
+}
+
+static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ assert(src1==src2);
+ for(i=0; i<width; i++)
+ {
+ const int a= ((uint32_t*)src1)[2*i+0];
+ const int e= ((uint32_t*)src1)[2*i+1];
+ const int l= (a&0xFF00FF) + (e&0xFF00FF);
+ const int h= (a&0x00FF00) + (e&0x00FF00);
+ const int r= l&0x3FF;
+ const int g= h>>8;
+ const int b= l>>16;
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ }
+}
+
+static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int r= src[i*3+0];
+ int g= src[i*3+1];
+ int b= src[i*3+2];
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
+ }
+}
+
+static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ assert(src1==src2);
+ for(i=0; i<width; i++)
+ {
+ int r= src1[6*i + 0] + src1[6*i + 3];
+ int g= src1[6*i + 1] + src1[6*i + 4];
+ int b= src1[6*i + 2] + src1[6*i + 5];
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ }
+}
+
+static inline void RENAME(rgb16ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int r= d&0x1F;
+ int g= (d>>5)&0x3F;
+ int b= (d>>11)&0x1F;
+
+ dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
+ }
+}
+
+static inline void RENAME(rgb16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ assert(src1 == src2);
+ for(i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+
+ int dl= (d0&0x07E0F81F);
+ int dh= ((d0>>5)&0x07C0F83F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int r= d&0x7F;
+ int b= (d>>11)&0x7F;
+ int g= d>>21;
+ dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
+ dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
+ }
+}
+
+static inline void RENAME(rgb15ToY)(uint8_t *dst, uint8_t *src, int width)
+{
+ int i;
+ for(i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int r= d&0x1F;
+ int g= (d>>5)&0x1F;
+ int b= (d>>10)&0x1F;
+
+ dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
+ }
+}
+
+static inline void RENAME(rgb15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+{
+ int i;
+ assert(src1 == src2);
+ for(i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+
+ int dl= (d0&0x03E07C1F);
+ int dh= ((d0>>5)&0x03E0F81F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int g= d&0x7F;
+ int r= (d>>10)&0x7F;
+ int b= d>>21;
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
+ }
+}
+
+// Bilinear / Bicubic scaling
+static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
+ int16_t *filter, int16_t *filterPos, long filterSize)
+{
+#ifdef HAVE_MMX
+ assert(filterSize % 4 == 0 && filterSize>0);
+ if(filterSize==4) // always true for upscaling, sometimes for downscaling too
+ {
+ long counter= -2*dstW;
+ filter-= counter*2;
+ filterPos-= counter/2;
+ dst-= counter/2;
+ asm volatile(
+#if defined(PIC)
+ "push %%"REG_b" \n\t"
+#endif
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(w02)", %%mm6 \n\t"
+ "push %%"REG_BP" \n\t" // we use 7 regs here ...
+ "mov %%"REG_a", %%"REG_BP" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movzwl (%2, %%"REG_BP"), %%eax \n\t"
+ "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
+ "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
+ "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
+ "movd (%3, %%"REG_a"), %%mm0 \n\t"
+ "movd (%3, %%"REG_b"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm0 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm3 \n\t"
+ "packssdw %%mm3, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "packssdw %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%4, %%"REG_BP") \n\t"
+ "add $4, %%"REG_BP" \n\t"
+ " jnc 1b \n\t"
+
+ "pop %%"REG_BP" \n\t"
+#if defined(PIC)
+ "pop %%"REG_b" \n\t"
+#endif
+ : "+a" (counter)
+ : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
+#if !defined(PIC)
+ : "%"REG_b
+#endif
+ );
+ }
+ else if(filterSize==8)
+ {
+ long counter= -2*dstW;
+ filter-= counter*4;
+ filterPos-= counter/2;
+ dst-= counter/2;
+ asm volatile(
+#if defined(PIC)
+ "push %%"REG_b" \n\t"
+#endif
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(w02)", %%mm6 \n\t"
+ "push %%"REG_BP" \n\t" // we use 7 regs here ...
+ "mov %%"REG_a", %%"REG_BP" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movzwl (%2, %%"REG_BP"), %%eax \n\t"
+ "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
+ "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
+ "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
+ "movd (%3, %%"REG_a"), %%mm0 \n\t"
+ "movd (%3, %%"REG_b"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm0 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+
+ "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
+ "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
+ "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
+ "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm4 \n\t"
+ "pmaddwd %%mm2, %%mm5 \n\t"
+ "paddd %%mm4, %%mm0 \n\t"
+ "paddd %%mm5, %%mm3 \n\t"
+
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm3 \n\t"
+ "packssdw %%mm3, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "packssdw %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%4, %%"REG_BP") \n\t"
+ "add $4, %%"REG_BP" \n\t"
+ " jnc 1b \n\t"
+
+ "pop %%"REG_BP" \n\t"
+#if defined(PIC)
+ "pop %%"REG_b" \n\t"
+#endif
+ : "+a" (counter)
+ : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
+#if !defined(PIC)
+ : "%"REG_b
+#endif
+ );
+ }
+ else
+ {
+ uint8_t *offset = src+filterSize;
+ long counter= -2*dstW;
+// filter-= counter*filterSize/2;
+ filterPos-= counter/2;
+ dst-= counter/2;
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(w02)", %%mm6 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "mov %2, %%"REG_c" \n\t"
+ "movzwl (%%"REG_c", %0), %%eax \n\t"
+ "movzwl 2(%%"REG_c", %0), %%edx \n\t"
+ "mov %5, %%"REG_c" \n\t"
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t"
+ "2: \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "movq (%1, %6), %%mm3 \n\t"
+ "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
+ "movd (%%"REG_c", %%"REG_d"), %%mm2\n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm0 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "paddd %%mm3, %%mm5 \n\t"
+ "paddd %%mm0, %%mm4 \n\t"
+ "add $8, %1 \n\t"
+ "add $4, %%"REG_c" \n\t"
+ "cmp %4, %%"REG_c" \n\t"
+ " jb 2b \n\t"
+ "add %6, %1 \n\t"
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm5 \n\t"
+ "packssdw %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "packssdw %%mm4, %%mm4 \n\t"
+ "mov %3, %%"REG_a" \n\t"
+ "movd %%mm4, (%%"REG_a", %0) \n\t"
+ "add $4, %0 \n\t"
+ " jnc 1b \n\t"
+
+ : "+r" (counter), "+r" (filter)
+ : "m" (filterPos), "m" (dst), "m"(offset),
+ "m" (src), "r" (filterSize*2)
+ : "%"REG_a, "%"REG_c, "%"REG_d
+ );
+ }
+#else
+#ifdef HAVE_ALTIVEC
+ hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
+#else
+ int i;
+ for(i=0; i<dstW; i++)
+ {
+ int j;
+ int srcPos= filterPos[i];
+ int val=0;
+// printf("filterPos: %d\n", filterPos[i]);
+ for(j=0; j<filterSize; j++)
+ {
+// printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
+ val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+ }
+// filter += hFilterSize;
+ dst[i] = FFMIN(FFMAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
+// dst[i] = val>>7;
+ }
+#endif
+#endif
+}
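
Whatever path is taken, the filter layout is the one visible in the C fallback: for output sample i, filterPos[i] is the first source sample and filter[filterSize*i + j], j = 0..filterSize-1, are its coefficients, so dst[i] = clip((sum over j of src[filterPos[i]+j] * filter[filterSize*i+j]) >> 7, 0, 32767). For instance, with filterSize = 4 and filterPos[5] = 10, dst[5] is the dot product of src[10..13] with filter[20..23], shifted right by 7 and clamped to the 15-bit intermediate range that the vertical pass later consumes. The filterSize==4 and filterSize==8 MMX branches above are this loop unrolled for the two common sizes, with the negative-counter trick again handling termination.
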
+ // *** horizontal scale Y line to temp buffer
+static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
+ int flags, int canMMX2BeUsed, int16_t *hLumFilter,
+ int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
+ int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
+ int32_t *mmx2FilterPos)
+{
+ if(srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
+ {
+ RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
+ {
+ RENAME(uyvyToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==PIX_FMT_RGB32)
+ {
+ RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==PIX_FMT_BGR24)
+ {
+ RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==PIX_FMT_BGR565)
+ {
+ RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==PIX_FMT_BGR555)
+ {
+ RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==PIX_FMT_BGR32)
+ {
+ RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==PIX_FMT_RGB24)
+ {
+ RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==PIX_FMT_RGB565)
+ {
+ RENAME(rgb16ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+ else if(srcFormat==PIX_FMT_RGB555)
+ {
+ RENAME(rgb15ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
+ }
+
+#ifdef HAVE_MMX
+ // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
+ if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
+#else
+ if(!(flags&SWS_FAST_BILINEAR))
+#endif
+ {
+ RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
+ }
+ else // Fast Bilinear upscale / crap downscale
+ {
+#if defined(ARCH_X86)
+#ifdef HAVE_MMX2
+ int i;
+#if defined(PIC)
+ uint64_t ebxsave __attribute__((aligned(8)));
+#endif
+ if(canMMX2BeUsed)
+ {
+ asm volatile(
+#if defined(PIC)
+ "mov %%"REG_b", %5 \n\t"
+#endif
+ "pxor %%mm7, %%mm7 \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "mov %1, %%"REG_D" \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "mov %3, %%"REG_b" \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
+
+#ifdef ARCH_X86_64
+
+#define FUNNY_Y_CODE \
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
+ "add %%"REG_S", %%"REG_c" \n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+
+#else
+
+#define FUNNY_Y_CODE \
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+
+#endif
+
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+FUNNY_Y_CODE
+
+#if defined(PIC)
+ "mov %5, %%"REG_b" \n\t"
+#endif
+ :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
+ "m" (funnyYCode)
+#if defined(PIC)
+ ,"m" (ebxsave)
+#endif
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
+#if !defined(PIC)
+ ,"%"REG_b
+#endif
+ );
+ for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
+ }
+ else
+ {
+#endif
+ long xInc_shr16 = xInc >> 16;
+ uint16_t xInc_mask = xInc & 0xffff;
+ //NO MMX just normal asm ...
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "xor %%"REG_d", %%"REG_d" \n\t" // xx
+ "xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ASMALIGN(4)
+ "1: \n\t"
+ "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
+ "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
+ "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
+ "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
+
+ "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
+ "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
+ "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
+ "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
+
+
+ "add $2, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+
+ :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
+ : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
+ );
+#ifdef HAVE_MMX2
+ } //if MMX2 can't be used
+#endif
+#else
+ int i;
+ unsigned int xpos=0;
+ for(i=0;i<dstWidth;i++)
+ {
+ register unsigned int xx=xpos>>16;
+ register unsigned int xalpha=(xpos&0xFFFF)>>9;
+ dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
+ xpos+=xInc;
+ }
+#endif
+ }
+}
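
The fast-bilinear branch works in 16.16 fixed point, as the C fallback makes explicit: xpos accumulates xInc once per output pixel, xx = xpos>>16 is the integer source position, xalpha = (xpos&0xFFFF)>>9 is a 7-bit blend weight, and dst[i] = (src[xx]<<7) + (src[xx+1]-src[xx])*xalpha is a 15-bit result. Worked example: with xInc = 0x18000 (a step of 1.5 source pixels per output pixel), output 1 has xpos = 0x18000, so xx = 1, xalpha = 64 and dst[1] = 64*(src[1]+src[2]), the midpoint of the two neighbours scaled by 128. The non-MMX2 asm path implements the same recurrence: the 16-bit "addw" of xInc_mask sets the carry flag and the following "adc" of xInc_shr16 folds that carry into the integer source index.
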
+
+inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
+ int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
+ int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
+ int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
+ int32_t *mmx2FilterPos)
+{
+ if(srcFormat==PIX_FMT_YUYV422)
+ {
+ RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==PIX_FMT_UYVY422)
+ {
+ RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==PIX_FMT_RGB32)
+ {
+ RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==PIX_FMT_BGR24)
+ {
+ RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==PIX_FMT_BGR565)
+ {
+ RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==PIX_FMT_BGR555)
+ {
+ RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==PIX_FMT_BGR32)
+ {
+ RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==PIX_FMT_RGB24)
+ {
+ RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==PIX_FMT_RGB565)
+ {
+ RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(srcFormat==PIX_FMT_RGB555)
+ {
+ RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
+ }
+ else if(isGray(srcFormat))
+ {
+ return;
+ }
+
+#ifdef HAVE_MMX
+ // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
+ if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
+#else
+ if(!(flags&SWS_FAST_BILINEAR))
+#endif
+ {
+ RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
+ RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
+ }
+ else // Fast Bilinear upscale / crap downscale
+ {
+#if defined(ARCH_X86)
+#ifdef HAVE_MMX2
+ int i;
+#if defined(PIC)
+ uint64_t ebxsave __attribute__((aligned(8)));
+#endif
+ if(canMMX2BeUsed)
+ {
+ asm volatile(
+#if defined(PIC)
+ "mov %%"REG_b", %6 \n\t"
+#endif
+ "pxor %%mm7, %%mm7 \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "mov %1, %%"REG_D" \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "mov %3, %%"REG_b" \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
+
+#ifdef ARCH_X86_64
+
+#define FUNNY_UV_CODE \
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
+ "add %%"REG_S", %%"REG_c" \n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+
+#else
+
+#define FUNNY_UV_CODE \
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+
+#endif
+
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "mov %5, %%"REG_c" \n\t" // src
+ "mov %1, %%"REG_D" \n\t" // buf1
+ "add $4096, %%"REG_D" \n\t"
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
+
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+FUNNY_UV_CODE
+
+#if defined(PIC)
+ "mov %6, %%"REG_b" \n\t"
+#endif
+ :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
+ "m" (funnyUVCode), "m" (src2)
+#if defined(PIC)
+ ,"m" (ebxsave)
+#endif
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
+#if !defined(PIC)
+ ,"%"REG_b
+#endif
+ );
+ for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
+ {
+// printf("%d %d %d\n", dstWidth, i, srcW);
+ dst[i] = src1[srcW-1]*128;
+ dst[i+2048] = src2[srcW-1]*128;
+ }
+ }
+ else
+ {
+#endif
+ long xInc_shr16 = (long) (xInc >> 16);
+ uint16_t xInc_mask = xInc & 0xffff;
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "xor %%"REG_d", %%"REG_d" \n\t" // xx
+ "xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ASMALIGN(4)
+ "1: \n\t"
+ "mov %0, %%"REG_S" \n\t"
+ "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
+ "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
+
+ "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
+ "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
+
+ "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
+ "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
+ "add $1, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+/* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
+ which is needed to support GCC-4.0 */
+#if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+ :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
+#else
+ :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
+#endif
+ "r" (src2)
+ : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
+ );
+#ifdef HAVE_MMX2
+ } //if MMX2 can't be used
+#endif
+#else
+ int i;
+ unsigned int xpos=0;
+ for(i=0;i<dstWidth;i++)
+ {
+ register unsigned int xx=xpos>>16;
+ register unsigned int xalpha=(xpos&0xFFFF)>>9;
+ dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
+ dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
+/* slower
+ dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
+ dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
+*/
+ xpos+=xInc;
+ }
+#endif
+ }
+}
+
+static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ /* load a few things into local vars to make the code more readable and faster */
+ const int srcW= c->srcW;
+ const int dstW= c->dstW;
+ const int dstH= c->dstH;
+ const int chrDstW= c->chrDstW;
+ const int chrSrcW= c->chrSrcW;
+ const int lumXInc= c->lumXInc;
+ const int chrXInc= c->chrXInc;
+ const int dstFormat= c->dstFormat;
+ const int srcFormat= c->srcFormat;
+ const int flags= c->flags;
+ const int canMMX2BeUsed= c->canMMX2BeUsed;
+ int16_t *vLumFilterPos= c->vLumFilterPos;
+ int16_t *vChrFilterPos= c->vChrFilterPos;
+ int16_t *hLumFilterPos= c->hLumFilterPos;
+ int16_t *hChrFilterPos= c->hChrFilterPos;
+ int16_t *vLumFilter= c->vLumFilter;
+ int16_t *vChrFilter= c->vChrFilter;
+ int16_t *hLumFilter= c->hLumFilter;
+ int16_t *hChrFilter= c->hChrFilter;
+ int32_t *lumMmxFilter= c->lumMmxFilter;
+ int32_t *chrMmxFilter= c->chrMmxFilter;
+ const int vLumFilterSize= c->vLumFilterSize;
+ const int vChrFilterSize= c->vChrFilterSize;
+ const int hLumFilterSize= c->hLumFilterSize;
+ const int hChrFilterSize= c->hChrFilterSize;
+ int16_t **lumPixBuf= c->lumPixBuf;
+ int16_t **chrPixBuf= c->chrPixBuf;
+ const int vLumBufSize= c->vLumBufSize;
+ const int vChrBufSize= c->vChrBufSize;
+ uint8_t *funnyYCode= c->funnyYCode;
+ uint8_t *funnyUVCode= c->funnyUVCode;
+ uint8_t *formatConvBuffer= c->formatConvBuffer;
+ const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
+ const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
+ int lastDstY;
+
+ /* vars which will change and which we need to store back in the context */
+ int dstY= c->dstY;
+ int lumBufIndex= c->lumBufIndex;
+ int chrBufIndex= c->chrBufIndex;
+ int lastInLumBuf= c->lastInLumBuf;
+ int lastInChrBuf= c->lastInChrBuf;
+
+ if(isPacked(c->srcFormat)){
+ src[0]=
+ src[1]=
+ src[2]= src[0];
+ srcStride[0]=
+ srcStride[1]=
+ srcStride[2]= srcStride[0];
+ }
+ srcStride[1]<<= c->vChrDrop;
+ srcStride[2]<<= c->vChrDrop;
+
+// printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
+// (int)dst[0], (int)dst[1], (int)dst[2]);
+
+#if 0 //self test FIXME move to a vfilter or something
+{
+static volatile int i=0;
+i++;
+if(srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
+ selfTest(src, srcStride, c->srcW, c->srcH);
+i--;
+}
+#endif
+
+//printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
+//dstStride[0],dstStride[1],dstStride[2]);
+
+ if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
+ {
+ static int firstTime=1; //FIXME move this into the context perhaps
+ if(flags & SWS_PRINT_INFO && firstTime)
+ {
+ MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
+ "SwScaler: ->cannot do aligned memory acesses anymore\n");
+ firstTime=0;
+ }
+ }
+
+ /* Note: the user might start scaling the picture in the middle, so this will not get executed.
+ This is not really intended but it currently works, so people might do it. */
+ if(srcSliceY ==0){
+ lumBufIndex=0;
+ chrBufIndex=0;
+ dstY=0;
+ lastInLumBuf= -1;
+ lastInChrBuf= -1;
+ }
+
+ lastDstY= dstY;
+
+ for(;dstY < dstH; dstY++){
+ unsigned char *dest =dst[0]+dstStride[0]*dstY;
+ const int chrDstY= dstY>>c->chrDstVSubSample;
+ unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
+ unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
+
+ const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
+ const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
+ const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
+ const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
+
+//printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
+// dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
+ //handle holes (FAST_BILINEAR & weird filters)
+ if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
+ if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
+//printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
+ ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
+ ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
+
+ // Do we have enough lines in this slice to output the dstY line
+ if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
+ {
+ //Do horizontal scaling
+ while(lastInLumBuf < lastLumSrcY)
+ {
+ uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
+ lumBufIndex++;
+// printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
+ ASSERT(lumBufIndex < 2*vLumBufSize)
+ ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
+ ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
+// printf("%d %d\n", lumBufIndex, vLumBufSize);
+ RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
+ flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
+ funnyYCode, c->srcFormat, formatConvBuffer,
+ c->lumMmx2Filter, c->lumMmx2FilterPos);
+ lastInLumBuf++;
+ }
+ while(lastInChrBuf < lastChrSrcY)
+ {
+ uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
+ uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
+ chrBufIndex++;
+ ASSERT(chrBufIndex < 2*vChrBufSize)
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
+ //FIXME replace parameters through context struct (some at least)
+
+ if(!(isGray(srcFormat) || isGray(dstFormat)))
+ RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
+ flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
+ funnyUVCode, c->srcFormat, formatConvBuffer,
+ c->chrMmx2Filter, c->chrMmx2FilterPos);
+ lastInChrBuf++;
+ }
+ //wrap buf index around to stay inside the ring buffer
+ if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
+ if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
+ }
+ else // not enough lines left in this slice -> load the rest in the buffer
+ {
+/* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
+ firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
+ lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
+ vChrBufSize, vLumBufSize);*/
+
+ //Do horizontal scaling
+ while(lastInLumBuf+1 < srcSliceY + srcSliceH)
+ {
+ uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
+ lumBufIndex++;
+ ASSERT(lumBufIndex < 2*vLumBufSize)
+ ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
+ ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
+ RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
+ flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
+ funnyYCode, c->srcFormat, formatConvBuffer,
+ c->lumMmx2Filter, c->lumMmx2FilterPos);
+ lastInLumBuf++;
+ }
+ while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
+ {
+ uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
+ uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
+ chrBufIndex++;
+ ASSERT(chrBufIndex < 2*vChrBufSize)
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
+
+ if(!(isGray(srcFormat) || isGray(dstFormat)))
+ RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
+ flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
+ funnyUVCode, c->srcFormat, formatConvBuffer,
+ c->chrMmx2Filter, c->chrMmx2FilterPos);
+ lastInChrBuf++;
+ }
+ //wrap buf index around to stay inside the ring buffer
+ if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
+ if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
+ break; //we can't output a dstY line so let's try with the next slice
+ }
+
+#ifdef HAVE_MMX
+ b5Dither= dither8[dstY&1];
+ g6Dither= dither4[dstY&1];
+ g5Dither= dither8[dstY&1];
+ r5Dither= dither8[(dstY+1)&1];
+#endif
+ if(dstY < dstH-2)
+ {
+ int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
+ int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
+#ifdef HAVE_MMX
+ int i;
+ if(flags & SWS_ACCURATE_RND){
+ for(i=0; i<vLumFilterSize; i+=2){
+ lumMmxFilter[2*i+0]= lumSrcPtr[i ];
+ lumMmxFilter[2*i+1]= lumSrcPtr[i+(vLumFilterSize>1)];
+ lumMmxFilter[2*i+2]=
+ lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i ]
+ + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
+ }
+ for(i=0; i<vChrFilterSize; i+=2){
+ chrMmxFilter[2*i+0]= chrSrcPtr[i ];
+ chrMmxFilter[2*i+1]= chrSrcPtr[i+(vChrFilterSize>1)];
+ chrMmxFilter[2*i+2]=
+ chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i ]
+ + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
+ }
+ }else{
+ for(i=0; i<vLumFilterSize; i++)
+ {
+ lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
+ lumMmxFilter[4*i+2]=
+ lumMmxFilter[4*i+3]=
+ ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
+ }
+ for(i=0; i<vChrFilterSize; i++)
+ {
+ chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
+ chrMmxFilter[4*i+2]=
+ chrMmxFilter[4*i+3]=
+ ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
+ }
+ }
+#endif
+ if(dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
+ RENAME(yuv2nv12X)(c,
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, dstW, chrDstW, dstFormat);
+ }
+ else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
+ {
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
+ if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
+ {
+ int16_t *lumBuf = lumPixBuf[0];
+ int16_t *chrBuf= chrPixBuf[0];
+ RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
+ }
+ else //General YV12
+ {
+ RENAME(yuv2yuvX)(c,
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+ }
+ }
+ else
+ {
+ ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
+ ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
+ if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
+ {
+ int chrAlpha= vChrFilter[2*dstY+1];
+ RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
+ dest, dstW, chrAlpha, dstFormat, flags, dstY);
+ }
+ else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
+ {
+ int lumAlpha= vLumFilter[2*dstY+1];
+ int chrAlpha= vChrFilter[2*dstY+1];
+ lumMmxFilter[2]=
+ lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
+ chrMmxFilter[2]=
+ chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
+ RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
+ dest, dstW, lumAlpha, chrAlpha, dstY);
+ }
+ else //General RGB
+ {
+ RENAME(yuv2packedX)(c,
+ vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+ vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, dstW, dstY);
+ }
+ }
+ }
+ else // hmm looks like we can't use MMX here without overwriting this array's tail
+ {
+ int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
+ int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
+ if(dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
+ yuv2nv12XinC(
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, dstW, chrDstW, dstFormat);
+ }
+ else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
+ {
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
+ yuv2yuvXinC(
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+ }
+ else
+ {
+ ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
+ ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
+ yuv2packedXinC(c,
+ vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+ vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, dstW, dstY);
+ }
+ }
+ }
+
+#ifdef HAVE_MMX
+ __asm __volatile(SFENCE:::"memory");
+ __asm __volatile(EMMS:::"memory");
+#endif
+ /* store changed local vars back in the context */
+ c->dstY= dstY;
+ c->lumBufIndex= lumBufIndex;
+ c->chrBufIndex= chrBufIndex;
+ c->lastInLumBuf= lastInLumBuf;
+ c->lastInChrBuf= lastInChrBuf;
+
+ return dstY - lastDstY;
+}
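
For readers following the ring-buffer bookkeeping in swScale: lumPixBuf and chrPixBuf hold the most recent vLumBufSize/vChrBufSize horizontally scaled lines, lastInLumBuf/lastInChrBuf record the newest source line already scaled, and lumBufIndex/chrBufIndex the slot it landed in. The expression lumSrcPtr = lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize then selects the window of vLumFilterSize consecutive entries that feed the vertical filter for output row dstY; for example, with vLumBufSize = 4, lumBufIndex = 2, lastInLumBuf = 10 and firstLumSrcY = 8 it points at lumPixBuf + 4. Avoiding an explicit wraparound here presumably relies on the pointer array being allocated twice the ring size with the second half aliasing the first (that allocation is outside this diff), which is exactly the bound the ASSERTs on lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2 are checking.
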
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb.c b/contrib/ffmpeg/libswscale/yuv2rgb.c
new file mode 100644
index 000000000..9066b68b2
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/yuv2rgb.c
@@ -0,0 +1,844 @@
+/*
+ * yuv2rgb.c, Software YUV to RGB converter
+ *
+ * Copyright (C) 1999, Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ * All Rights Reserved.
+ *
+ * Functions broken out from display_x11.c and several new modes
+ * added by Håkan Hjort <d95hjort@dtek.chalmers.se>
+ *
+ * 15 & 16 bpp support by Franck Sicard <Franck.Sicard@solsoft.fr>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video decoder
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Make; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * MMX/MMX2 Template stuff from Michael Niedermayer (michaelni@gmx.at) (needed for fast movntq support)
+ * 1,4,8bpp support by Michael Niedermayer (michaelni@gmx.at)
+ * context / deglobalize stuff by Michael Niedermayer
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include "config.h"
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+
+#ifdef HAVE_MLIB
+#include "yuv2rgb_mlib.c"
+#endif
+
+#define DITHER1XBPP // only for mmx
+
+const uint8_t __attribute__((aligned(8))) dither_2x2_4[2][8]={
+{ 1, 3, 1, 3, 1, 3, 1, 3, },
+{ 2, 0, 2, 0, 2, 0, 2, 0, },
+};
+
+const uint8_t __attribute__((aligned(8))) dither_2x2_8[2][8]={
+{ 6, 2, 6, 2, 6, 2, 6, 2, },
+{ 0, 4, 0, 4, 0, 4, 0, 4, },
+};
+
+const uint8_t __attribute__((aligned(8))) dither_8x8_32[8][8]={
+{ 17, 9, 23, 15, 16, 8, 22, 14, },
+{ 5, 29, 3, 27, 4, 28, 2, 26, },
+{ 21, 13, 19, 11, 20, 12, 18, 10, },
+{ 0, 24, 6, 30, 1, 25, 7, 31, },
+{ 16, 8, 22, 14, 17, 9, 23, 15, },
+{ 4, 28, 2, 26, 5, 29, 3, 27, },
+{ 20, 12, 18, 10, 21, 13, 19, 11, },
+{ 1, 25, 7, 31, 0, 24, 6, 30, },
+};
+
+#if 0
+const uint8_t __attribute__((aligned(8))) dither_8x8_64[8][8]={
+{ 0, 48, 12, 60, 3, 51, 15, 63, },
+{ 32, 16, 44, 28, 35, 19, 47, 31, },
+{ 8, 56, 4, 52, 11, 59, 7, 55, },
+{ 40, 24, 36, 20, 43, 27, 39, 23, },
+{ 2, 50, 14, 62, 1, 49, 13, 61, },
+{ 34, 18, 46, 30, 33, 17, 45, 29, },
+{ 10, 58, 6, 54, 9, 57, 5, 53, },
+{ 42, 26, 38, 22, 41, 25, 37, 21, },
+};
+#endif
+
+const uint8_t __attribute__((aligned(8))) dither_8x8_73[8][8]={
+{ 0, 55, 14, 68, 3, 58, 17, 72, },
+{ 37, 18, 50, 32, 40, 22, 54, 35, },
+{ 9, 64, 5, 59, 13, 67, 8, 63, },
+{ 46, 27, 41, 23, 49, 31, 44, 26, },
+{ 2, 57, 16, 71, 1, 56, 15, 70, },
+{ 39, 21, 52, 34, 38, 19, 51, 33, },
+{ 11, 66, 7, 62, 10, 65, 6, 60, },
+{ 48, 30, 43, 25, 47, 29, 42, 24, },
+};
+
+#if 0
+const uint8_t __attribute__((aligned(8))) dither_8x8_128[8][8]={
+{ 68, 36, 92, 60, 66, 34, 90, 58, },
+{ 20, 116, 12, 108, 18, 114, 10, 106, },
+{ 84, 52, 76, 44, 82, 50, 74, 42, },
+{ 0, 96, 24, 120, 6, 102, 30, 126, },
+{ 64, 32, 88, 56, 70, 38, 94, 62, },
+{ 16, 112, 8, 104, 22, 118, 14, 110, },
+{ 80, 48, 72, 40, 86, 54, 78, 46, },
+{ 4, 100, 28, 124, 2, 98, 26, 122, },
+};
+#endif
+
+#if 1
+const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
+{117, 62, 158, 103, 113, 58, 155, 100, },
+{ 34, 199, 21, 186, 31, 196, 17, 182, },
+{144, 89, 131, 76, 141, 86, 127, 72, },
+{ 0, 165, 41, 206, 10, 175, 52, 217, },
+{110, 55, 151, 96, 120, 65, 162, 107, },
+{ 28, 193, 14, 179, 38, 203, 24, 189, },
+{138, 83, 124, 69, 148, 93, 134, 79, },
+{ 7, 172, 48, 213, 3, 168, 45, 210, },
+};
+#elif 1
+// tries to correct a gamma of 1.5
+const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
+{ 0, 143, 18, 200, 2, 156, 25, 215, },
+{ 78, 28, 125, 64, 89, 36, 138, 74, },
+{ 10, 180, 3, 161, 16, 195, 8, 175, },
+{109, 51, 93, 38, 121, 60, 105, 47, },
+{ 1, 152, 23, 210, 0, 147, 20, 205, },
+{ 85, 33, 134, 71, 81, 30, 130, 67, },
+{ 14, 190, 6, 171, 12, 185, 5, 166, },
+{117, 57, 101, 44, 113, 54, 97, 41, },
+};
+#elif 1
+// tries to correct a gamma of 2.0
+const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
+{ 0, 124, 8, 193, 0, 140, 12, 213, },
+{ 55, 14, 104, 42, 66, 19, 119, 52, },
+{ 3, 168, 1, 145, 6, 187, 3, 162, },
+{ 86, 31, 70, 21, 99, 39, 82, 28, },
+{ 0, 134, 11, 206, 0, 129, 9, 200, },
+{ 62, 17, 114, 48, 58, 16, 109, 45, },
+{ 5, 181, 2, 157, 4, 175, 1, 151, },
+{ 95, 36, 78, 26, 90, 34, 74, 24, },
+};
+#else
+// tries to correct a gamma of 2.5
+const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
+{ 0, 107, 3, 187, 0, 125, 6, 212, },
+{ 39, 7, 86, 28, 49, 11, 102, 36, },
+{ 1, 158, 0, 131, 3, 180, 1, 151, },
+{ 68, 19, 52, 12, 81, 25, 64, 17, },
+{ 0, 119, 5, 203, 0, 113, 4, 195, },
+{ 45, 9, 96, 33, 42, 8, 91, 30, },
+{ 2, 172, 1, 144, 2, 165, 0, 137, },
+{ 77, 23, 60, 15, 72, 21, 56, 14, },
+};
+#endif
+
+#ifdef HAVE_MMX
+
+/* hope these constant values are cache line aligned */
+static uint64_t attribute_used __attribute__((aligned(8))) mmx_00ffw = 0x00ff00ff00ff00ffULL;
+static uint64_t attribute_used __attribute__((aligned(8))) mmx_redmask = 0xf8f8f8f8f8f8f8f8ULL;
+static uint64_t attribute_used __attribute__((aligned(8))) mmx_grnmask = 0xfcfcfcfcfcfcfcfcULL;
+
+static uint64_t attribute_used __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFULL;
+static uint64_t attribute_used __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00ULL;
+static uint64_t attribute_used __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000ULL;
+
+// the volatile is required because gcc otherwise optimizes some writes away not knowing that these
+// are read in the asm block
+static volatile uint64_t attribute_used __attribute__((aligned(8))) b5Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) g5Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) g6Dither;
+static volatile uint64_t attribute_used __attribute__((aligned(8))) r5Dither;
+
+static uint64_t __attribute__((aligned(8))) dither4[2]={
+ 0x0103010301030103LL,
+ 0x0200020002000200LL,};
+
+static uint64_t __attribute__((aligned(8))) dither8[2]={
+ 0x0602060206020602LL,
+ 0x0004000400040004LL,};
+
+#undef HAVE_MMX
+
+//MMX versions
+#undef RENAME
+#define HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_3DNOW
+#define RENAME(a) a ## _MMX
+#include "yuv2rgb_template.c"
+
+//MMX2 versions
+#undef RENAME
+#define HAVE_MMX
+#define HAVE_MMX2
+#undef HAVE_3DNOW
+#define RENAME(a) a ## _MMX2
+#include "yuv2rgb_template.c"
+
+#endif /* HAVE_MMX */
+
+const int32_t Inverse_Table_6_9[8][4] = {
+ {117504, 138453, 13954, 34903}, /* no sequence_display_extension */
+ {117504, 138453, 13954, 34903}, /* ITU-R Rec. 709 (1990) */
+ {104597, 132201, 25675, 53279}, /* unspecified */
+ {104597, 132201, 25675, 53279}, /* reserved */
+ {104448, 132798, 24759, 53109}, /* FCC */
+ {104597, 132201, 25675, 53279}, /* ITU-R Rec. 624-4 System B, G */
+ {104597, 132201, 25675, 53279}, /* SMPTE 170M */
+ {117579, 136230, 16907, 35559} /* SMPTE 240M (1987) */
+};
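
The rows appear to be indexed by the MPEG matrix_coefficients value (hence the sequence_display_extension comment) and hold the inverse-matrix factors in 16.16 fixed point, in the order yuv2rgb_c_init_tables below reads them: crv = inv_table[0] (Cr to R), cbu = inv_table[1] (Cb to B), cgu = inv_table[2] (Cb to G) and cgv = inv_table[3] (Cr to G). As a check, the BT.601-style rows give 104597/65536 ≈ 1.596, 132201/65536 ≈ 2.017, 25675/65536 ≈ 0.392 and 53279/65536 ≈ 0.813, and the Rec. 709 row gives 117504/65536 ≈ 1.793 and 138453/65536 ≈ 2.112, the familiar coefficients of those standards.
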
+
+#define RGB(i) \
+ U = pu[i]; \
+ V = pv[i]; \
+ r = c->table_rV[V]; \
+ g = c->table_gU[U] + c->table_gV[V]; \
+ b = c->table_bU[U];
+
+#define DST1(i) \
+ Y = py_1[2*i]; \
+ dst_1[2*i] = r[Y] + g[Y] + b[Y]; \
+ Y = py_1[2*i+1]; \
+ dst_1[2*i+1] = r[Y] + g[Y] + b[Y];
+
+#define DST2(i) \
+ Y = py_2[2*i]; \
+ dst_2[2*i] = r[Y] + g[Y] + b[Y]; \
+ Y = py_2[2*i+1]; \
+ dst_2[2*i+1] = r[Y] + g[Y] + b[Y];
+
+#define DST1RGB(i) \
+ Y = py_1[2*i]; \
+ dst_1[6*i] = r[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = b[Y]; \
+ Y = py_1[2*i+1]; \
+ dst_1[6*i+3] = r[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = b[Y];
+
+#define DST2RGB(i) \
+ Y = py_2[2*i]; \
+ dst_2[6*i] = r[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = b[Y]; \
+ Y = py_2[2*i+1]; \
+ dst_2[6*i+3] = r[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = b[Y];
+
+#define DST1BGR(i) \
+ Y = py_1[2*i]; \
+ dst_1[6*i] = b[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = r[Y]; \
+ Y = py_1[2*i+1]; \
+ dst_1[6*i+3] = b[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = r[Y];
+
+#define DST2BGR(i) \
+ Y = py_2[2*i]; \
+ dst_2[6*i] = b[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = r[Y]; \
+ Y = py_2[2*i+1]; \
+ dst_2[6*i+3] = b[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = r[Y];
+
+#define PROLOG(func_name, dst_type) \
+static int func_name(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, \
+ int srcSliceH, uint8_t* dst[], int dstStride[]){\
+ int y;\
+\
+ if(c->srcFormat == PIX_FMT_YUV422P){\
+ srcStride[1] *= 2;\
+ srcStride[2] *= 2;\
+ }\
+ for(y=0; y<srcSliceH; y+=2){\
+ dst_type *dst_1= (dst_type*)(dst[0] + (y+srcSliceY )*dstStride[0]);\
+ dst_type *dst_2= (dst_type*)(dst[0] + (y+srcSliceY+1)*dstStride[0]);\
+ dst_type *r, *g, *b;\
+ uint8_t *py_1= src[0] + y*srcStride[0];\
+ uint8_t *py_2= py_1 + srcStride[0];\
+ uint8_t *pu= src[1] + (y>>1)*srcStride[1];\
+ uint8_t *pv= src[2] + (y>>1)*srcStride[2];\
+ unsigned int h_size= c->dstW>>3;\
+ while (h_size--) {\
+ int U, V, Y;\
+
+#define EPILOG(dst_delta)\
+ pu += 4;\
+ pv += 4;\
+ py_1 += 8;\
+ py_2 += 8;\
+ dst_1 += dst_delta;\
+ dst_2 += dst_delta;\
+ }\
+ }\
+ return srcSliceH;\
+}
+
+PROLOG(yuv2rgb_c_32, uint32_t)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+
+ RGB(2);
+ DST1(2);
+ DST2(2);
+
+ RGB(3);
+ DST2(3);
+ DST1(3);
+EPILOG(8)
+
+PROLOG(yuv2rgb_c_24_rgb, uint8_t)
+ RGB(0);
+ DST1RGB(0);
+ DST2RGB(0);
+
+ RGB(1);
+ DST2RGB(1);
+ DST1RGB(1);
+
+ RGB(2);
+ DST1RGB(2);
+ DST2RGB(2);
+
+ RGB(3);
+ DST2RGB(3);
+ DST1RGB(3);
+EPILOG(24)
+
+// only trivial mods from yuv2rgb_c_24_rgb
+PROLOG(yuv2rgb_c_24_bgr, uint8_t)
+ RGB(0);
+ DST1BGR(0);
+ DST2BGR(0);
+
+ RGB(1);
+ DST2BGR(1);
+ DST1BGR(1);
+
+ RGB(2);
+ DST1BGR(2);
+ DST2BGR(2);
+
+ RGB(3);
+ DST2BGR(3);
+ DST1BGR(3);
+EPILOG(24)
+
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+PROLOG(yuv2rgb_c_16, uint16_t)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+
+ RGB(2);
+ DST1(2);
+ DST2(2);
+
+ RGB(3);
+ DST2(3);
+ DST1(3);
+EPILOG(8)
+
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+PROLOG(yuv2rgb_c_8, uint8_t)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+
+ RGB(2);
+ DST1(2);
+ DST2(2);
+
+ RGB(3);
+ DST2(3);
+ DST1(3);
+EPILOG(8)
+
+// same structure as yuv2rgb_c_8, but with ordered dithering applied to the r, g, b lookups
+PROLOG(yuv2rgb_c_8_ordered_dither, uint8_t)
+ const uint8_t *d32= dither_8x8_32[y&7];
+ const uint8_t *d64= dither_8x8_73[y&7];
+#define DST1bpp8(i,o) \
+ Y = py_1[2*i]; \
+ dst_1[2*i] = r[Y+d32[0+o]] + g[Y+d32[0+o]] + b[Y+d64[0+o]]; \
+ Y = py_1[2*i+1]; \
+ dst_1[2*i+1] = r[Y+d32[1+o]] + g[Y+d32[1+o]] + b[Y+d64[1+o]];
+
+#define DST2bpp8(i,o) \
+ Y = py_2[2*i]; \
+ dst_2[2*i] = r[Y+d32[8+o]] + g[Y+d32[8+o]] + b[Y+d64[8+o]]; \
+ Y = py_2[2*i+1]; \
+ dst_2[2*i+1] = r[Y+d32[9+o]] + g[Y+d32[9+o]] + b[Y+d64[9+o]];
+
+
+ RGB(0);
+ DST1bpp8(0,0);
+ DST2bpp8(0,0);
+
+ RGB(1);
+ DST2bpp8(1,2);
+ DST1bpp8(1,2);
+
+ RGB(2);
+ DST1bpp8(2,4);
+ DST2bpp8(2,4);
+
+ RGB(3);
+ DST2bpp8(3,6);
+ DST1bpp8(3,6);
+EPILOG(8)
+
+
+// This is structured like yuv2rgb_c_32, but packs two 4-bit pixels per output
+// byte via acc, so r, g, b, dst_1, dst_2 have different types
+PROLOG(yuv2rgb_c_4, uint8_t)
+ int acc;
+#define DST1_4(i) \
+ Y = py_1[2*i]; \
+ acc = r[Y] + g[Y] + b[Y]; \
+ Y = py_1[2*i+1]; \
+ acc |= (r[Y] + g[Y] + b[Y])<<4;\
+ dst_1[i] = acc;
+
+#define DST2_4(i) \
+ Y = py_2[2*i]; \
+ acc = r[Y] + g[Y] + b[Y]; \
+ Y = py_2[2*i+1]; \
+ acc |= (r[Y] + g[Y] + b[Y])<<4;\
+ dst_2[i] = acc;
+
+ RGB(0);
+ DST1_4(0);
+ DST2_4(0);
+
+ RGB(1);
+ DST2_4(1);
+ DST1_4(1);
+
+ RGB(2);
+ DST1_4(2);
+ DST2_4(2);
+
+ RGB(3);
+ DST2_4(3);
+ DST1_4(3);
+EPILOG(4)
+
+PROLOG(yuv2rgb_c_4_ordered_dither, uint8_t)
+ const uint8_t *d64= dither_8x8_73[y&7];
+ const uint8_t *d128=dither_8x8_220[y&7];
+ int acc;
+
+#define DST1bpp4(i,o) \
+ Y = py_1[2*i]; \
+ acc = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
+ Y = py_1[2*i+1]; \
+ acc |= (r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]])<<4;\
+ dst_1[i]= acc;
+
+#define DST2bpp4(i,o) \
+ Y = py_2[2*i]; \
+ acc = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
+ Y = py_2[2*i+1]; \
+ acc |= (r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]])<<4;\
+ dst_2[i]= acc;
+
+
+ RGB(0);
+ DST1bpp4(0,0);
+ DST2bpp4(0,0);
+
+ RGB(1);
+ DST2bpp4(1,2);
+ DST1bpp4(1,2);
+
+ RGB(2);
+ DST1bpp4(2,4);
+ DST2bpp4(2,4);
+
+ RGB(3);
+ DST2bpp4(3,6);
+ DST1bpp4(3,6);
+EPILOG(4)
+
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+PROLOG(yuv2rgb_c_4b, uint8_t)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+
+ RGB(2);
+ DST1(2);
+ DST2(2);
+
+ RGB(3);
+ DST2(3);
+ DST1(3);
+EPILOG(8)
+
+PROLOG(yuv2rgb_c_4b_ordered_dither, uint8_t)
+ const uint8_t *d64= dither_8x8_73[y&7];
+ const uint8_t *d128=dither_8x8_220[y&7];
+
+#define DST1bpp4b(i,o) \
+ Y = py_1[2*i]; \
+ dst_1[2*i] = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
+ Y = py_1[2*i+1]; \
+ dst_1[2*i+1] = r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]];
+
+#define DST2bpp4b(i,o) \
+ Y = py_2[2*i]; \
+ dst_2[2*i] = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
+ Y = py_2[2*i+1]; \
+ dst_2[2*i+1] = r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]];
+
+
+ RGB(0);
+ DST1bpp4b(0,0);
+ DST2bpp4b(0,0);
+
+ RGB(1);
+ DST2bpp4b(1,2);
+ DST1bpp4b(1,2);
+
+ RGB(2);
+ DST1bpp4b(2,4);
+ DST2bpp4b(2,4);
+
+ RGB(3);
+ DST2bpp4b(3,6);
+ DST1bpp4b(3,6);
+EPILOG(8)
+
+PROLOG(yuv2rgb_c_1_ordered_dither, uint8_t)
+ const uint8_t *d128=dither_8x8_220[y&7];
+ char out_1=0, out_2=0;
+ g= c->table_gU[128] + c->table_gV[128];
+
+#define DST1bpp1(i,o) \
+ Y = py_1[2*i]; \
+ out_1+= out_1 + g[Y+d128[0+o]]; \
+ Y = py_1[2*i+1]; \
+ out_1+= out_1 + g[Y+d128[1+o]];
+
+#define DST2bpp1(i,o) \
+ Y = py_2[2*i]; \
+ out_2+= out_2 + g[Y+d128[8+o]]; \
+ Y = py_2[2*i+1]; \
+ out_2+= out_2 + g[Y+d128[9+o]];
+
+ DST1bpp1(0,0);
+ DST2bpp1(0,0);
+
+ DST2bpp1(1,2);
+ DST1bpp1(1,2);
+
+ DST1bpp1(2,4);
+ DST2bpp1(2,4);
+
+ DST2bpp1(3,6);
+ DST1bpp1(3,6);
+
+ dst_1[0]= out_1;
+ dst_2[0]= out_2;
+EPILOG(1)
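+
+/* In the 1 bpp converter above, "out_1 += out_1 + g[...]" shifts one new pixel
+   bit into the low end each time, so after the eight lookups dst_1[0] and
+   dst_2[0] each hold 8 monochrome pixels, with the leftmost pixel in the most
+   significant bit. */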
+
+SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
+{
+#if defined(HAVE_MMX2) || defined(HAVE_MMX)
+ if(c->flags & SWS_CPU_CAPS_MMX2){
+ switch(c->dstFormat){
+ case PIX_FMT_RGB32: return yuv420_rgb32_MMX2;
+ case PIX_FMT_BGR24: return yuv420_rgb24_MMX2;
+ case PIX_FMT_BGR565: return yuv420_rgb16_MMX2;
+ case PIX_FMT_BGR555: return yuv420_rgb15_MMX2;
+ }
+ }
+ if(c->flags & SWS_CPU_CAPS_MMX){
+ switch(c->dstFormat){
+ case PIX_FMT_RGB32: return yuv420_rgb32_MMX;
+ case PIX_FMT_BGR24: return yuv420_rgb24_MMX;
+ case PIX_FMT_BGR565: return yuv420_rgb16_MMX;
+ case PIX_FMT_BGR555: return yuv420_rgb15_MMX;
+ }
+ }
+#endif
+#ifdef HAVE_MLIB
+ {
+ SwsFunc t= yuv2rgb_init_mlib(c);
+ if(t) return t;
+ }
+#endif
+#ifdef HAVE_ALTIVEC
+ if (c->flags & SWS_CPU_CAPS_ALTIVEC)
+ {
+ SwsFunc t = yuv2rgb_init_altivec(c);
+ if(t) return t;
+ }
+#endif
+
+ MSG_WARN("No accelerated colorspace conversion found\n");
+
+ switch(c->dstFormat){
+ case PIX_FMT_BGR32:
+ case PIX_FMT_RGB32: return yuv2rgb_c_32;
+ case PIX_FMT_RGB24: return yuv2rgb_c_24_rgb;
+ case PIX_FMT_BGR24: return yuv2rgb_c_24_bgr;
+ case PIX_FMT_RGB565:
+ case PIX_FMT_BGR565:
+ case PIX_FMT_RGB555:
+ case PIX_FMT_BGR555: return yuv2rgb_c_16;
+ case PIX_FMT_RGB8:
+ case PIX_FMT_BGR8: return yuv2rgb_c_8_ordered_dither;
+ case PIX_FMT_RGB4:
+ case PIX_FMT_BGR4: return yuv2rgb_c_4_ordered_dither;
+ case PIX_FMT_RGB4_BYTE:
+ case PIX_FMT_BGR4_BYTE: return yuv2rgb_c_4b_ordered_dither;
+ case PIX_FMT_MONOBLACK: return yuv2rgb_c_1_ordered_dither;
+ default:
+ assert(0);
+ }
+ return NULL;
+}
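+
+/* Presumably called from sws_getContext() when an unscaled YUV420 -> RGB
+   conversion is requested: the accelerated implementations (MMX/MMX2,
+   mediaLib, AltiVec) are tried first, and the plain C converters above are
+   the fallback, hence the "No accelerated colorspace conversion found"
+   warning. */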
+
+static int div_round (int dividend, int divisor)
+{
+ if (dividend > 0)
+ return (dividend + (divisor>>1)) / divisor;
+ else
+ return -((-dividend + (divisor>>1)) / divisor);
+}
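+
+/* div_round() rounds to nearest, with halfway cases away from zero, and does
+   so symmetrically for negative dividends: div_round(6, 4) == 2 and
+   div_round(-6, 4) == -2, where plain C integer division would give 1 and -1. */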
+
+int yuv2rgb_c_init_tables (SwsContext *c, const int inv_table[4], int fullRange, int brightness, int contrast, int saturation)
+{
+ const int isRgb = isBGR(c->dstFormat);
+ const int bpp = fmt_depth(c->dstFormat);
+ int i;
+ uint8_t table_Y[1024];
+ uint32_t *table_32 = 0;
+ uint16_t *table_16 = 0;
+ uint8_t *table_8 = 0;
+ uint8_t *table_332 = 0;
+ uint8_t *table_121 = 0;
+ uint8_t *table_1 = 0;
+ int entry_size = 0;
+ void *table_r = 0, *table_g = 0, *table_b = 0;
+ void *table_start;
+
+ int64_t crv = inv_table[0];
+ int64_t cbu = inv_table[1];
+ int64_t cgu = -inv_table[2];
+ int64_t cgv = -inv_table[3];
+ int64_t cy = 1<<16;
+ int64_t oy = 0;
+
+//printf("%lld %lld %lld %lld %lld\n", cy, crv, cbu, cgu, cgv);
+ if(!fullRange){
+ cy= (cy*255) / 219;
+ oy= 16<<16;
+ }
+
+ cy = (cy *contrast )>>16;
+ crv= (crv*contrast * saturation)>>32;
+ cbu= (cbu*contrast * saturation)>>32;
+ cgu= (cgu*contrast * saturation)>>32;
+ cgv= (cgv*contrast * saturation)>>32;
+//printf("%lld %lld %lld %lld %lld\n", cy, crv, cbu, cgu, cgv);
+ oy -= 256*brightness;
+
+ for (i = 0; i < 1024; i++) {
+ int j;
+
+ j= (cy*(((i - 384)<<16) - oy) + (1<<31))>>32;
+ j = (j < 0) ? 0 : ((j > 255) ? 255 : j);
+ table_Y[i] = j;
+ }
+
+ switch (bpp) {
+ case 32:
+ table_start= table_32 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint32_t));
+
+ entry_size = sizeof (uint32_t);
+ table_r = table_32 + 197;
+ table_b = table_32 + 197 + 685;
+ table_g = table_32 + 197 + 2*682;
+
+ for (i = -197; i < 256+197; i++)
+ ((uint32_t *)table_r)[i] = table_Y[i+384] << (isRgb ? 16 : 0);
+ for (i = -132; i < 256+132; i++)
+ ((uint32_t *)table_g)[i] = table_Y[i+384] << 8;
+ for (i = -232; i < 256+232; i++)
+ ((uint32_t *)table_b)[i] = table_Y[i+384] << (isRgb ? 0 : 16);
+ break;
+
+ case 24:
+ table_start= table_8 = av_malloc ((256 + 2*232) * sizeof (uint8_t));
+
+ entry_size = sizeof (uint8_t);
+ table_r = table_g = table_b = table_8 + 232;
+
+ for (i = -232; i < 256+232; i++)
+ ((uint8_t * )table_b)[i] = table_Y[i+384];
+ break;
+
+ case 15:
+ case 16:
+ table_start= table_16 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint16_t));
+
+ entry_size = sizeof (uint16_t);
+ table_r = table_16 + 197;
+ table_b = table_16 + 197 + 685;
+ table_g = table_16 + 197 + 2*682;
+
+ for (i = -197; i < 256+197; i++) {
+ int j = table_Y[i+384] >> 3;
+
+ if (isRgb)
+ j <<= ((bpp==16) ? 11 : 10);
+
+ ((uint16_t *)table_r)[i] = j;
+ }
+ for (i = -132; i < 256+132; i++) {
+ int j = table_Y[i+384] >> ((bpp==16) ? 2 : 3);
+
+ ((uint16_t *)table_g)[i] = j << 5;
+ }
+ for (i = -232; i < 256+232; i++) {
+ int j = table_Y[i+384] >> 3;
+
+ if (!isRgb)
+ j <<= ((bpp==16) ? 11 : 10);
+
+ ((uint16_t *)table_b)[i] = j;
+ }
+ break;
+
+ case 8:
+ table_start= table_332 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
+
+ entry_size = sizeof (uint8_t);
+ table_r = table_332 + 197;
+ table_b = table_332 + 197 + 685;
+ table_g = table_332 + 197 + 2*682;
+
+ for (i = -197; i < 256+197; i++) {
+ int j = (table_Y[i+384 - 16] + 18)/36;
+
+ if (isRgb)
+ j <<= 5;
+
+ ((uint8_t *)table_r)[i] = j;
+ }
+ for (i = -132; i < 256+132; i++) {
+ int j = (table_Y[i+384 - 16] + 18)/36;
+
+ if (!isRgb)
+ j <<= 1;
+
+ ((uint8_t *)table_g)[i] = j << 2;
+ }
+ for (i = -232; i < 256+232; i++) {
+ int j = (table_Y[i+384 - 37] + 43)/85;
+
+ if (!isRgb)
+ j <<= 6;
+
+ ((uint8_t *)table_b)[i] = j;
+ }
+ break;
+ case 4:
+ case 4|128:
+ table_start= table_121 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
+
+ entry_size = sizeof (uint8_t);
+ table_r = table_121 + 197;
+ table_b = table_121 + 197 + 685;
+ table_g = table_121 + 197 + 2*682;
+
+ for (i = -197; i < 256+197; i++) {
+ int j = table_Y[i+384 - 110] >> 7;
+
+ if (isRgb)
+ j <<= 3;
+
+ ((uint8_t *)table_r)[i] = j;
+ }
+ for (i = -132; i < 256+132; i++) {
+ int j = (table_Y[i+384 - 37]+ 43)/85;
+
+ ((uint8_t *)table_g)[i] = j << 1;
+ }
+ for (i = -232; i < 256+232; i++) {
+ int j =table_Y[i+384 - 110] >> 7;
+
+ if (!isRgb)
+ j <<= 3;
+
+ ((uint8_t *)table_b)[i] = j;
+ }
+ break;
+
+ case 1:
+ table_start= table_1 = av_malloc (256*2 * sizeof (uint8_t));
+
+ entry_size = sizeof (uint8_t);
+ table_g = table_1;
+ table_r = table_b = NULL;
+
+ for (i = 0; i < 256+256; i++) {
+ int j = table_Y[i + 384 - 110]>>7;
+
+ ((uint8_t *)table_g)[i] = j;
+ }
+ break;
+
+ default:
+ table_start= NULL;
+ MSG_ERR("%ibpp not supported by yuv2rgb\n", bpp);
+ //free mem?
+ return -1;
+ }
+
+ for (i = 0; i < 256; i++) {
+ c->table_rV[i] = table_r + entry_size * div_round (crv * (i-128), 76309);
+ c->table_gU[i] = table_g + entry_size * div_round (cgu * (i-128), 76309);
+ c->table_gV[i] = entry_size * div_round (cgv * (i-128), 76309);
+ c->table_bU[i] = table_b + entry_size * div_round (cbu * (i-128), 76309);
+ }
+
+ av_free(c->yuvTable);
+ c->yuvTable= table_start;
+ return 0;
+}
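+
+/* Note on the final loop above: the chroma coefficients arrive in 16.16 fixed
+   point (inv_table, rescaled by contrast/saturation), and
+   div_round(coeff * (i-128), 76309) turns each one into a signed offset, in
+   table entries, for chroma value i (76309 is 255/219 in 16.16 fixed point).
+   Adding that offset to a base pointer shifts which part of the Y-indexed
+   ramp a pixel reads, which is why the per-pixel conversion needs only table
+   lookups and adds. */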
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb_altivec.c b/contrib/ffmpeg/libswscale/yuv2rgb_altivec.c
new file mode 100644
index 000000000..ca0680a49
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/yuv2rgb_altivec.c
@@ -0,0 +1,963 @@
+/*
+ marc.hoffman@analog.com March 8, 2004
+
+ Altivec Acceleration for Color Space Conversion revision 0.2
+
+  converts I420/YV12 to RGB in various formats;
+  it rejects images that are not in 4:2:0 format,
+  it rejects images whose width is not a multiple of 16,
+  it rejects images whose height is not a multiple of 2.
+  Rejected cases fall back to the C conversion code.
+
+ lots of optimizations to be done here
+
+  1. need to fix the saturation code; I just couldn't get it to fly with packs
+     and adds, so we currently use max/min to clip.
+
+ 2. the inefficient use of chroma loading needs a bit of brushing up
+
+ 3. analysis of pipeline stalls needs to be done, use shark to identify pipeline stalls
+
+
+  MODIFIED to calculate coeffs from the currently selected color space.
+  MODIFIED core to be a macro in which you specify the output format.
+  ADDED UYVY conversion, which is never called due to something in SWSCALE.
+  CORRECTED algorithm selection to be strict on input formats.
+  ADDED runtime detection of AltiVec.
+
+ ADDED altivec_yuv2packedX vertical scl + RGB converter
+
+ March 27,2004
+ PERFORMANCE ANALYSIS
+
+  The C version uses 25% of the processor, or ~250 Mips, for D1 rawvideo used as the test.
+  The ALTIVEC version uses 10% of the processor, or ~100 Mips, for the same D1 sequence.
+
+  720*480*30 ~10 MPS
+
+  so we have roughly 10 clocks per pixel; this is too high, something has to be wrong.
+
+ OPTIMIZED clip codes to utilize vec_max and vec_packs removing the need for vec_min.
+
+  OPTIMIZED DST OUTPUT cache/dma controls. We are pretty much
+  guaranteed to have the input video frame in cache, since it was just
+  decompressed, so it probably resides in the L1 caches. However, we are
+  creating the output video stream; this needs to use the DSTST instruction
+  to optimize for the cache. We couple this with the fact that we are not
+  going to visit the input buffer again, so we mark it Least Recently Used.
+  This shaves 25% of the processor cycles off.
+
+  Now MEMCPY is the largest MIPS consumer in the system, probably due
+  to the inefficient X11 stuff.
+
+  GL libraries seem to be very slow on this machine (1.33GHz PB running
+  Jaguar); this is not the case for my 1GHz PB. I thought it might be
+  a versioning issue, however I have libGL.1.2.dylib on both
+  machines. ((We need to figure this out now))
+
+ GL2 libraries work now with patch for RGB32
+
+ NOTE quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor
+
+ Integrated luma prescaling adjustment for saturation/contrast/brightness adjustment.
+*/
+
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <assert.h>
+#include "config.h"
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+
+#undef PROFILE_THE_BEAST
+#undef INC_SCALING
+
+typedef unsigned char ubyte;
+typedef signed char sbyte;
+
+
+/* RGB interleaver, 16 planar pels 8-bit samples per channel in
+ homogeneous vector registers x0,x1,x2 are interleaved with the
+ following technique:
+
+ o0 = vec_mergeh (x0,x1);
+ o1 = vec_perm (o0, x2, perm_rgb_0);
+ o2 = vec_perm (o0, x2, perm_rgb_1);
+ o3 = vec_mergel (x0,x1);
+ o4 = vec_perm (o3,o2,perm_rgb_2);
+ o5 = vec_perm (o3,o2,perm_rgb_3);
+
+ perm_rgb_0: o0(RG).h v1(B) --> o1*
+ 0 1 2 3 4
+ rgbr|gbrg|brgb|rgbr
+ 0010 0100 1001 0010
+ 0102 3145 2673 894A
+
+ perm_rgb_1: o0(RG).h v1(B) --> o2
+ 0 1 2 3 4
+ gbrg|brgb|bbbb|bbbb
+ 0100 1001 1111 1111
+ B5CD 6EF7 89AB CDEF
+
+ perm_rgb_2: o3(RG).l o2(rgbB.l) --> o4*
+ 0 1 2 3 4
+ gbrg|brgb|rgbr|gbrg
+ 1111 1111 0010 0100
+ 89AB CDEF 0182 3945
+
+ perm_rgb_3: o3(RG).l o2(rgbB.l) ---> o5*
+ 0 1 2 3 4
+ brgb|rgbr|gbrg|brgb
+ 1001 0010 0100 1001
+ a67b 89cA BdCD eEFf
+
+*/
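+
+/* A scalar sketch of the interleave performed below with two merges and four
+   permutes: three planes of 16 bytes each become 48 bytes of 3-byte groups,
+   the channel order being determined by the argument order used in the
+   vec_mstrgb24()/vec_mstbgr24() wrappers. */
+static inline void merge3_scalar(const uint8_t x0[16], const uint8_t x1[16],
+                                 const uint8_t x2[16], uint8_t out[48])
+{
+    int i;
+    for (i = 0; i < 16; i++) {
+        out[3*i + 0] = x0[i];
+        out[3*i + 1] = x1[i];
+        out[3*i + 2] = x2[i];
+    }
+}
+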
+static
+const vector unsigned char
+ perm_rgb_0 = (const vector unsigned char)AVV(0x00,0x01,0x10,0x02,0x03,0x11,0x04,0x05,
+ 0x12,0x06,0x07,0x13,0x08,0x09,0x14,0x0a),
+ perm_rgb_1 = (const vector unsigned char)AVV(0x0b,0x15,0x0c,0x0d,0x16,0x0e,0x0f,0x17,
+ 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f),
+ perm_rgb_2 = (const vector unsigned char)AVV(0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
+ 0x00,0x01,0x18,0x02,0x03,0x19,0x04,0x05),
+ perm_rgb_3 = (const vector unsigned char)AVV(0x1a,0x06,0x07,0x1b,0x08,0x09,0x1c,0x0a,
+ 0x0b,0x1d,0x0c,0x0d,0x1e,0x0e,0x0f,0x1f);
+
+#define vec_merge3(x2,x1,x0,y0,y1,y2) \
+do { \
+ typeof(x0) o0,o2,o3; \
+ o0 = vec_mergeh (x0,x1); \
+ y0 = vec_perm (o0, x2, perm_rgb_0);\
+ o2 = vec_perm (o0, x2, perm_rgb_1);\
+ o3 = vec_mergel (x0,x1); \
+ y1 = vec_perm (o3,o2,perm_rgb_2); \
+ y2 = vec_perm (o3,o2,perm_rgb_3); \
+} while(0)
+
+#define vec_mstbgr24(x0,x1,x2,ptr) \
+do { \
+ typeof(x0) _0,_1,_2; \
+ vec_merge3 (x0,x1,x2,_0,_1,_2); \
+ vec_st (_0, 0, ptr++); \
+ vec_st (_1, 0, ptr++); \
+ vec_st (_2, 0, ptr++); \
+} while (0);
+
+#define vec_mstrgb24(x0,x1,x2,ptr) \
+do { \
+ typeof(x0) _0,_1,_2; \
+ vec_merge3 (x2,x1,x0,_0,_1,_2); \
+ vec_st (_0, 0, ptr++); \
+ vec_st (_1, 0, ptr++); \
+ vec_st (_2, 0, ptr++); \
+} while (0);
+
+/* pack the pixels in rgb0 format
+ msb R
+ lsb 0
+*/
+#define vec_mstrgb32(T,x0,x1,x2,x3,ptr) \
+do { \
+ T _0,_1,_2,_3; \
+ _0 = vec_mergeh (x0,x1); \
+ _1 = vec_mergeh (x2,x3); \
+ _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
+ _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
+ vec_st (_2, 0*16, (T *)ptr); \
+ vec_st (_3, 1*16, (T *)ptr); \
+ _0 = vec_mergel (x0,x1); \
+ _1 = vec_mergel (x2,x3); \
+ _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
+ _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
+ vec_st (_2, 2*16, (T *)ptr); \
+ vec_st (_3, 3*16, (T *)ptr); \
+ ptr += 4; \
+} while (0);
+
+/*
+
+ | 1 0 1.4021 | | Y |
+ | 1 -0.3441 -0.7142 |x| Cb|
+ | 1 1.7718 0 | | Cr|
+
+
+ Y: [-128 127]
+ Cb/Cr : [-128 127]
+
+  Typical YUV conversion works on Y in the 0-255 range; this version has been optimized for JPEG decode.
+
+*/
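+
+/* In scalar form the matrix above (JPEG/full-range coefficients) reads:
+
+       R = Y + 1.4021 * Cr
+       G = Y - 0.3441 * Cb - 0.7142 * Cr
+       B = Y + 1.7718 * Cb
+
+   cvtyuvtoRGB() below evaluates this shape in signed 16-bit fixed point via
+   vec_mradds, with the actual coefficients taken from the CY/OY/CRV/CBU/CGU/CGV
+   vectors prepared in yuv2rgb_altivec_init_tables(). */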
+
+#define vec_unh(x) \
+ (vector signed short) \
+ vec_perm(x,(typeof(x))AVV(0),\
+ (vector unsigned char)AVV(0x10,0x00,0x10,0x01,0x10,0x02,0x10,0x03,\
+ 0x10,0x04,0x10,0x05,0x10,0x06,0x10,0x07))
+#define vec_unl(x) \
+ (vector signed short) \
+ vec_perm(x,(typeof(x))AVV(0),\
+ (vector unsigned char)AVV(0x10,0x08,0x10,0x09,0x10,0x0A,0x10,0x0B,\
+ 0x10,0x0C,0x10,0x0D,0x10,0x0E,0x10,0x0F))
+
+#define vec_clip_s16(x) \
+ vec_max (vec_min (x, (vector signed short)AVV(235,235,235,235,235,235,235,235)),\
+ (vector signed short)AVV(16, 16, 16, 16, 16, 16, 16, 16 ))
+
+#define vec_packclp(x,y) \
+ (vector unsigned char)vec_packs \
+ ((vector unsigned short)vec_max (x,(vector signed short) AVV(0)), \
+ (vector unsigned short)vec_max (y,(vector signed short) AVV(0)))
+
+//#define out_pixels(a,b,c,ptr) vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),a,a,a,ptr)
+
+
+static inline void cvtyuvtoRGB (SwsContext *c,
+ vector signed short Y, vector signed short U, vector signed short V,
+ vector signed short *R, vector signed short *G, vector signed short *B)
+{
+ vector signed short vx,ux,uvx;
+
+ Y = vec_mradds (Y, c->CY, c->OY);
+ U = vec_sub (U,(vector signed short)
+ vec_splat((vector signed short)AVV(128),0));
+ V = vec_sub (V,(vector signed short)
+ vec_splat((vector signed short)AVV(128),0));
+
+ // ux = (CBU*(u<<c->CSHIFT)+0x4000)>>15;
+ ux = vec_sl (U, c->CSHIFT);
+ *B = vec_mradds (ux, c->CBU, Y);
+
+ // vx = (CRV*(v<<c->CSHIFT)+0x4000)>>15;
+ vx = vec_sl (V, c->CSHIFT);
+ *R = vec_mradds (vx, c->CRV, Y);
+
+ // uvx = ((CGU*u) + (CGV*v))>>15;
+ uvx = vec_mradds (U, c->CGU, Y);
+ *G = vec_mradds (V, c->CGV, uvx);
+}
+
+
+/*
+ ------------------------------------------------------------------------------
+ CS converters
+ ------------------------------------------------------------------------------
+*/
+
+
+#define DEFCSP420_CVT(name,out_pixels) \
+static int altivec_##name (SwsContext *c, \
+ unsigned char **in, int *instrides, \
+ int srcSliceY, int srcSliceH, \
+ unsigned char **oplanes, int *outstrides) \
+{ \
+ int w = c->srcW; \
+ int h = srcSliceH; \
+ int i,j; \
+ int instrides_scl[3]; \
+ vector unsigned char y0,y1; \
+ \
+ vector signed char u,v; \
+ \
+ vector signed short Y0,Y1,Y2,Y3; \
+ vector signed short U,V; \
+ vector signed short vx,ux,uvx; \
+ vector signed short vx0,ux0,uvx0; \
+ vector signed short vx1,ux1,uvx1; \
+ vector signed short R0,G0,B0; \
+ vector signed short R1,G1,B1; \
+ vector unsigned char R,G,B; \
+ \
+ vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP; \
+ vector unsigned char align_perm; \
+ \
+ vector signed short \
+ lCY = c->CY, \
+ lOY = c->OY, \
+ lCRV = c->CRV, \
+ lCBU = c->CBU, \
+ lCGU = c->CGU, \
+ lCGV = c->CGV; \
+ \
+ vector unsigned short lCSHIFT = c->CSHIFT; \
+ \
+ ubyte *y1i = in[0]; \
+ ubyte *y2i = in[0]+instrides[0]; \
+ ubyte *ui = in[1]; \
+ ubyte *vi = in[2]; \
+ \
+ vector unsigned char *oute \
+ = (vector unsigned char *) \
+ (oplanes[0]+srcSliceY*outstrides[0]); \
+ vector unsigned char *outo \
+ = (vector unsigned char *) \
+ (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]); \
+ \
+ \
+ instrides_scl[0] = instrides[0]*2-w; /* the loop moves y{1,2}i by w */ \
+ instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */ \
+ instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */ \
+ \
+ \
+ for (i=0;i<h/2;i++) { \
+ vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0); \
+ vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1); \
+ \
+ for (j=0;j<w/16;j++) { \
+ \
+ y1ivP = (vector unsigned char *)y1i; \
+ y2ivP = (vector unsigned char *)y2i; \
+ uivP = (vector unsigned char *)ui; \
+ vivP = (vector unsigned char *)vi; \
+ \
+ align_perm = vec_lvsl (0, y1i); \
+ y0 = (vector unsigned char)vec_perm (y1ivP[0], y1ivP[1], align_perm);\
+ \
+ align_perm = vec_lvsl (0, y2i); \
+ y1 = (vector unsigned char)vec_perm (y2ivP[0], y2ivP[1], align_perm);\
+ \
+ align_perm = vec_lvsl (0, ui); \
+ u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm); \
+ \
+ align_perm = vec_lvsl (0, vi); \
+ v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm); \
+ \
+ u = (vector signed char) \
+ vec_sub (u,(vector signed char) \
+ vec_splat((vector signed char)AVV(128),0));\
+ v = (vector signed char) \
+ vec_sub (v,(vector signed char) \
+ vec_splat((vector signed char)AVV(128),0));\
+ \
+ U = vec_unpackh (u); \
+ V = vec_unpackh (v); \
+ \
+ \
+ Y0 = vec_unh (y0); \
+ Y1 = vec_unl (y0); \
+ Y2 = vec_unh (y1); \
+ Y3 = vec_unl (y1); \
+ \
+ Y0 = vec_mradds (Y0, lCY, lOY); \
+ Y1 = vec_mradds (Y1, lCY, lOY); \
+ Y2 = vec_mradds (Y2, lCY, lOY); \
+ Y3 = vec_mradds (Y3, lCY, lOY); \
+ \
+ /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */ \
+ ux = vec_sl (U, lCSHIFT); \
+ ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0)); \
+ ux0 = vec_mergeh (ux,ux); \
+ ux1 = vec_mergel (ux,ux); \
+ \
+ /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */ \
+ vx = vec_sl (V, lCSHIFT); \
+ vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0)); \
+ vx0 = vec_mergeh (vx,vx); \
+ vx1 = vec_mergel (vx,vx); \
+ \
+ /* uvx = ((CGU*u) + (CGV*v))>>15 */ \
+ uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0)); \
+ uvx = vec_mradds (V, lCGV, uvx); \
+ uvx0 = vec_mergeh (uvx,uvx); \
+ uvx1 = vec_mergel (uvx,uvx); \
+ \
+ R0 = vec_add (Y0,vx0); \
+ G0 = vec_add (Y0,uvx0); \
+ B0 = vec_add (Y0,ux0); \
+ R1 = vec_add (Y1,vx1); \
+ G1 = vec_add (Y1,uvx1); \
+ B1 = vec_add (Y1,ux1); \
+ \
+ R = vec_packclp (R0,R1); \
+ G = vec_packclp (G0,G1); \
+ B = vec_packclp (B0,B1); \
+ \
+ out_pixels(R,G,B,oute); \
+ \
+ R0 = vec_add (Y2,vx0); \
+ G0 = vec_add (Y2,uvx0); \
+ B0 = vec_add (Y2,ux0); \
+ R1 = vec_add (Y3,vx1); \
+ G1 = vec_add (Y3,uvx1); \
+ B1 = vec_add (Y3,ux1); \
+ R = vec_packclp (R0,R1); \
+ G = vec_packclp (G0,G1); \
+ B = vec_packclp (B0,B1); \
+ \
+ \
+ out_pixels(R,G,B,outo); \
+ \
+ y1i += 16; \
+ y2i += 16; \
+ ui += 8; \
+ vi += 8; \
+ \
+ } \
+ \
+ outo += (outstrides[0])>>4; \
+ oute += (outstrides[0])>>4; \
+ \
+ ui += instrides_scl[1]; \
+ vi += instrides_scl[2]; \
+ y1i += instrides_scl[0]; \
+ y2i += instrides_scl[0]; \
+ } \
+ return srcSliceH; \
+}
+
+
+#define out_abgr(a,b,c,ptr) vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),c,b,a,ptr)
+#define out_bgra(a,b,c,ptr) vec_mstrgb32(typeof(a),c,b,a,((typeof (a))AVV(0)),ptr)
+#define out_rgba(a,b,c,ptr) vec_mstrgb32(typeof(a),a,b,c,((typeof (a))AVV(0)),ptr)
+#define out_argb(a,b,c,ptr) vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),a,b,c,ptr)
+#define out_rgb24(a,b,c,ptr) vec_mstrgb24(a,b,c,ptr)
+#define out_bgr24(a,b,c,ptr) vec_mstbgr24(a,b,c,ptr)
+
+DEFCSP420_CVT (yuv2_abgr, out_abgr)
+#if 1
+DEFCSP420_CVT (yuv2_bgra, out_bgra)
+#else
+static int altivec_yuv2_bgra32 (SwsContext *c,
+ unsigned char **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ unsigned char **oplanes, int *outstrides)
+{
+ int w = c->srcW;
+ int h = srcSliceH;
+ int i,j;
+ int instrides_scl[3];
+ vector unsigned char y0,y1;
+
+ vector signed char u,v;
+
+ vector signed short Y0,Y1,Y2,Y3;
+ vector signed short U,V;
+ vector signed short vx,ux,uvx;
+ vector signed short vx0,ux0,uvx0;
+ vector signed short vx1,ux1,uvx1;
+ vector signed short R0,G0,B0;
+ vector signed short R1,G1,B1;
+ vector unsigned char R,G,B;
+
+ vector unsigned char *uivP, *vivP;
+ vector unsigned char align_perm;
+
+ vector signed short
+ lCY = c->CY,
+ lOY = c->OY,
+ lCRV = c->CRV,
+ lCBU = c->CBU,
+ lCGU = c->CGU,
+ lCGV = c->CGV;
+
+ vector unsigned short lCSHIFT = c->CSHIFT;
+
+ ubyte *y1i = in[0];
+ ubyte *y2i = in[0]+w;
+ ubyte *ui = in[1];
+ ubyte *vi = in[2];
+
+ vector unsigned char *oute
+ = (vector unsigned char *)
+ (oplanes[0]+srcSliceY*outstrides[0]);
+ vector unsigned char *outo
+ = (vector unsigned char *)
+ (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);
+
+
+ instrides_scl[0] = instrides[0];
+ instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */
+ instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */
+
+
+ for (i=0;i<h/2;i++) {
+ vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);
+ vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);
+
+ for (j=0;j<w/16;j++) {
+
+ y0 = vec_ldl (0,y1i);
+ y1 = vec_ldl (0,y2i);
+ uivP = (vector unsigned char *)ui;
+ vivP = (vector unsigned char *)vi;
+
+ align_perm = vec_lvsl (0, ui);
+ u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm);
+
+ align_perm = vec_lvsl (0, vi);
+ v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm);
+ u = (vector signed char)
+ vec_sub (u,(vector signed char)
+ vec_splat((vector signed char)AVV(128),0));
+
+ v = (vector signed char)
+ vec_sub (v, (vector signed char)
+ vec_splat((vector signed char)AVV(128),0));
+
+ U = vec_unpackh (u);
+ V = vec_unpackh (v);
+
+
+ Y0 = vec_unh (y0);
+ Y1 = vec_unl (y0);
+ Y2 = vec_unh (y1);
+ Y3 = vec_unl (y1);
+
+ Y0 = vec_mradds (Y0, lCY, lOY);
+ Y1 = vec_mradds (Y1, lCY, lOY);
+ Y2 = vec_mradds (Y2, lCY, lOY);
+ Y3 = vec_mradds (Y3, lCY, lOY);
+
+ /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */
+ ux = vec_sl (U, lCSHIFT);
+ ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0));
+ ux0 = vec_mergeh (ux,ux);
+ ux1 = vec_mergel (ux,ux);
+
+ /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */
+ vx = vec_sl (V, lCSHIFT);
+ vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0));
+ vx0 = vec_mergeh (vx,vx);
+ vx1 = vec_mergel (vx,vx);
+ /* uvx = ((CGU*u) + (CGV*v))>>15 */
+ uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0));
+ uvx = vec_mradds (V, lCGV, uvx);
+ uvx0 = vec_mergeh (uvx,uvx);
+ uvx1 = vec_mergel (uvx,uvx);
+ R0 = vec_add (Y0,vx0);
+ G0 = vec_add (Y0,uvx0);
+ B0 = vec_add (Y0,ux0);
+ R1 = vec_add (Y1,vx1);
+ G1 = vec_add (Y1,uvx1);
+ B1 = vec_add (Y1,ux1);
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
+
+ out_argb(R,G,B,oute);
+ R0 = vec_add (Y2,vx0);
+ G0 = vec_add (Y2,uvx0);
+ B0 = vec_add (Y2,ux0);
+ R1 = vec_add (Y3,vx1);
+ G1 = vec_add (Y3,uvx1);
+ B1 = vec_add (Y3,ux1);
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
+
+ out_argb(R,G,B,outo);
+ y1i += 16;
+ y2i += 16;
+ ui += 8;
+ vi += 8;
+
+ }
+
+ outo += (outstrides[0])>>4;
+ oute += (outstrides[0])>>4;
+
+ ui += instrides_scl[1];
+ vi += instrides_scl[2];
+ y1i += instrides_scl[0];
+ y2i += instrides_scl[0];
+ }
+ return srcSliceH;
+}
+
+#endif
+
+
+DEFCSP420_CVT (yuv2_rgba, out_rgba)
+DEFCSP420_CVT (yuv2_argb, out_argb)
+DEFCSP420_CVT (yuv2_rgb24, out_rgb24)
+DEFCSP420_CVT (yuv2_bgr24, out_bgr24)
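+
+/* The instantiations above differ only in their store macro: the four
+   arguments of vec_mstrgb32() become the four bytes of each output pixel in
+   memory order, so out_abgr stores 0,B,G,R per pixel and out_rgba stores
+   R,G,B,0, while the 24 bpp variants go through vec_mstrgb24()/vec_mstbgr24(). */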
+
+
+// uyvy|uyvy|uyvy|uyvy
+// 0123 4567 89ab cdef
+static
+const vector unsigned char
+ demux_u = (const vector unsigned char)AVV(0x10,0x00,0x10,0x00,
+ 0x10,0x04,0x10,0x04,
+ 0x10,0x08,0x10,0x08,
+ 0x10,0x0c,0x10,0x0c),
+ demux_v = (const vector unsigned char)AVV(0x10,0x02,0x10,0x02,
+ 0x10,0x06,0x10,0x06,
+ 0x10,0x0A,0x10,0x0A,
+ 0x10,0x0E,0x10,0x0E),
+ demux_y = (const vector unsigned char)AVV(0x10,0x01,0x10,0x03,
+ 0x10,0x05,0x10,0x07,
+ 0x10,0x09,0x10,0x0B,
+ 0x10,0x0D,0x10,0x0F);
+
+/*
+ this is so I can play live CCIR raw video
+*/
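+
+/* Worked example of the demux constants above: with
+   uyvy = u0 y0 v0 y1 u1 y2 v1 y3 u2 y4 v2 y5 u3 y6 v3 y7,
+   vec_perm(uyvy, zero, demux_u) yields the shorts {u0,u0,u1,u1,u2,u2,u3,u3},
+   demux_v yields {v0,v0,v1,v1,v2,v2,v3,v3}, and demux_y the eight luma
+   samples, each widened to 16 bits ready for cvtyuvtoRGB(). */
+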
+static int altivec_uyvy_rgb32 (SwsContext *c,
+ unsigned char **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ unsigned char **oplanes, int *outstrides)
+{
+ int w = c->srcW;
+ int h = srcSliceH;
+ int i,j;
+ vector unsigned char uyvy;
+ vector signed short Y,U,V;
+ vector signed short R0,G0,B0,R1,G1,B1;
+ vector unsigned char R,G,B;
+ vector unsigned char *out;
+ ubyte *img;
+
+ img = in[0];
+ out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]);
+
+ for (i=0;i<h;i++) {
+ for (j=0;j<w/16;j++) {
+ uyvy = vec_ld (0, img);
+ U = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
+
+ V = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
+
+ Y = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);
+
+ cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0);
+
+ uyvy = vec_ld (16, img);
+ U = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
+
+ V = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
+
+ Y = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);
+
+ cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1);
+
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
+
+ // vec_mstbgr24 (R,G,B, out);
+ out_rgba (R,G,B,out);
+
+ img += 32;
+ }
+ }
+ return srcSliceH;
+}
+
+
+
+/* OK, currently the acceleration routine only supports
+  inputs whose width is a multiple of 16
+  and whose height is a multiple of 2.
+
+  For anything else we just fall back to the C code.
+*/
+SwsFunc yuv2rgb_init_altivec (SwsContext *c)
+{
+ if (!(c->flags & SWS_CPU_CAPS_ALTIVEC))
+ return NULL;
+
+ /*
+    and this seems not to matter too much; I tried a bunch of
+    videos with abnormal widths and mplayer crashes elsewhere.
+ mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
+ boom with X11 bad match.
+
+ */
+ if ((c->srcW & 0xf) != 0) return NULL;
+
+ switch (c->srcFormat) {
+ case PIX_FMT_YUV410P:
+ case PIX_FMT_YUV420P:
+ /*case IMGFMT_CLPL: ??? */
+ case PIX_FMT_GRAY8:
+ case PIX_FMT_NV12:
+ case PIX_FMT_NV21:
+ if ((c->srcH & 0x1) != 0)
+ return NULL;
+
+ switch(c->dstFormat){
+ case PIX_FMT_RGB24:
+ MSG_WARN("ALTIVEC: Color Space RGB24\n");
+ return altivec_yuv2_rgb24;
+ case PIX_FMT_BGR24:
+ MSG_WARN("ALTIVEC: Color Space BGR24\n");
+ return altivec_yuv2_bgr24;
+ case PIX_FMT_ARGB:
+ MSG_WARN("ALTIVEC: Color Space ARGB\n");
+ return altivec_yuv2_argb;
+ case PIX_FMT_ABGR:
+ MSG_WARN("ALTIVEC: Color Space ABGR\n");
+ return altivec_yuv2_abgr;
+ case PIX_FMT_RGBA:
+ MSG_WARN("ALTIVEC: Color Space RGBA\n");
+ return altivec_yuv2_rgba;
+ case PIX_FMT_BGRA:
+ MSG_WARN("ALTIVEC: Color Space BGRA\n");
+ return altivec_yuv2_bgra;
+ default: return NULL;
+ }
+ break;
+
+ case PIX_FMT_UYVY422:
+ switch(c->dstFormat){
+ case PIX_FMT_BGR32:
+ MSG_WARN("ALTIVEC: Color Space UYVY -> RGB32\n");
+ return altivec_uyvy_rgb32;
+ default: return NULL;
+ }
+ break;
+
+ }
+ return NULL;
+}
+
+static uint16_t roundToInt16(int64_t f){
+ int r= (f + (1<<15))>>16;
+ if(r<-0x7FFF) return 0x8000;
+ else if(r> 0x7FFF) return 0x7FFF;
+ else return r;
+}
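+
+/* roundToInt16() rounds a 16.16 fixed-point value to the nearest integer and
+   saturates it to the signed 16-bit range, e.g.
+   roundToInt16((3 << 16) + (1 << 15)) returns 4 (3.5 rounds up), while
+   anything at or above 32767.5 comes back as 0x7FFF. */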
+
+void yuv2rgb_altivec_init_tables (SwsContext *c, const int inv_table[4],int brightness,int contrast, int saturation)
+{
+ union {
+ signed short tmp[8] __attribute__ ((aligned(16)));
+ vector signed short vec;
+ } buf;
+
+ buf.tmp[0] = ( (0xffffLL) * contrast>>8 )>>9; //cy
+ buf.tmp[1] = -256*brightness; //oy
+ buf.tmp[2] = (inv_table[0]>>3) *(contrast>>16)*(saturation>>16); //crv
+ buf.tmp[3] = (inv_table[1]>>3) *(contrast>>16)*(saturation>>16); //cbu
+ buf.tmp[4] = -((inv_table[2]>>1)*(contrast>>16)*(saturation>>16)); //cgu
+ buf.tmp[5] = -((inv_table[3]>>1)*(contrast>>16)*(saturation>>16)); //cgv
+
+
+ c->CSHIFT = (vector unsigned short)vec_splat_u16(2);
+ c->CY = vec_splat ((vector signed short)buf.vec, 0);
+ c->OY = vec_splat ((vector signed short)buf.vec, 1);
+ c->CRV = vec_splat ((vector signed short)buf.vec, 2);
+ c->CBU = vec_splat ((vector signed short)buf.vec, 3);
+ c->CGU = vec_splat ((vector signed short)buf.vec, 4);
+ c->CGV = vec_splat ((vector signed short)buf.vec, 5);
+#if 0
+{
+int i;
+char *v[6]={"cy","oy","crv","cbu","cgu","cgv"};
+for (i=0; i<6;i++)
+ printf("%s %d ", v[i],buf.tmp[i] );
+ printf("\n");
+}
+#endif
+ return;
+}
+
+
+void
+altivec_yuv2packedX (SwsContext *c,
+ int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, int dstW, int dstY)
+{
+ int i,j;
+ vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
+ vector signed short R0,G0,B0,R1,G1,B1;
+
+ vector unsigned char R,G,B;
+ vector unsigned char *out,*nout;
+
+ vector signed short RND = vec_splat_s16(1<<3);
+ vector unsigned short SCL = vec_splat_u16(4);
+ unsigned long scratch[16] __attribute__ ((aligned (16)));
+
+ vector signed short *YCoeffs, *CCoeffs;
+
+ YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize;
+ CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize;
+
+ out = (vector unsigned char *)dest;
+
+ for(i=0; i<dstW; i+=16){
+ Y0 = RND;
+ Y1 = RND;
+ /* extract 16 coeffs from lumSrc */
+ for(j=0; j<lumFilterSize; j++) {
+ X0 = vec_ld (0, &lumSrc[j][i]);
+ X1 = vec_ld (16, &lumSrc[j][i]);
+ Y0 = vec_mradds (X0, YCoeffs[j], Y0);
+ Y1 = vec_mradds (X1, YCoeffs[j], Y1);
+ }
+
+ U = RND;
+ V = RND;
+ /* extract 8 coeffs from U,V */
+ for(j=0; j<chrFilterSize; j++) {
+ X = vec_ld (0, &chrSrc[j][i/2]);
+ U = vec_mradds (X, CCoeffs[j], U);
+ X = vec_ld (0, &chrSrc[j][i/2+2048]);
+ V = vec_mradds (X, CCoeffs[j], V);
+ }
+
+ /* scale and clip signals */
+ Y0 = vec_sra (Y0, SCL);
+ Y1 = vec_sra (Y1, SCL);
+ U = vec_sra (U, SCL);
+ V = vec_sra (V, SCL);
+
+ Y0 = vec_clip_s16 (Y0);
+ Y1 = vec_clip_s16 (Y1);
+ U = vec_clip_s16 (U);
+ V = vec_clip_s16 (V);
+
+ /* now we have
+ Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
+ U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
+
+ Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
+ U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
+ V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
+ */
+
+ U0 = vec_mergeh (U,U);
+ V0 = vec_mergeh (V,V);
+
+ U1 = vec_mergel (U,U);
+ V1 = vec_mergel (V,V);
+
+ cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
+ cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
+
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
+
+ switch(c->dstFormat) {
+ case PIX_FMT_ABGR: out_abgr (R,G,B,out); break;
+ case PIX_FMT_BGRA: out_bgra (R,G,B,out); break;
+ case PIX_FMT_RGBA: out_rgba (R,G,B,out); break;
+ case PIX_FMT_ARGB: out_argb (R,G,B,out); break;
+ case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break;
+ case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break;
+ default:
+ {
+ /* If this is reached, the caller should have called yuv2packedXinC
+ instead. */
+ static int printed_error_message;
+ if(!printed_error_message) {
+ MSG_ERR("altivec_yuv2packedX doesn't support %s output\n",
+ sws_format_name(c->dstFormat));
+ printed_error_message=1;
+ }
+ return;
+ }
+ }
+ }
+
+ if (i < dstW) {
+ i -= 16;
+
+ Y0 = RND;
+ Y1 = RND;
+ /* extract 16 coeffs from lumSrc */
+ for(j=0; j<lumFilterSize; j++) {
+ X0 = vec_ld (0, &lumSrc[j][i]);
+ X1 = vec_ld (16, &lumSrc[j][i]);
+ Y0 = vec_mradds (X0, YCoeffs[j], Y0);
+ Y1 = vec_mradds (X1, YCoeffs[j], Y1);
+ }
+
+ U = RND;
+ V = RND;
+ /* extract 8 coeffs from U,V */
+ for(j=0; j<chrFilterSize; j++) {
+ X = vec_ld (0, &chrSrc[j][i/2]);
+ U = vec_mradds (X, CCoeffs[j], U);
+ X = vec_ld (0, &chrSrc[j][i/2+2048]);
+ V = vec_mradds (X, CCoeffs[j], V);
+ }
+
+ /* scale and clip signals */
+ Y0 = vec_sra (Y0, SCL);
+ Y1 = vec_sra (Y1, SCL);
+ U = vec_sra (U, SCL);
+ V = vec_sra (V, SCL);
+
+ Y0 = vec_clip_s16 (Y0);
+ Y1 = vec_clip_s16 (Y1);
+ U = vec_clip_s16 (U);
+ V = vec_clip_s16 (V);
+
+ /* now we have
+ Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
+ U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
+
+ Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
+ U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
+ V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
+ */
+
+ U0 = vec_mergeh (U,U);
+ V0 = vec_mergeh (V,V);
+
+ U1 = vec_mergel (U,U);
+ V1 = vec_mergel (V,V);
+
+ cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
+ cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
+
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
+
+ nout = (vector unsigned char *)scratch;
+ switch(c->dstFormat) {
+ case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break;
+ case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break;
+ case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break;
+ case PIX_FMT_ARGB: out_argb (R,G,B,nout); break;
+ case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break;
+ case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break;
+ default:
+ /* Unreachable, I think. */
+ MSG_ERR("altivec_yuv2packedX doesn't support %s output\n",
+ sws_format_name(c->dstFormat));
+ return;
+ }
+
+ memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
+ }
+
+}
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb_mlib.c b/contrib/ffmpeg/libswscale/yuv2rgb_mlib.c
new file mode 100644
index 000000000..824ee39d1
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/yuv2rgb_mlib.c
@@ -0,0 +1,87 @@
+/*
+ * yuv2rgb_mlib.c, Software YUV to RGB converter using mediaLib
+ *
+ * Copyright (C) 2000, Håkan Hjort <d95hjort@dtek.chalmers.se>
+ * All Rights Reserved.
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video decoder
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Make; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <mlib_types.h>
+#include <mlib_status.h>
+#include <mlib_sys.h>
+#include <mlib_video.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#include "swscale.h"
+
+static int mlib_YUV2ARGB420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ if(c->srcFormat == PIX_FMT_YUV422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ assert(srcStride[1] == srcStride[2]);
+
+ mlib_VideoColorYUV2ARGB420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW,
+ srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
+ return srcSliceH;
+}
+
+static int mlib_YUV2ABGR420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ if(c->srcFormat == PIX_FMT_YUV422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ assert(srcStride[1] == srcStride[2]);
+
+ mlib_VideoColorYUV2ABGR420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW,
+ srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
+ return srcSliceH;
+}
+
+static int mlib_YUV2RGB420_24(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ if(c->srcFormat == PIX_FMT_YUV422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ assert(srcStride[1] == srcStride[2]);
+
+ mlib_VideoColorYUV2RGB420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW,
+ srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
+ return srcSliceH;
+}
+
+
+SwsFunc yuv2rgb_init_mlib(SwsContext *c)
+{
+ switch(c->dstFormat){
+ case PIX_FMT_RGB24: return mlib_YUV2RGB420_24;
+ case PIX_FMT_BGR32: return mlib_YUV2ARGB420_32;
+ case PIX_FMT_RGB32: return mlib_YUV2ABGR420_32;
+ default: return NULL;
+ }
+}
+
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb_template.c b/contrib/ffmpeg/libswscale/yuv2rgb_template.c
new file mode 100644
index 000000000..fd222350e
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/yuv2rgb_template.c
@@ -0,0 +1,540 @@
+
+/*
+ * yuv2rgb_mmx.c, Software YUV to RGB converter with Intel MMX "technology"
+ *
+ * Copyright (C) 2000, Silicon Integrated System Corp.
+ * All Rights Reserved.
+ *
+ * Author: Olie Lho <ollie@sis.com.tw>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video decoder
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Make; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * 15,24 bpp and dithering from Michael Niedermayer (michaelni@gmx.at)
+ * MMX/MMX2 Template stuff from Michael Niedermayer (needed for fast movntq support)
+ * context / deglobalize stuff by Michael Niedermayer
+ */
+
+#undef MOVNTQ
+#undef EMMS
+#undef SFENCE
+
+#ifdef HAVE_3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS "femms"
+#else
+#define EMMS "emms"
+#endif
+
+#ifdef HAVE_MMX2
+#define MOVNTQ "movntq"
+#define SFENCE "sfence"
+#else
+#define MOVNTQ "movq"
+#define SFENCE "/nop"
+#endif
+
+#define YUV2RGB \
+ /* Do the multiply part of the conversion for even and odd pixels,
+ register usage:
+ mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+ mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+ mm6 -> Y even, mm7 -> Y odd */\
+ /* convert the chroma part */\
+ "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+ "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+\
+ "psllw $3, %%mm0;" /* Promote precision */ \
+ "psllw $3, %%mm1;" /* Promote precision */ \
+\
+ "psubsw "U_OFFSET"(%4), %%mm0;" /* Cb -= 128 */ \
+ "psubsw "V_OFFSET"(%4), %%mm1;" /* Cr -= 128 */ \
+\
+ "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+ "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+\
+ "pmulhw "UG_COEFF"(%4), %%mm2;" /* Mul Cb with green coeff -> Cb green */ \
+ "pmulhw "VG_COEFF"(%4), %%mm3;" /* Mul Cr with green coeff -> Cr green */ \
+\
+ "pmulhw "UB_COEFF"(%4), %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */\
+ "pmulhw "VR_COEFF"(%4), %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */\
+\
+ "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */\
+\
+ /* convert the luma part */\
+ "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
+ "pand "MANGLE(mmx_00ffw)", %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\
+\
+ "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */\
+\
+ "psllw $3, %%mm6;" /* Promote precision */\
+ "psllw $3, %%mm7;" /* Promote precision */\
+\
+ "psubw "Y_OFFSET"(%4), %%mm6;" /* Y -= 16 */\
+ "psubw "Y_OFFSET"(%4), %%mm7;" /* Y -= 16 */\
+\
+ "pmulhw "Y_COEFF"(%4), %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */\
+ "pmulhw "Y_COEFF"(%4), %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */\
+\
+ /* Do the addition part of the conversion for even and odd pixels,
+ register usage:
+ mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+ mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+ mm6 -> Y even, mm7 -> Y odd */\
+ "movq %%mm0, %%mm3;" /* Copy Cblue */\
+ "movq %%mm1, %%mm4;" /* Copy Cred */\
+ "movq %%mm2, %%mm5;" /* Copy Cgreen */\
+\
+ "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */\
+ "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */\
+\
+ "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */\
+ "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */\
+\
+ "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */\
+ "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */\
+\
+ /* Limit RGB even to 0..255 */\
+ "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */\
+ "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */\
+ "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */\
+\
+ /* Limit RGB odd to 0..255 */\
+ "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */\
+ "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */\
+ "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */\
+\
+ /* Interleave RGB even and odd */\
+ "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */\
+ "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */\
+ "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */\
+
+
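+/* A rough scalar gist of the YUV2RGB block above, per pair of pixels: each
+   channel is formed as (Y - Yoffset)*Ycoeff plus a chroma term ((U-128) or
+   (V-128) times the per-channel coefficient), evaluated in 16-bit fixed point
+   with pmulhw/paddsw and finally saturated to 0..255 by packuswb; the
+   offset/coefficient block is addressed through %4 (&c->redDither) via the
+   *_OFFSET / *_COEFF macros defined elsewhere in libswscale. */
+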
+static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, h_size;
+
+ if(c->srcFormat == PIX_FMT_YUV422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ h_size= (c->dstW+7)&~7;
+ if(h_size*2 > dstStride[0]) h_size-=8;
+
+ __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
+//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
+//srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
+ for (y= 0; y<srcSliceH; y++ ) {
+ uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *_py = src[0] + y*srcStride[0];
+ uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+ long index= -h_size/2;
+
+ b5Dither= dither8[y&1];
+ g6Dither= dither4[y&1];
+ g5Dither= dither8[y&1];
+ r5Dither= dither8[(y+1)&1];
+        /* This MMX assembly code deals with a SINGLE scan line at a time;
+           it converts 8 pixels in each iteration. */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+// ".balign 16 \n\t"
+ "1: \n\t"
+/* no speed difference on my p3@500 with prefetch,
+ * if it is faster for anyone with -benchmark then tell me
+ PREFETCH" 64(%0) \n\t"
+ PREFETCH" 64(%1) \n\t"
+ PREFETCH" 64(%2) \n\t"
+*/
+YUV2RGB
+
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm0;"
+ "paddusb "MANGLE(g6Dither)", %%mm2;"
+ "paddusb "MANGLE(r5Dither)", %%mm1;"
+#endif
+ /* mask unneeded bits off */
+ "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+ "pand "MANGLE(mmx_grnmask)", %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+ "psrlw $3,%%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+
+ "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+ "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+
+ /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
+ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $3, %%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
+
+        /* convert rgb24 plane to rgb16 pack for pixel 4-7 */
+ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $3, %%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+ "add $16, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r" (index), "+r" (_image)
+ : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+ );
+ }
+
+ __asm__ __volatile__ (EMMS);
+
+ return srcSliceH;
+}
+
+static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, h_size;
+
+ if(c->srcFormat == PIX_FMT_YUV422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ h_size= (c->dstW+7)&~7;
+ if(h_size*2 > dstStride[0]) h_size-=8;
+
+ __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
+//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
+//srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
+ for (y= 0; y<srcSliceH; y++ ) {
+ uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *_py = src[0] + y*srcStride[0];
+ uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+ long index= -h_size/2;
+
+ b5Dither= dither8[y&1];
+ g6Dither= dither4[y&1];
+ g5Dither= dither8[y&1];
+ r5Dither= dither8[(y+1)&1];
+        /* This MMX assembly code deals with a SINGLE scan line at a time;
+           it converts 8 pixels in each iteration. */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+// ".balign 16 \n\t"
+ "1: \n\t"
+YUV2RGB
+
+#ifdef DITHER1XBPP
+ "paddusb "MANGLE(b5Dither)", %%mm0 \n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm1 \n\t"
+#endif
+
+ /* mask unneeded bits off */
+ "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm2;" /* g7g6g5g4 g3_0_0_0 g7g6g5g4 g3_0_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+ "psrlw $3,%%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+ "psrlw $1,%%mm1;" /* 0_r7r6r5 r4r3_0_0 0_r7r6r5 r4r3_0_0 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+
+ "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+ "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+
+ /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
+ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3_0_0_0 */
+ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $2, %%mm2;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+ "por %%mm2, %%mm0;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
+
+        /* convert rgb24 plane to rgb16 pack for pixel 4-7 */
+ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 0_g7g6g5 g4g3_0_0 */
+ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $2, %%mm7;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "por %%mm7, %%mm5;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+ "add $16, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+ : "+r" (index), "+r" (_image)
+ : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+ );
+ }
+
+ __asm__ __volatile__ (EMMS);
+ return srcSliceH;
+}
+
+static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, h_size;
+
+ if(c->srcFormat == PIX_FMT_YUV422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ h_size= (c->dstW+7)&~7;
+ if(h_size*3 > dstStride[0]) h_size-=8;
+
+ __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
+
+ for (y= 0; y<srcSliceH; y++ ) {
+ uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *_py = src[0] + y*srcStride[0];
+ uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+ long index= -h_size/2;
+
+        /* This MMX assembly code deals with a SINGLE scan line at a time;
+           it converts 8 pixels in each iteration. */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+// ".balign 16 \n\t"
+ "1: \n\t"
+YUV2RGB
+ /* mm0=B, %%mm2=G, %%mm1=R */
+#ifdef HAVE_MMX2
+ "movq "MANGLE(M24A)", %%mm4 \n\t"
+ "movq "MANGLE(M24C)", %%mm7 \n\t"
+ "pshufw $0x50, %%mm0, %%mm5 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */
+ "pshufw $0x50, %%mm2, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */
+ "pshufw $0x00, %%mm1, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */
+
+ "pand %%mm4, %%mm5 \n\t" /* B2 B1 B0 */
+ "pand %%mm4, %%mm3 \n\t" /* G2 G1 G0 */
+ "pand %%mm7, %%mm6 \n\t" /* R1 R0 */
+
+ "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */
+ "por %%mm5, %%mm6 \n\t"
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, (%1) \n\t"
+
+ "psrlq $8, %%mm2 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */
+ "pshufw $0xA5, %%mm0, %%mm5 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */
+ "pshufw $0x55, %%mm2, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */
+ "pshufw $0xA5, %%mm1, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */
+
+ "pand "MANGLE(M24B)", %%mm5 \n\t" /* B5 B4 B3 */
+ "pand %%mm7, %%mm3 \n\t" /* G4 G3 */
+ "pand %%mm4, %%mm6 \n\t" /* R4 R3 R2 */
+
+ "por %%mm5, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, 8(%1) \n\t"
+
+ "pshufw $0xFF, %%mm0, %%mm5 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */
+ "pshufw $0xFA, %%mm2, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */
+ "pshufw $0xFA, %%mm1, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "pand %%mm7, %%mm5 \n\t" /* B7 B6 */
+ "pand %%mm4, %%mm3 \n\t" /* G7 G6 G5 */
+ "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+\
+ "por %%mm5, %%mm3 \n\t"
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, 16(%1) \n\t"
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ "pxor %%mm4, %%mm4 \n\t"
+
+#else
+
+ "pxor %%mm4, %%mm4 \n\t"
+ "movq %%mm0, %%mm5 \n\t" /* B */
+ "movq %%mm1, %%mm6 \n\t" /* R */
+ "punpcklbw %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */
+ "punpcklbw %%mm4, %%mm1 \n\t" /* 0R0R0R0R 0 */
+ "punpckhbw %%mm2, %%mm5 \n\t" /* GBGBGBGB 2 */
+ "punpckhbw %%mm4, %%mm6 \n\t" /* 0R0R0R0R 2 */
+ "movq %%mm0, %%mm7 \n\t" /* GBGBGBGB 0 */
+ "movq %%mm5, %%mm3 \n\t" /* GBGBGBGB 2 */
+ "punpcklwd %%mm1, %%mm7 \n\t" /* 0RGB0RGB 0 */
+ "punpckhwd %%mm1, %%mm0 \n\t" /* 0RGB0RGB 1 */
+ "punpcklwd %%mm6, %%mm5 \n\t" /* 0RGB0RGB 2 */
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */
+
+ "movq %%mm7, %%mm2 \n\t" /* 0RGB0RGB 0 */
+ "movq %%mm0, %%mm6 \n\t" /* 0RGB0RGB 1 */
+ "movq %%mm5, %%mm1 \n\t" /* 0RGB0RGB 2 */
+ "movq %%mm3, %%mm4 \n\t" /* 0RGB0RGB 3 */
+
+ "psllq $40, %%mm7 \n\t" /* RGB00000 0 */
+ "psllq $40, %%mm0 \n\t" /* RGB00000 1 */
+ "psllq $40, %%mm5 \n\t" /* RGB00000 2 */
+ "psllq $40, %%mm3 \n\t" /* RGB00000 3 */
+
+ "punpckhdq %%mm2, %%mm7 \n\t" /* 0RGBRGB0 0 */
+ "punpckhdq %%mm6, %%mm0 \n\t" /* 0RGBRGB0 1 */
+ "punpckhdq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */
+ "punpckhdq %%mm4, %%mm3 \n\t" /* 0RGBRGB0 3 */
+
+ "psrlq $8, %%mm7 \n\t" /* 00RGBRGB 0 */
+ "movq %%mm0, %%mm6 \n\t" /* 0RGBRGB0 1 */
+ "psllq $40, %%mm0 \n\t" /* GB000000 1 */
+ "por %%mm0, %%mm7 \n\t" /* GBRGBRGB 0 */
+ MOVNTQ" %%mm7, (%1) \n\t"
+
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */
+ "movq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */
+ "psllq $24, %%mm5 \n\t" /* BRGB0000 2 */
+ "por %%mm5, %%mm6 \n\t" /* BRGBRGBR 1 */
+ MOVNTQ" %%mm6, 8(%1) \n\t"
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+
+ "psrlq $40, %%mm1 \n\t" /* 000000RG 2 */
+ "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */
+ "por %%mm3, %%mm1 \n\t" /* RGBRGBRG 2 */
+ MOVNTQ" %%mm1, 16(%1) \n\t"
+
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "pxor %%mm4, %%mm4 \n\t"
+#endif
+
+ "add $24, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r" (index), "+r" (_image)
+ : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+ );
+ }
+
+ __asm__ __volatile__ (EMMS);
+ return srcSliceH;
+}
+
+static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, h_size;
+
+ if(c->srcFormat == PIX_FMT_YUV422P){
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
+ }
+
+ h_size= (c->dstW+7)&~7;
+ if(h_size*4 > dstStride[0]) h_size-=8;
+
+ __asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
+
+ for (y= 0; y<srcSliceH; y++ ) {
+ uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *_py = src[0] + y*srcStride[0];
+ uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
+ long index= -h_size/2;
+
+        /* This MMX assembly code deals with a SINGLE scan line at a time;
+           it converts 8 pixels in each iteration. */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+// ".balign 16 \n\t"
+ "1: \n\t"
+YUV2RGB
+ /* convert RGB plane to RGB packed format,
+ mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
+ mm4 -> GB, mm5 -> AR pixel 4-7,
+ mm6 -> GB, mm7 -> AR pixel 0-3 */
+ "pxor %%mm3, %%mm3;" /* zero mm3 */
+
+ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "movq %%mm1, %%mm7;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
+
+ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "movq %%mm1, %%mm5;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
+
+ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
+ "punpcklbw %%mm3, %%mm7;" /* 00 R3 00 R2 00 R1 00 R0 */
+
+ "punpcklwd %%mm7, %%mm6;" /* 00 R1 B1 G1 00 R0 B0 G0 */
+ MOVNTQ " %%mm6, (%1);" /* Store ARGB1 ARGB0 */
+
+ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
+
+ "punpckhwd %%mm7, %%mm6;" /* 00 R3 G3 B3 00 R2 B3 G2 */
+ MOVNTQ " %%mm6, 8 (%1);" /* Store ARGB3 ARGB2 */
+
+ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
+ "punpckhbw %%mm3, %%mm5;" /* 00 R7 00 R6 00 R5 00 R4 */
+
+ "punpcklwd %%mm5, %%mm4;" /* 00 R5 B5 G5 00 R4 B4 G4 */
+ MOVNTQ " %%mm4, 16 (%1);" /* Store ARGB5 ARGB4 */
+
+ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
+
+ "punpckhwd %%mm5, %%mm4;" /* 00 R7 G7 B7 00 R6 B6 G6 */
+ MOVNTQ " %%mm4, 24 (%1);" /* Store ARGB7 ARGB6 */
+
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+
+ "add $32, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r" (index), "+r" (_image)
+ : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
+ );
+ }
+
+ __asm__ __volatile__ (EMMS);
+ return srcSliceH;
+}
diff --git a/contrib/ffmpeg/makefile.xine.in b/contrib/ffmpeg/makefile.xine.in
new file mode 100644
index 000000000..3515c2aaa
--- /dev/null
+++ b/contrib/ffmpeg/makefile.xine.in
@@ -0,0 +1,30 @@
+srcdir = @srcdir@
+CC = @CC@
+CFLAGS = @CFLAGS@
+LDFLAGS = @LDFLAGS@
+
+config.mak: $(srcdir)/configure makefile.xine
+ $(srcdir)/configure \
+ --disable-shared --enable-static \
+ --disable-encoders \
+ --disable-demuxers \
+ --disable-muxers \
+ --disable-strip \
+ --make="$(MAKE)" \
+ --cc="$(CC)" \
+ --extra-cflags="$(CFLAGS) -fPIC -DPIC" \
+ --extra-ldflags="$(LDFLAGS)" \
+ --enable-gpl \
+ --enable-pthreads \
+ --disable-ffmpeg \
+ --disable-ffserver \
+ --disable-ffplay
+
+libavutil/libavutil.a: config.mak
+ $(MAKE) -C libavutil libavutil.a
+
+libavcodec/libavcodec.a: config.mak
+ $(MAKE) -C libavcodec libavcodec.a
+
+libpostproc/libpostproc.a: config.mak
+ $(MAKE) -C libpostproc libpostproc.a
diff --git a/contrib/ffmpeg/output_example.c b/contrib/ffmpeg/output_example.c
new file mode 100644
index 000000000..7f75b76f2
--- /dev/null
+++ b/contrib/ffmpeg/output_example.c
@@ -0,0 +1,546 @@
+/*
+ * Libavformat API example: Output a media file in any supported
+ * libavformat format. The default codecs are used.
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+#ifndef M_PI
+#define M_PI 3.1415926535897931
+#endif
+
+#include "avformat.h"
+#include "swscale.h"
+
+/* 5 seconds stream duration */
+#define STREAM_DURATION 5.0
+#define STREAM_FRAME_RATE 25 /* 25 images/s */
+#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
+#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
+
+static int sws_flags = SWS_BICUBIC;
+
+/**************************************************************/
+/* audio output */
+
+float t, tincr, tincr2;
+int16_t *samples;
+uint8_t *audio_outbuf;
+int audio_outbuf_size;
+int audio_input_frame_size;
+
+/*
+ * add an audio output stream
+ */
+static AVStream *add_audio_stream(AVFormatContext *oc, int codec_id)
+{
+ AVCodecContext *c;
+ AVStream *st;
+
+ st = av_new_stream(oc, 1);
+ if (!st) {
+ fprintf(stderr, "Could not alloc stream\n");
+ exit(1);
+ }
+
+ c = st->codec;
+ c->codec_id = codec_id;
+ c->codec_type = CODEC_TYPE_AUDIO;
+
+ /* put sample parameters */
+ c->bit_rate = 64000;
+ c->sample_rate = 44100;
+ c->channels = 2;
+ return st;
+}
+
+static void open_audio(AVFormatContext *oc, AVStream *st)
+{
+ AVCodecContext *c;
+ AVCodec *codec;
+
+ c = st->codec;
+
+ /* find the audio encoder */
+ codec = avcodec_find_encoder(c->codec_id);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ /* open it */
+ if (avcodec_open(c, codec) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ /* init signal generator */
+ t = 0;
+ tincr = 2 * M_PI * 110.0 / c->sample_rate;
+ /* increment frequency by 110 Hz per second */
+ tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
+
+ audio_outbuf_size = 10000;
+ audio_outbuf = av_malloc(audio_outbuf_size);
+
+ /* ugly hack for PCM codecs (will be removed ASAP with new PCM
+ support) to compute the input frame size in samples */
+ if (c->frame_size <= 1) {
+ audio_input_frame_size = audio_outbuf_size / c->channels;
+ switch(st->codec->codec_id) {
+ case CODEC_ID_PCM_S16LE:
+ case CODEC_ID_PCM_S16BE:
+ case CODEC_ID_PCM_U16LE:
+ case CODEC_ID_PCM_U16BE:
+ audio_input_frame_size >>= 1;
+ break;
+ default:
+ break;
+ }
+ } else {
+ audio_input_frame_size = c->frame_size;
+ }
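+ /* Illustrative note: for the 16-bit PCM codecs above each sample occupies
+ 2 bytes, so the 10000-byte output buffer corresponds to
+ audio_outbuf_size / (2 * channels) samples per channel - hence the
+ extra >>= 1 in the switch above. */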
+ samples = av_malloc(audio_input_frame_size * 2 * c->channels);
+}
+
+/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
+ 'nb_channels' channels */
+static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
+{
+ int j, i, v;
+ int16_t *q;
+
+ q = samples;
+ for(j=0;j<frame_size;j++) {
+ v = (int)(sin(t) * 10000);
+ for(i = 0; i < nb_channels; i++)
+ *q++ = v;
+ t += tincr;
+ tincr += tincr2;
+ }
+}
+
+static void write_audio_frame(AVFormatContext *oc, AVStream *st)
+{
+ AVCodecContext *c;
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
+ c = st->codec;
+
+ get_audio_frame(samples, audio_input_frame_size, c->channels);
+
+ pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
+
+ pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
+ pkt.flags |= PKT_FLAG_KEY;
+ pkt.stream_index= st->index;
+ pkt.data= audio_outbuf;
+
+ /* write the compressed frame in the media file */
+ if (av_write_frame(oc, &pkt) != 0) {
+ fprintf(stderr, "Error while writing audio frame\n");
+ exit(1);
+ }
+}
+
+static void close_audio(AVFormatContext *oc, AVStream *st)
+{
+ avcodec_close(st->codec);
+
+ av_free(samples);
+ av_free(audio_outbuf);
+}
+
+/**************************************************************/
+/* video output */
+
+AVFrame *picture, *tmp_picture;
+uint8_t *video_outbuf;
+int frame_count, video_outbuf_size;
+
+/* add a video output stream */
+static AVStream *add_video_stream(AVFormatContext *oc, int codec_id)
+{
+ AVCodecContext *c;
+ AVStream *st;
+
+ st = av_new_stream(oc, 0);
+ if (!st) {
+ fprintf(stderr, "Could not alloc stream\n");
+ exit(1);
+ }
+
+ c = st->codec;
+ c->codec_id = codec_id;
+ c->codec_type = CODEC_TYPE_VIDEO;
+
+ /* put sample parameters */
+ c->bit_rate = 400000;
+ /* resolution must be a multiple of two */
+ c->width = 352;
+ c->height = 288;
+ /* time base: this is the fundamental unit of time (in seconds) in terms
+ of which frame timestamps are represented. for fixed-fps content,
+ timebase should be 1/framerate and timestamp increments should be
+ identically 1. */
+ c->time_base.den = STREAM_FRAME_RATE;
+ c->time_base.num = 1;
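+ /* e.g. with STREAM_FRAME_RATE == 25 the time base is 1/25 s, so a frame
+ with pts == n is presented n/25 seconds into the stream */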
+ c->gop_size = 12; /* emit one intra frame every twelve frames at most */
+ c->pix_fmt = STREAM_PIX_FMT;
+ if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
+ /* just for testing, we also add B frames */
+ c->max_b_frames = 2;
+ }
+ if (c->codec_id == CODEC_ID_MPEG1VIDEO){
+ /* needed to avoid using macroblocks in which some coefficients overflow;
+ this doesn't happen with normal video, it just happens here because the
+ motion of the chroma plane doesn't match the luma plane */
+ c->mb_decision=2;
+ }
+ // some formats want stream headers to be separate
+ if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp"))
+ c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+
+ return st;
+}
+
+static AVFrame *alloc_picture(int pix_fmt, int width, int height)
+{
+ AVFrame *picture;
+ uint8_t *picture_buf;
+ int size;
+
+ picture = avcodec_alloc_frame();
+ if (!picture)
+ return NULL;
+ size = avpicture_get_size(pix_fmt, width, height);
+ picture_buf = av_malloc(size);
+ if (!picture_buf) {
+ av_free(picture);
+ return NULL;
+ }
+ avpicture_fill((AVPicture *)picture, picture_buf,
+ pix_fmt, width, height);
+ return picture;
+}
+
+static void open_video(AVFormatContext *oc, AVStream *st)
+{
+ AVCodec *codec;
+ AVCodecContext *c;
+
+ c = st->codec;
+
+ /* find the video encoder */
+ codec = avcodec_find_encoder(c->codec_id);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ /* open the codec */
+ if (avcodec_open(c, codec) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ video_outbuf = NULL;
+ if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
+ /* allocate output buffer */
+ /* XXX: API change will be done */
+ /* buffers passed into lav* can be allocated any way you prefer,
+ as long as they're aligned enough for the architecture, and
+ they're freed appropriately (such as using av_free for buffers
+ allocated with av_malloc) */
+ video_outbuf_size = 200000;
+ video_outbuf = av_malloc(video_outbuf_size);
+ }
+
+ /* allocate the encoded raw picture */
+ picture = alloc_picture(c->pix_fmt, c->width, c->height);
+ if (!picture) {
+ fprintf(stderr, "Could not allocate picture\n");
+ exit(1);
+ }
+
+ /* if the output format is not YUV420P, then a temporary YUV420P
+ picture is needed too. It is then converted to the required
+ output format */
+ tmp_picture = NULL;
+ if (c->pix_fmt != PIX_FMT_YUV420P) {
+ tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
+ if (!tmp_picture) {
+ fprintf(stderr, "Could not allocate temporary picture\n");
+ exit(1);
+ }
+ }
+}
+
+/* prepare a dummy image */
+static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
+{
+ int x, y, i;
+
+ i = frame_index;
+
+ /* Y */
+ for(y=0;y<height;y++) {
+ for(x=0;x<width;x++) {
+ pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
+ }
+ }
+
+ /* Cb and Cr */
+ for(y=0;y<height/2;y++) {
+ for(x=0;x<width/2;x++) {
+ pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
+ pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
+ }
+ }
+}
+
+static void write_video_frame(AVFormatContext *oc, AVStream *st)
+{
+ int out_size, ret;
+ AVCodecContext *c;
+ static struct SwsContext *img_convert_ctx;
+
+ c = st->codec;
+
+ if (frame_count >= STREAM_NB_FRAMES) {
+ /* no more frames to compress. The codec has a latency of a few
+ frames if using B frames, so we get the last frames by
+ passing the same picture again */
+ } else {
+ if (c->pix_fmt != PIX_FMT_YUV420P) {
+ /* as we only generate a YUV420P picture, we must convert it
+ to the codec pixel format if needed */
+ if (img_convert_ctx == NULL) {
+ img_convert_ctx = sws_getContext(c->width, c->height,
+ PIX_FMT_YUV420P,
+ c->width, c->height,
+ c->pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (img_convert_ctx == NULL) {
+ fprintf(stderr, "Cannot initialize the conversion context\n");
+ exit(1);
+ }
+ }
+ fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
+ sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
+ 0, c->height, picture->data, picture->linesize);
+ } else {
+ fill_yuv_image(picture, frame_count, c->width, c->height);
+ }
+ }
+
+
+ if (oc->oformat->flags & AVFMT_RAWPICTURE) {
+ /* raw video case. The API will change slightly in the near
+ future for that */
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
+ pkt.flags |= PKT_FLAG_KEY;
+ pkt.stream_index= st->index;
+ pkt.data= (uint8_t *)picture;
+ pkt.size= sizeof(AVPicture);
+
+ ret = av_write_frame(oc, &pkt);
+ } else {
+ /* encode the image */
+ out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
+ /* if zero size, it means the image was buffered */
+ if (out_size > 0) {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
+ pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
+ if(c->coded_frame->key_frame)
+ pkt.flags |= PKT_FLAG_KEY;
+ pkt.stream_index= st->index;
+ pkt.data= video_outbuf;
+ pkt.size= out_size;
+
+ /* write the compressed frame in the media file */
+ ret = av_write_frame(oc, &pkt);
+ } else {
+ ret = 0;
+ }
+ }
+ if (ret != 0) {
+ fprintf(stderr, "Error while writing video frame\n");
+ exit(1);
+ }
+ frame_count++;
+}
+
+static void close_video(AVFormatContext *oc, AVStream *st)
+{
+ avcodec_close(st->codec);
+ av_free(picture->data[0]);
+ av_free(picture);
+ if (tmp_picture) {
+ av_free(tmp_picture->data[0]);
+ av_free(tmp_picture);
+ }
+ av_free(video_outbuf);
+}
+
+/**************************************************************/
+/* media file output */
+
+int main(int argc, char **argv)
+{
+ const char *filename;
+ AVOutputFormat *fmt;
+ AVFormatContext *oc;
+ AVStream *audio_st, *video_st;
+ double audio_pts, video_pts;
+ int i;
+
+ /* initialize libavcodec, and register all codecs and formats */
+ av_register_all();
+
+ if (argc != 2) {
+ printf("usage: %s output_file\n"
+ "API example program to output a media file with libavformat.\n"
+ "The output format is automatically guessed according to the file extension.\n"
+ "Raw images can also be output by using '%%d' in the filename\n"
+ "\n", argv[0]);
+ exit(1);
+ }
+
+ filename = argv[1];
+
+ /* auto-detect the output format from the file name;
+ the default is MPEG. */
+ fmt = guess_format(NULL, filename, NULL);
+ if (!fmt) {
+ printf("Could not deduce output format from file extension: using MPEG.\n");
+ fmt = guess_format("mpeg", NULL, NULL);
+ }
+ if (!fmt) {
+ fprintf(stderr, "Could not find suitable output format\n");
+ exit(1);
+ }
+
+ /* allocate the output media context */
+ oc = av_alloc_format_context();
+ if (!oc) {
+ fprintf(stderr, "Memory error\n");
+ exit(1);
+ }
+ oc->oformat = fmt;
+ snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
+
+ /* add the audio and video streams using the default format codecs
+ and initialize the codecs */
+ video_st = NULL;
+ audio_st = NULL;
+ if (fmt->video_codec != CODEC_ID_NONE) {
+ video_st = add_video_stream(oc, fmt->video_codec);
+ }
+ if (fmt->audio_codec != CODEC_ID_NONE) {
+ audio_st = add_audio_stream(oc, fmt->audio_codec);
+ }
+
+ /* set the output parameters (must be done even if no
+ parameters). */
+ if (av_set_parameters(oc, NULL) < 0) {
+ fprintf(stderr, "Invalid output format parameters\n");
+ exit(1);
+ }
+
+ dump_format(oc, 0, filename, 1);
+
+ /* now that all the parameters are set, we can open the audio and
+ video codecs and allocate the necessary encode buffers */
+ if (video_st)
+ open_video(oc, video_st);
+ if (audio_st)
+ open_audio(oc, audio_st);
+
+ /* open the output file, if needed */
+ if (!(fmt->flags & AVFMT_NOFILE)) {
+ if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
+ fprintf(stderr, "Could not open '%s'\n", filename);
+ exit(1);
+ }
+ }
+
+ /* write the stream header, if any */
+ av_write_header(oc);
+
+ for(;;) {
+ /* compute current audio and video time */
+ if (audio_st)
+ audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
+ else
+ audio_pts = 0.0;
+
+ if (video_st)
+ video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
+ else
+ video_pts = 0.0;
+
+ if ((!audio_st || audio_pts >= STREAM_DURATION) &&
+ (!video_st || video_pts >= STREAM_DURATION))
+ break;
+
+ /* write interleaved audio and video frames */
+ if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
+ write_audio_frame(oc, audio_st);
+ } else {
+ write_video_frame(oc, video_st);
+ }
+ }
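+ /* the loop above always emits a frame for whichever stream is currently
+ behind (the smaller pts), which keeps audio and video packets roughly
+ interleaved in the output file */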
+
+ /* close each codec */
+ if (video_st)
+ close_video(oc, video_st);
+ if (audio_st)
+ close_audio(oc, audio_st);
+
+ /* write the trailer, if any */
+ av_write_trailer(oc);
+
+ /* free the streams */
+ for(i = 0; i < oc->nb_streams; i++) {
+ av_freep(&oc->streams[i]->codec);
+ av_freep(&oc->streams[i]);
+ }
+
+ if (!(fmt->flags & AVFMT_NOFILE)) {
+ /* close the output file */
+ url_fclose(&oc->pb);
+ }
+
+ /* free the format context */
+ av_free(oc);
+
+ return 0;
+}
diff --git a/contrib/ffmpeg/pktdumper.c b/contrib/ffmpeg/pktdumper.c
new file mode 100644
index 000000000..3535fd03f
--- /dev/null
+++ b/contrib/ffmpeg/pktdumper.c
@@ -0,0 +1,97 @@
+#include <avformat.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#define PKTFILESUFF "_%08"PRId64"_%02d_%010"PRId64"_%06d_%c.bin"
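+/* Hypothetical example of a generated file name for an input "clip.avi":
+ * clip_00000002_00_0000012345_004096_K.bin
+ * i.e. packet number, stream index, pts, packet size and 'K' for a key frame. */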
+
+static int usage(int ret)
+{
+ fprintf(stderr, "dump (up to maxpkts) AVPackets as they are demuxed by libavformat.\n");
+ fprintf(stderr, "each packet is dumped in its own file named like `basename file.ext`_$PKTNUM_$STREAMINDEX_$STAMP_$SIZE_$FLAGS.bin\n");
+ fprintf(stderr, "pktdumper [-nw] file [maxpkts]\n");
+ fprintf(stderr, "-n\twrite No file at all, only demux.\n");
+ fprintf(stderr, "-w\tWait at end of processing instead of quitting.\n");
+ return ret;
+}
+
+int main(int argc, char **argv)
+{
+ char fntemplate[PATH_MAX];
+ char pktfilename[PATH_MAX];
+ AVFormatContext *fctx;
+ AVPacket pkt;
+ int64_t pktnum = 0;
+ int64_t maxpkts = 0;
+ int dontquit = 0;
+ int nowrite = 0;
+ int err;
+
+ if ((argc > 1) && !strncmp(argv[1], "-", 1)) {
+ if (strchr(argv[1], 'w'))
+ dontquit = 1;
+ if (strchr(argv[1], 'n'))
+ nowrite = 1;
+ argv++;
+ argc--;
+ }
+ if (argc < 2)
+ return usage(1);
+ if (argc > 2)
+ maxpkts = atoi(argv[2]);
+ strncpy(fntemplate, argv[1], PATH_MAX-1);
+ if (strrchr(argv[1], '/'))
+ strncpy(fntemplate, strrchr(argv[1], '/')+1, PATH_MAX-1);
+ if (strrchr(fntemplate, '.'))
+ *strrchr(fntemplate, '.') = '\0';
+ if (strchr(fntemplate, '%')) {
+ fprintf(stderr, "can't use filenames containing '%%'\n");
+ return usage(1);
+ }
+ if (strlen(fntemplate) + sizeof(PKTFILESUFF) >= PATH_MAX-1) {
+ fprintf(stderr, "filename too long\n");
+ return usage(1);
+ }
+ strcat(fntemplate, PKTFILESUFF);
+ printf("FNTEMPLATE: '%s'\n", fntemplate);
+
+ // register all file formats
+ av_register_all();
+
+ err = av_open_input_file(&fctx, argv[1], NULL, 0, NULL);
+ if (err < 0) {
+ fprintf(stderr, "av_open_input_file: error %d\n", err);
+ return 1;
+ }
+
+ err = av_find_stream_info(fctx);
+ if (err < 0) {
+ fprintf(stderr, "av_find_stream_info: error %d\n", err);
+ return 1;
+ }
+
+ av_init_packet(&pkt);
+
+ while ((err = av_read_frame(fctx, &pkt)) >= 0) {
+ int fd;
+ snprintf(pktfilename, PATH_MAX-1, fntemplate, pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & PKT_FLAG_KEY)?'K':'_');
+ printf(PKTFILESUFF"\n", pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & PKT_FLAG_KEY)?'K':'_');
+ //printf("open(\"%s\")\n", pktfilename);
+ if (!nowrite) {
+ fd = open(pktfilename, O_WRONLY|O_CREAT, 0644);
+ write(fd, pkt.data, pkt.size);
+ close(fd);
+ }
+ pktnum++;
+ if (maxpkts && (pktnum >= maxpkts))
+ break;
+ }
+
+ while (dontquit)
+ sleep(60);
+
+ return 0;
+}
diff --git a/contrib/ffmpeg/qt-faststart.c b/contrib/ffmpeg/qt-faststart.c
new file mode 100644
index 000000000..f9de43514
--- /dev/null
+++ b/contrib/ffmpeg/qt-faststart.c
@@ -0,0 +1,311 @@
+/*
+ * qt-faststart.c, v0.1
+ * by Mike Melanson (melanson@pcisys.net)
+ * This file is placed in the public domain. Use the program however you
+ * see fit.
+ *
+ * This utility rearranges a Quicktime file such that the moov atom
+ * is in front of the data, thus facilitating network streaming.
+ *
+ * Compile this program using:
+ * cc qt-faststart.c -o qt-faststart
+ * Invoke the program with:
+ * qt-faststart <infile.mov> <outfile.mov>
+ *
+ * Notes: Quicktime files can come in many configurations of top-level
+ * atoms. This utility stipulates that the very last atom in the file needs
+ * to be a moov atom. When given such a file, this utility will rearrange
+ * the top-level atoms by shifting the moov atom from the back of the file
+ * to the front, and patch the chunk offsets along the way. This utility
+ * presently only operates on uncompressed moov atoms.
+ */
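+
+/* Illustrative sketch of the rearrangement (assuming an ftyp atom is present):
+ *
+ *   before:  [ftyp][mdat ........][moov]
+ *   after:   [ftyp][moov][mdat ........]
+ *
+ * every chunk offset stored in the stco/co64 tables inside moov is then
+ * increased by the moov atom size, since the media data now starts that
+ * much later in the file. */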
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+
+#ifdef __MINGW32__
+#define fseeko(x,y,z) fseeko64(x,y,z)
+#define ftello(x) ftello64(x)
+#endif
+
+#define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
+#define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \
+ (((uint8_t*)(x))[1] << 16) | \
+ (((uint8_t*)(x))[2] << 8) | \
+ ((uint8_t*)(x))[3])
+#define BE_64(x) (((uint64_t)(((uint8_t*)(x))[0]) << 56) | \
+ ((uint64_t)(((uint8_t*)(x))[1]) << 48) | \
+ ((uint64_t)(((uint8_t*)(x))[2]) << 40) | \
+ ((uint64_t)(((uint8_t*)(x))[3]) << 32) | \
+ ((uint64_t)(((uint8_t*)(x))[4]) << 24) | \
+ ((uint64_t)(((uint8_t*)(x))[5]) << 16) | \
+ ((uint64_t)(((uint8_t*)(x))[6]) << 8) | \
+ ((uint64_t)((uint8_t*)(x))[7]))
+
+#define BE_FOURCC( ch0, ch1, ch2, ch3 ) \
+ ( (uint32_t)(unsigned char)(ch3) | \
+ ( (uint32_t)(unsigned char)(ch2) << 8 ) | \
+ ( (uint32_t)(unsigned char)(ch1) << 16 ) | \
+ ( (uint32_t)(unsigned char)(ch0) << 24 ) )
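+/* e.g. BE_FOURCC('m','o','o','v') evaluates to 0x6D6F6F76, the big-endian
+   integer form of the "moov" tag as stored in the file */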
+
+#define QT_ATOM BE_FOURCC
+/* top level atoms */
+#define FREE_ATOM QT_ATOM('f', 'r', 'e', 'e')
+#define JUNK_ATOM QT_ATOM('j', 'u', 'n', 'k')
+#define MDAT_ATOM QT_ATOM('m', 'd', 'a', 't')
+#define MOOV_ATOM QT_ATOM('m', 'o', 'o', 'v')
+#define PNOT_ATOM QT_ATOM('p', 'n', 'o', 't')
+#define SKIP_ATOM QT_ATOM('s', 'k', 'i', 'p')
+#define WIDE_ATOM QT_ATOM('w', 'i', 'd', 'e')
+#define PICT_ATOM QT_ATOM('P', 'I', 'C', 'T')
+#define FTYP_ATOM QT_ATOM('f', 't', 'y', 'p')
+
+#define CMOV_ATOM QT_ATOM('c', 'm', 'o', 'v')
+#define STCO_ATOM QT_ATOM('s', 't', 'c', 'o')
+#define CO64_ATOM QT_ATOM('c', 'o', '6', '4')
+
+#define ATOM_PREAMBLE_SIZE 8
+#define COPY_BUFFER_SIZE 1024
+
+int main(int argc, char *argv[])
+{
+ FILE *infile;
+ FILE *outfile;
+ unsigned char atom_bytes[ATOM_PREAMBLE_SIZE];
+ uint32_t atom_type = 0;
+ uint64_t atom_size = 0;
+ uint64_t last_offset;
+ unsigned char *moov_atom;
+ unsigned char *ftyp_atom = 0;
+ uint64_t moov_atom_size;
+ uint64_t ftyp_atom_size = 0;
+ uint64_t i, j;
+ uint32_t offset_count;
+ uint64_t current_offset;
+ uint64_t start_offset = 0;
+ unsigned char copy_buffer[COPY_BUFFER_SIZE];
+ int bytes_to_copy;
+
+ if (argc != 3) {
+ printf ("Usage: qt-faststart <infile.mov> <outfile.mov>\n");
+ return 0;
+ }
+
+ infile = fopen(argv[1], "rb");
+ if (!infile) {
+ perror(argv[1]);
+ return 1;
+ }
+
+ /* traverse through the atoms in the file to make sure that 'moov' is
+ * at the end */
+ while (!feof(infile)) {
+ if (fread(atom_bytes, ATOM_PREAMBLE_SIZE, 1, infile) != 1) {
+ break;
+ }
+ atom_size = (uint32_t)BE_32(&atom_bytes[0]);
+ atom_type = BE_32(&atom_bytes[4]);
+
+ if ((atom_type != FREE_ATOM) &&
+ (atom_type != JUNK_ATOM) &&
+ (atom_type != MDAT_ATOM) &&
+ (atom_type != MOOV_ATOM) &&
+ (atom_type != PNOT_ATOM) &&
+ (atom_type != SKIP_ATOM) &&
+ (atom_type != WIDE_ATOM) &&
+ (atom_type != PICT_ATOM) &&
+ (atom_type != FTYP_ATOM)) {
+ printf ("encountered non-QT top-level atom (is this a Quicktime file?)\n");
+ break;
+ }
+
+ /* keep ftyp atom */
+ if (atom_type == FTYP_ATOM) {
+ ftyp_atom_size = atom_size;
+ ftyp_atom = malloc(ftyp_atom_size);
+ if (!ftyp_atom) {
+ printf ("could not allocate 0x%llX byte for ftyp atom\n",
+ atom_size);
+ fclose(infile);
+ return 1;
+ }
+ fseeko(infile, -ATOM_PREAMBLE_SIZE, SEEK_CUR);
+ if (fread(ftyp_atom, atom_size, 1, infile) != 1) {
+ perror(argv[1]);
+ free(ftyp_atom);
+ fclose(infile);
+ return 1;
+ }
+ start_offset = ftello(infile);
+ continue;
+ }
+
+ /* 64-bit special case */
+ if (atom_size == 1) {
+ if (fread(atom_bytes, ATOM_PREAMBLE_SIZE, 1, infile) != 1) {
+ break;
+ }
+ atom_size = BE_64(&atom_bytes[0]);
+ fseeko(infile, atom_size - ATOM_PREAMBLE_SIZE * 2, SEEK_CUR);
+ } else {
+ fseeko(infile, atom_size - ATOM_PREAMBLE_SIZE, SEEK_CUR);
+ }
+ }
+
+ if (atom_type != MOOV_ATOM) {
+ printf ("last atom in file was not a moov atom\n");
+ fclose(infile);
+ return 0;
+ }
+
+ /* moov atom was, in fact, the last atom in the file; load the whole
+ * moov atom */
+ fseeko(infile, -atom_size, SEEK_END);
+ last_offset = ftello(infile);
+ moov_atom_size = atom_size;
+ moov_atom = malloc(moov_atom_size);
+ if (!moov_atom) {
+ printf ("could not allocate 0x%llX byte for moov atom\n",
+ atom_size);
+ fclose(infile);
+ return 1;
+ }
+ if (fread(moov_atom, atom_size, 1, infile) != 1) {
+ perror(argv[1]);
+ free(moov_atom);
+ fclose(infile);
+ return 1;
+ }
+
+ /* this utility does not support compressed atoms yet, so disqualify
+ * files with compressed QT atoms */
+ if (BE_32(&moov_atom[12]) == CMOV_ATOM) {
+ printf ("this utility does not support compressed moov atoms yet\n");
+ free(moov_atom);
+ fclose(infile);
+ return 1;
+ }
+
+ /* close; will be re-opened later */
+ fclose(infile);
+
+ /* crawl through the moov chunk in search of stco or co64 atoms */
+ for (i = 4; i < moov_atom_size - 4; i++) {
+ atom_type = BE_32(&moov_atom[i]);
+ if (atom_type == STCO_ATOM) {
+ printf (" patching stco atom...\n");
+ atom_size = BE_32(&moov_atom[i - 4]);
+ if (i + atom_size - 4 > moov_atom_size) {
+ printf (" bad atom size\n");
+ free(moov_atom);
+ return 1;
+ }
+ offset_count = BE_32(&moov_atom[i + 8]);
+ for (j = 0; j < offset_count; j++) {
+ current_offset = BE_32(&moov_atom[i + 12 + j * 4]);
+ current_offset += moov_atom_size;
+ moov_atom[i + 12 + j * 4 + 0] = (current_offset >> 24) & 0xFF;
+ moov_atom[i + 12 + j * 4 + 1] = (current_offset >> 16) & 0xFF;
+ moov_atom[i + 12 + j * 4 + 2] = (current_offset >> 8) & 0xFF;
+ moov_atom[i + 12 + j * 4 + 3] = (current_offset >> 0) & 0xFF;
+ }
+ i += atom_size - 4;
+ } else if (atom_type == CO64_ATOM) {
+ printf (" patching co64 atom...\n");
+ atom_size = BE_32(&moov_atom[i - 4]);
+ if (i + atom_size - 4 > moov_atom_size) {
+ printf (" bad atom size\n");
+ free(moov_atom);
+ return 1;
+ }
+ offset_count = BE_32(&moov_atom[i + 8]);
+ for (j = 0; j < offset_count; j++) {
+ current_offset = BE_64(&moov_atom[i + 12 + j * 8]);
+ current_offset += moov_atom_size;
+ moov_atom[i + 12 + j * 8 + 0] = (current_offset >> 56) & 0xFF;
+ moov_atom[i + 12 + j * 8 + 1] = (current_offset >> 48) & 0xFF;
+ moov_atom[i + 12 + j * 8 + 2] = (current_offset >> 40) & 0xFF;
+ moov_atom[i + 12 + j * 8 + 3] = (current_offset >> 32) & 0xFF;
+ moov_atom[i + 12 + j * 8 + 4] = (current_offset >> 24) & 0xFF;
+ moov_atom[i + 12 + j * 8 + 5] = (current_offset >> 16) & 0xFF;
+ moov_atom[i + 12 + j * 8 + 6] = (current_offset >> 8) & 0xFF;
+ moov_atom[i + 12 + j * 8 + 7] = (current_offset >> 0) & 0xFF;
+ }
+ i += atom_size - 4;
+ }
+ }
+
+ /* re-open the input file and open the output file */
+ infile = fopen(argv[1], "rb");
+ if (!infile) {
+ perror(argv[1]);
+ free(moov_atom);
+ return 1;
+ }
+
+ if (start_offset > 0) { /* seek after ftyp atom */
+ fseeko(infile, start_offset, SEEK_SET);
+ last_offset -= start_offset;
+ }
+
+ outfile = fopen(argv[2], "wb");
+ if (!outfile) {
+ perror(argv[2]);
+ fclose(infile);
+ free(moov_atom);
+ return 1;
+ }
+
+ /* dump the same ftyp atom */
+ if (ftyp_atom_size > 0) {
+ printf (" writing ftyp atom...\n");
+ if (fwrite(ftyp_atom, ftyp_atom_size, 1, outfile) != 1) {
+ perror(argv[2]);
+ goto error_out;
+ }
+ }
+
+ /* dump the new moov atom */
+ printf (" writing moov atom...\n");
+ if (fwrite(moov_atom, moov_atom_size, 1, outfile) != 1) {
+ perror(argv[2]);
+ goto error_out;
+ }
+
+ /* copy the rest of the input file (everything up to the start of the original moov atom) */
+ printf (" copying rest of file...\n");
+ while (last_offset) {
+ if (last_offset > COPY_BUFFER_SIZE)
+ bytes_to_copy = COPY_BUFFER_SIZE;
+ else
+ bytes_to_copy = last_offset;
+
+ if (fread(copy_buffer, bytes_to_copy, 1, infile) != 1) {
+ perror(argv[1]);
+ goto error_out;
+ }
+ if (fwrite(copy_buffer, bytes_to_copy, 1, outfile) != 1) {
+ perror(argv[2]);
+ goto error_out;
+ }
+
+ last_offset -= bytes_to_copy;
+ }
+
+ fclose(infile);
+ fclose(outfile);
+ free(moov_atom);
+ if (ftyp_atom_size > 0)
+ free(ftyp_atom);
+
+ return 0;
+
+error_out:
+ fclose(infile);
+ fclose(outfile);
+ free(moov_atom);
+ if (ftyp_atom_size > 0)
+ free(ftyp_atom);
+ return 1;
+}
diff --git a/contrib/ffmpeg/tests/Makefile b/contrib/ffmpeg/tests/Makefile
new file mode 100644
index 000000000..c4c6925f8
--- /dev/null
+++ b/contrib/ffmpeg/tests/Makefile
@@ -0,0 +1,91 @@
+#
+# Makefile for tests
+# (c) 2002 Fabrice Bellard
+#
+include ../config.mak
+
+VPATH=$(SRC_PATH_BARE)/tests
+SRC_DIR=$(SRC_PATH)/tests
+CFLAGS=-O2 -Wall -g
+
+REFFILE1=$(SRC_DIR)/ffmpeg.regression.ref
+REFFILE2=$(SRC_DIR)/rotozoom.regression.ref
+
+SERVER_REFFILE=$(SRC_DIR)/ffserver.regression.ref
+
+LIBAV_REFFILE=$(SRC_DIR)/libav.regression.ref
+
+all fulltest test: codectest libavtest test-server
+
+test-server: vsynth1/00.pgm asynth1.sw
+ @$(SRC_DIR)/server-regression.sh $(SERVER_REFFILE) $(SRC_DIR)/test.conf
+
+# fast regression tests for all codecs
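+# (each regression.sh run encodes the synthetic vsynth1/vsynth2 sequences and
+# compares the resulting checksums and PSNR values against $(REFFILE1) and
+# $(REFFILE2))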
+codectest mpeg4 mpeg ac3 snow snowll: vsynth1/00.pgm vsynth2/00.pgm asynth1.sw tiny_psnr$(EXESUF)
+ @$(SRC_DIR)/regression.sh $@ $(REFFILE1) vsynth1
+ @$(SRC_DIR)/regression.sh $@ $(REFFILE2) vsynth2
+
+# fast regression for libav formats
+ifeq ($(CONFIG_GPL),yes)
+libavtest: vsynth1/00.pgm asynth1.sw
+ @$(SRC_DIR)/regression.sh $@ $(LIBAV_REFFILE) vsynth1
+else
+libavtest:
+ @echo
+ @echo "This test requires FFmpeg to be compiled with --enable-gpl."
+ @echo
+endif
+
+# video generation
+
+vsynth1/00.pgm: videogen$(EXESUF)
+ @mkdir -p vsynth1
+ ./videogen 'vsynth1/'
+
+vsynth2/00.pgm: rotozoom$(EXESUF)
+ @mkdir -p vsynth2
+ ./rotozoom 'vsynth2/' $(SRC_DIR)/lena.pnm
+
+videogen$(EXESUF): videogen.c
+ $(CC) $(LDFLAGS) $(CFLAGS) -o $@ $<
+
+rotozoom$(EXESUF): rotozoom.c
+ $(CC) $(LDFLAGS) $(CFLAGS) -o $@ $<
+
+# audio generation
+
+asynth1.sw: audiogen$(EXESUF)
+ ./audiogen $@
+
+audiogen$(EXESUF): audiogen.c
+ $(CC) $(LDFLAGS) $(CFLAGS) -o $@ $<
+
+tiny_psnr$(EXESUF): tiny_psnr.c
+ $(CC) $(LDFLAGS) $(CFLAGS) -o $@ $<
+
+DSPDEPS = $(SRC_PATH)/libavcodec/i386/dsputil_mmx.c \
+ $(SRC_PATH)/libavcodec/i386/dsputil_mmx_avg.h \
+ $(SRC_PATH)/libavcodec/i386/dsputil_mmx_rnd.h \
+ $(SRC_PATH)/libavcodec/i386/fdct_mmx.c \
+ $(SRC_PATH)/libavcodec/i386/idct_mmx.c \
+ $(SRC_PATH)/libavcodec/i386/motion_est_mmx.c \
+ $(SRC_PATH)/libavcodec/i386/simple_idct_mmx.c \
+ $(SRC_PATH)/libavcodec/dsputil.c \
+ $(SRC_PATH)/libavcodec/dsputil.h \
+ $(SRC_PATH)/libavcodec/simple_idct.c
+
+DSPCFLAGS = -O4 -fomit-frame-pointer -DHAVE_AV_CONFIG_H -I.. \
+ -I$(SRC_PATH)/libavutil/ -I$(SRC_PATH)/libavcodec/i386 \
+ -I$(SRC_PATH)/libavcodec/ -lm
+
+dsptestpic: dsptest.c $(DSPDEPS)
+ $(CC) -fPIC -DPIC $(DSPCFLAGS) -o $@ $<
+dsptest: dsptest.c $(DSPDEPS)
+ $(CC) $(DSPCFLAGS) -o $@ $<
+
+distclean clean:
+ rm -rf vsynth1 vsynth2 data
+ rm -f asynth1.sw *~ audiogen$(EXESUF) videogen$(EXESUF) rotozoom$(EXESUF) tiny_psnr$(EXESUF)
+
+.PHONY: all fulltest test codectest libavtest test-server
+.PHONY: mpeg4 mpeg ac3 snow snowll distclean clean
diff --git a/contrib/ffmpeg/tests/audiogen.c b/contrib/ffmpeg/tests/audiogen.c
new file mode 100644
index 000000000..31c437149
--- /dev/null
+++ b/contrib/ffmpeg/tests/audiogen.c
@@ -0,0 +1,168 @@
+/*
+ * Generates a synthetic stereo sound.
+ * NOTE: no floats are used, to guarantee bit-exact output.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+
+#define NB_CHANNELS 2
+#define FE 44100
+
+static unsigned int myrnd(unsigned int *seed_ptr, int n)
+{
+ unsigned int seed, val;
+
+ seed = *seed_ptr;
+ seed = (seed * 314159) + 1;
+ if (n == 256) {
+ val = seed >> 24;
+ } else {
+ val = seed % n;
+ }
+ *seed_ptr = seed;
+ return val;
+}
+
+#define FRAC_BITS 16
+#define FRAC_ONE (1 << FRAC_BITS)
+
+#define COS_TABLE_BITS 7
+
+/* integer cosine */
+static const unsigned short cos_table[(1 << COS_TABLE_BITS) + 2] = {
+ 0x8000, 0x7ffe, 0x7ff6, 0x7fea, 0x7fd9, 0x7fc2, 0x7fa7, 0x7f87,
+ 0x7f62, 0x7f38, 0x7f0a, 0x7ed6, 0x7e9d, 0x7e60, 0x7e1e, 0x7dd6,
+ 0x7d8a, 0x7d3a, 0x7ce4, 0x7c89, 0x7c2a, 0x7bc6, 0x7b5d, 0x7aef,
+ 0x7a7d, 0x7a06, 0x798a, 0x790a, 0x7885, 0x77fb, 0x776c, 0x76d9,
+ 0x7642, 0x75a6, 0x7505, 0x7460, 0x73b6, 0x7308, 0x7255, 0x719e,
+ 0x70e3, 0x7023, 0x6f5f, 0x6e97, 0x6dca, 0x6cf9, 0x6c24, 0x6b4b,
+ 0x6a6e, 0x698c, 0x68a7, 0x67bd, 0x66d0, 0x65de, 0x64e9, 0x63ef,
+ 0x62f2, 0x61f1, 0x60ec, 0x5fe4, 0x5ed7, 0x5dc8, 0x5cb4, 0x5b9d,
+ 0x5a82, 0x5964, 0x5843, 0x571e, 0x55f6, 0x54ca, 0x539b, 0x5269,
+ 0x5134, 0x4ffb, 0x4ec0, 0x4d81, 0x4c40, 0x4afb, 0x49b4, 0x486a,
+ 0x471d, 0x45cd, 0x447b, 0x4326, 0x41ce, 0x4074, 0x3f17, 0x3db8,
+ 0x3c57, 0x3af3, 0x398d, 0x3825, 0x36ba, 0x354e, 0x33df, 0x326e,
+ 0x30fc, 0x2f87, 0x2e11, 0x2c99, 0x2b1f, 0x29a4, 0x2827, 0x26a8,
+ 0x2528, 0x23a7, 0x2224, 0x209f, 0x1f1a, 0x1d93, 0x1c0c, 0x1a83,
+ 0x18f9, 0x176e, 0x15e2, 0x1455, 0x12c8, 0x113a, 0x0fab, 0x0e1c,
+ 0x0c8c, 0x0afb, 0x096b, 0x07d9, 0x0648, 0x04b6, 0x0324, 0x0192,
+ 0x0000, 0x0000,
+};
+
+#define CSHIFT (FRAC_BITS - COS_TABLE_BITS - 2)
+
+static int int_cos(int a)
+{
+ int neg, v, f;
+ const unsigned short *p;
+
+ a = a & (FRAC_ONE - 1); /* modulo 2 * pi */
+ if (a >= (FRAC_ONE / 2))
+ a = FRAC_ONE - a;
+ neg = 0;
+ if (a > (FRAC_ONE / 4)) {
+ neg = -1;
+ a = (FRAC_ONE / 2) - a;
+ }
+ p = cos_table + (a >> CSHIFT);
+ /* linear interpolation */
+ f = a & ((1 << CSHIFT) - 1);
+ v = p[0] + (((p[1] - p[0]) * f + (1 << (CSHIFT - 1))) >> CSHIFT);
+ v = (v ^ neg) - neg;
+ v = v << (FRAC_BITS - 15);
+ return v;
+}
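+/* the argument is an angle in 16.16 fixed point with FRAC_ONE representing a
+   full turn (2*pi); the result is also 16.16, e.g. int_cos(0) == FRAC_ONE
+   (1.0) and int_cos(FRAC_ONE / 2) == -FRAC_ONE (-1.0) */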
+
+FILE *outfile;
+
+void put_sample(int v)
+{
+ fputc(v & 0xff, outfile);
+ fputc((v >> 8) & 0xff, outfile);
+}
+
+int main(int argc, char **argv)
+{
+ int i, a, v, j, f, amp, ampa;
+ unsigned int seed = 1;
+ int tabf1[NB_CHANNELS], tabf2[NB_CHANNELS];
+ int taba[NB_CHANNELS];
+
+ if (argc != 2) {
+ printf("usage: %s file\n"
+ "generate a test raw 16 bit stereo audio stream\n", argv[0]);
+ exit(1);
+ }
+
+ outfile = fopen(argv[1], "wb");
+ if (!outfile) {
+ perror(argv[1]);
+ return 1;
+ }
+
+ /* 1 second of single freq sinus at 1000 Hz */
+ a = 0;
+ for(i=0;i<1 * FE;i++) {
+ v = (int_cos(a) * 10000) >> FRAC_BITS;
+ for(j=0;j<NB_CHANNELS;j++)
+ put_sample(v);
+ a += (1000 * FRAC_ONE) / FE;
+ }
+
+ /* 1 second of varying frequency between 100 and 10000 Hz */
+ a = 0;
+ for(i=0;i<1 * FE;i++) {
+ v = (int_cos(a) * 10000) >> FRAC_BITS;
+ for(j=0;j<NB_CHANNELS;j++)
+ put_sample(v);
+ f = 100 + (((10000 - 100) * i) / FE);
+ a += (f * FRAC_ONE) / FE;
+ }
+
+ /* 0.5 second of low amplitude white noise */
+ for(i=0;i<FE / 2;i++) {
+ v = myrnd(&seed, 20000) - 10000;
+ for(j=0;j<NB_CHANNELS;j++)
+ put_sample(v);
+ }
+
+ /* 0.5 second of high amplitude white noise */
+ for(i=0;i<FE / 2;i++) {
+ v = myrnd(&seed, 65535) - 32768;
+ for(j=0;j<NB_CHANNELS;j++)
+ put_sample(v);
+ }
+
+ /* stereo : 2 unrelated ramps */
+ for(j=0;j<NB_CHANNELS;j++) {
+ taba[j] = 0;
+ tabf1[j] = 100 + myrnd(&seed, 5000);
+ tabf2[j] = 100 + myrnd(&seed, 5000);
+ }
+ for(i=0;i<1 * FE;i++) {
+ for(j=0;j<NB_CHANNELS;j++) {
+ v = (int_cos(taba[j]) * 10000) >> FRAC_BITS;
+ put_sample(v);
+ f = tabf1[j] + (((tabf2[j] - tabf1[j]) * i) / FE);
+ taba[j] += (f * FRAC_ONE) / FE;
+ }
+ }
+
+ /* stereo 500 Hz with varying volume */
+ a = 0;
+ ampa = 0;
+ for(i=0;i<2 * FE;i++) {
+ for(j=0;j<NB_CHANNELS;j++) {
+ amp = ((FRAC_ONE + int_cos(ampa)) * 5000) >> FRAC_BITS;
+ if (j & 1)
+ amp = 10000 - amp;
+ v = (int_cos(a) * amp) >> FRAC_BITS;
+ put_sample(v);
+ a += (500 * FRAC_ONE) / FE;
+ ampa += (2 * FRAC_ONE) / FE;
+ }
+ }
+
+ fclose(outfile);
+ return 0;
+}
diff --git a/contrib/ffmpeg/tests/dsptest.c b/contrib/ffmpeg/tests/dsptest.c
new file mode 100644
index 000000000..06a185202
--- /dev/null
+++ b/contrib/ffmpeg/tests/dsptest.c
@@ -0,0 +1,178 @@
+/*
+ * MMX optimized DSP utils
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#define TESTCPU_MAIN
+#include "avcodec.h"
+#include "dsputil.h"
+#include "mpegvideo.h"
+#include "mpeg12data.h"
+#include "mpeg4data.h"
+#include "../libavcodec/i386/cputest.c"
+#include "../libavcodec/i386/dsputil_mmx.c"
+
+#include "../libavcodec/i386/fdct_mmx.c"
+#include "../libavcodec/i386/idct_mmx.c"
+#include "../libavcodec/i386/motion_est_mmx.c"
+#include "../libavcodec/i386/simple_idct_mmx.c"
+#include "../libavcodec/dsputil.c"
+#include "../libavcodec/simple_idct.c"
+#include "../libavcodec/jfdctfst.c"
+
+#undef TESTCPU_MAIN
+
+#define PAD 0x10000
+/*
+ * for testing speed of various routine - should be probably extended
+ * for a general purpose regression test later
+ *
+ * currently only for i386 - FIXME
+ */
+
+#define PIX_FUNC_C(a) \
+ { #a "_c", a ## _c, 0 }, \
+ { #a "_mmx", a ## _mmx, MM_MMX }, \
+ { #a "_mmx2", a ## _mmx2, MM_MMXEXT | PAD }
+
+#define PIX_FUNC(a) \
+ { #a "_mmx", a ## _mmx, MM_MMX }, \
+ { #a "_3dnow", a ## _3dnow, MM_3DNOW }, \
+ { #a "_mmx2", a ## _mmx2, MM_MMXEXT | PAD }
+
+#define PIX_FUNC_MMX(a) \
+ { #a "_mmx", a ## _mmx, MM_MMX | PAD }
+
+/*
+ PIX_FUNC_C(pix_abs16x16),
+ PIX_FUNC_C(pix_abs16x16_x2),
+ PIX_FUNC_C(pix_abs16x16_y2),
+ PIX_FUNC_C(pix_abs16x16_xy2),
+ PIX_FUNC_C(pix_abs8x8),
+ PIX_FUNC_C(pix_abs8x8_x2),
+ PIX_FUNC_C(pix_abs8x8_y2),
+ PIX_FUNC_C(pix_abs8x8_xy2),
+*/
+
+static const struct pix_func {
+ char* name;
+ op_pixels_func func;
+ int mm_flags;
+} pix_func[] = {
+
+ PIX_FUNC_MMX(put_pixels),
+ //PIX_FUNC_MMX(get_pixels),
+ //PIX_FUNC_MMX(put_pixels_clamped),
+#if 1
+ PIX_FUNC(put_pixels_x2),
+ PIX_FUNC(put_pixels_y2),
+ PIX_FUNC_MMX(put_pixels_xy2),
+
+ PIX_FUNC(put_no_rnd_pixels_x2),
+ PIX_FUNC(put_no_rnd_pixels_y2),
+ PIX_FUNC_MMX(put_no_rnd_pixels_xy2),
+
+ PIX_FUNC(avg_pixels),
+ PIX_FUNC(avg_pixels_x2),
+ PIX_FUNC(avg_pixels_y2),
+ PIX_FUNC(avg_pixels_xy2),
+
+ PIX_FUNC_MMX(avg_no_rnd_pixels),
+ PIX_FUNC_MMX(avg_no_rnd_pixels_x2),
+ PIX_FUNC_MMX(avg_no_rnd_pixels_y2),
+ PIX_FUNC_MMX(avg_no_rnd_pixels_xy2),
+#endif
+ { 0, 0 }
+};
+
+static inline long long rdtsc()
+{
+ long long l;
+ asm volatile( "rdtsc\n\t"
+ : "=A" (l)
+ );
+ return l;
+}
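+/* rdtsc reads the CPU time-stamp counter; the "=A" constraint makes gcc take
+   the 64-bit result from the edx:eax register pair, which is where the rdtsc
+   instruction leaves it on i386 */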
+
+static void test_speed(int step)
+{
+ const struct pix_func* pix = pix_func;
+ const int linesize = 720;
+ char empty[32768];
+ char* bu =(char*)(((long)empty + 32) & ~0xf);
+
+ int sum = 0;
+
+ while (pix->name)
+ {
+ int i;
+ uint64_t te, ts;
+ op_pixels_func func = pix->func;
+ char* im = bu;
+
+ if (pix->mm_flags & mm_flags)
+ {
+ printf("%30s... ", pix->name);
+ fflush(stdout);
+ ts = rdtsc();
+ for(i=0; i<100000; i++){
+ func(im, im + 1000, linesize, 16);
+ im += step;
+ if (im > bu + 20000)
+ im = bu;
+ }
+ te = rdtsc();
+ emms();
+ printf("% 9d\n", (int)(te - ts));
+ sum += (te - ts) / 100000;
+ if (pix->mm_flags & PAD)
+ puts("");
+ }
+ pix++;
+ }
+
+ printf("Total sum: %d\n", sum);
+}
+
+int main(int argc, char* argv[])
+{
+ int step = 16;
+
+ if (argc > 1)
+ {
+ // something simple for now
+ if (argc > 2 && (strcmp("-s", argv[1]) == 0
+ || strcmp("-step", argv[1]) == 0))
+ step = atoi(argv[2]);
+ }
+
+ mm_flags = mm_support();
+ printf("%s: detected CPU flags:", argv[0]);
+ if (mm_flags & MM_MMX)
+ printf(" mmx");
+ if (mm_flags & MM_MMXEXT)
+ printf(" mmxext");
+ if (mm_flags & MM_3DNOW)
+ printf(" 3dnow");
+ if (mm_flags & MM_SSE)
+ printf(" sse");
+ if (mm_flags & MM_SSE2)
+ printf(" sse2");
+ printf("\n");
+
+ printf("Using step: %d\n", step);
+ test_speed(step);
+}
diff --git a/contrib/ffmpeg/tests/ffmpeg.regression.ref b/contrib/ffmpeg/tests/ffmpeg.regression.ref
new file mode 100644
index 000000000..9db847a1f
--- /dev/null
+++ b/contrib/ffmpeg/tests/ffmpeg.regression.ref
@@ -0,0 +1,182 @@
+ffmpeg regression test
+dd4c189859399f7f251876be8e26e4f3 *./data/a-mpeg1.mpg
+722848 ./data/a-mpeg1.mpg
+78d202830e5ce5a67495ab14ebe6469e *./data/out.yuv
+stddev: 7.65 PSNR:30.44 bytes:7602176
+f3b008355f68394b6cad694f3488ea2b *./data/a-mpeg2.mpg
+736978 ./data/a-mpeg2.mpg
+9f364a477987c3b14412e303b94377ca *./data/out.yuv
+stddev: 7.68 PSNR:30.41 bytes:7602176
+1ed851cc079a7bc999e1cc2dc2aa146c *./data/a-mpeg2ivlc.mpg
+727637 ./data/a-mpeg2ivlc.mpg
+9f364a477987c3b14412e303b94377ca *./data/out.yuv
+stddev: 7.68 PSNR:30.41 bytes:7602176
+2566ea5760247a9485c8281cb52291a8 *./data/a-mpeg2.mpg
+735853 ./data/a-mpeg2.mpg
+55c22a09e4924977ee2cc4180078d3ae *./data/out.yuv
+stddev: 7.67 PSNR:30.42 bytes:7602176
+0093ab9141105dec8dc4452ba8f0ab6f *./data/a-mpeg2i.mpg
+749746 ./data/a-mpeg2i.mpg
+5189af71e6aa96cc2f6452e7f6b29287 *./data/out.yuv
+stddev: 7.68 PSNR:30.41 bytes:7602176
+f2067ddbe91a90ef12d606bba91cb900 *./data/a-mpeg2thread.mpg
+812667 ./data/a-mpeg2thread.mpg
+f101d3844ea2e12ac38001718c519232 *./data/out.yuv
+stddev: 7.63 PSNR:30.46 bytes:7602176
+fb900ca681fc054ed634ee4eadc9dc38 *./data/a-mpeg2threadivlc.mpg
+803035 ./data/a-mpeg2threadivlc.mpg
+f101d3844ea2e12ac38001718c519232 *./data/out.yuv
+stddev: 7.63 PSNR:30.46 bytes:7602176
+fd5f3e2a81558213bdfb8a7c86b3f541 *./data/a-mpeg2reuse.mpg
+2099878 ./data/a-mpeg2reuse.mpg
+e7f8692982e6d339a1f64bde4c297e59 *./data/out.yuv
+stddev: 7.67 PSNR:30.42 bytes:7602176
+c83ae8d8f3e2b4506df58e6a2f7e3b2a *./data/a-msmpeg4v2.avi
+636512 ./data/a-msmpeg4v2.avi
+279c33c2f6f58b7eb3d2daaa87160cb5 *./data/out.yuv
+stddev: 8.00 PSNR:30.06 bytes:7602176
+f546e8d0ada1917bc470584477f83e0e *./data/a-msmpeg4.avi
+639406 ./data/a-msmpeg4.avi
+8692a2e9ddb8081c4f00cb1557e2388e *./data/out.yuv
+stddev: 8.00 PSNR:30.05 bytes:7602176
+44c11ce4aa20af1aa609f68e544e5479 *./data/a-wmv1.avi
+641448 ./data/a-wmv1.avi
+69454f78ca636e83a600834e5a90660e *./data/out.yuv
+stddev: 8.01 PSNR:30.04 bytes:7602176
+044b1b5bd5899d54a8fe09eac2181d8b *./data/a-wmv2.avi
+675342 ./data/a-wmv2.avi
+69454f78ca636e83a600834e5a90660e *./data/out.yuv
+stddev: 8.01 PSNR:30.04 bytes:7602176
+12d215719748b4cf1adeaca4e519ba6c *./data/a-h261.avi
+727616 ./data/a-h261.avi
+bb2e71de01899ade4f850c180f9b0258 *./data/out.yuv
+stddev: 9.13 PSNR:28.90 bytes:7602176
+66d36048d15c3b04bd7bfc08ab977fae *./data/a-h263.avi
+673694 ./data/a-h263.avi
+d507be4253a9c8211a3738c58ba28118 *./data/out.yuv
+stddev: 8.06 PSNR:29.99 bytes:7602176
+e9e884a7c6b77d1aeeb4cb56ac150f92 *./data/a-h263p.avi
+2389564 ./data/a-h263p.avi
+0bb16a352798c997cb36e167f4fa8f3c *./data/out.yuv
+stddev: 2.07 PSNR:41.77 bytes:7602176
+3ee2dd25f141d520f61e5c01d08bdef1 *./data/a-odivx.mp4
+550787 ./data/a-odivx.mp4
+a1c691f3be526ecbf3be3152d5bab88c *./data/out.yuv
+stddev: 7.99 PSNR:30.06 bytes:7602176
+6c58e5707afe056b072d2ce21b3b8e4f *./data/a-huffyuv.avi
+7933744 ./data/a-huffyuv.avi
+799d3db687f6cdd7a837ec156efc171f *./data/out.yuv
+stddev: 0.00 PSNR:99.99 bytes:7602176
+5f35533ff4357e405971466ee06375db *./data/a-mpeg4-rc.avi
+814102 ./data/a-mpeg4-rc.avi
+2cc2a0b6699d861b4cffad28a704067b *./data/out.yuv
+stddev: 10.41 PSNR:27.76 bytes:7602176
+85e2456a673041d528b242d78318fb65 *./data/a-mpeg4-adv.avi
+600188 ./data/a-mpeg4-adv.avi
+60edc5a67271e425d0a2a52981895b81 *./data/out.yuv
+stddev: 10.25 PSNR:27.91 bytes:7602176
+c429ad61139904f03a211ba2bdf4cf01 *./data/a-mpeg4-thread.avi
+761308 ./data/a-mpeg4-thread.avi
+cd5bb5cbd9a7ea0953825482d2990f5a *./data/out.yuv
+stddev: 12.31 PSNR:26.31 bytes:7602176
+339c0dd9f30afa33a6f7134f84d5b60a *./data/a-mpeg4-Q.avi
+878910 ./data/a-mpeg4-Q.avi
+83b3cef07c91a397c63bc4bc45df667f *./data/out.yuv
+stddev: 5.61 PSNR:33.13 bytes:7602176
+c1dae02bddd79790266bc0a9f7d6eb0e *./data/a-mpeg4-PSP.mp4
+406449 ./data/a-mpeg4-PSP.mp4
+7315281e07830456208dff61337c982b *./data/a-error-mpeg4-adv.avi
+731526 ./data/a-error-mpeg4-adv.avi
+6ce2c82a0a9cf67a6991694473e9a306 *./data/out.yuv
+stddev: 18.23 PSNR:22.90 bytes:7602176
+b699b2fd005571dda3f8d34cb0ce7aec *./data/a-mpeg4-nr.avi
+688676 ./data/a-mpeg4-nr.avi
+2c16e13b1367022d52b0e75d93a734ba *./data/out.yuv
+stddev: 7.02 PSNR:31.18 bytes:7602176
+215cd567b5001181c24128e535cf77b0 *./data/a-mpeg1b.mpg
+1026948 ./data/a-mpeg1b.mpg
+1ee27511d22a4a553cac2ca64a79f2e1 *./data/out.yuv
+stddev: 6.34 PSNR:32.07 bytes:7602176
+2f9cb2ede35f7d12f6b518c50e20d81c *./data/a-mjpeg.avi
+1567580 ./data/a-mjpeg.avi
+18c3a76f984e717dd886d21fa04355f6 *./data/out.yuv
+stddev: 7.93 PSNR:30.13 bytes:7602176
+5a662e3833d900b56cca79ba5ed5ec06 *./data/a-ljpeg.avi
+6264498 ./data/a-ljpeg.avi
+799d3db687f6cdd7a837ec156efc171f *./data/out.yuv
+stddev: 0.00 PSNR:99.99 bytes:7602176
+8b9359781a7e87d09af1b1c9a536cb75 *./data/a-jpegls.avi
+9086694 ./data/a-jpegls.avi
+0f8637e9b861230aff9894825af83720 *./data/out.yuv
+stddev: 2.84 PSNR:39.04 bytes:7602176
+7eee6367442884321e27d15a26bc032a *./data/a-rv10.rm
+667915 ./data/a-rv10.rm
+d507be4253a9c8211a3738c58ba28118 *./data/out.yuv
+stddev: 8.06 PSNR:29.99 bytes:7602176
+55c73229105f35cbb06ee0dda215df2f *./data/a-rv20.rm
+640856 ./data/a-rv20.rm
+297dc46da1a256c0a97158c036c30c7f *./data/out.yuv
+stddev: 8.26 PSNR:29.77 bytes:7602176
+d13292f4583618d1b7b525a9ee010dff *./data/a-asv1.avi
+1488864 ./data/a-asv1.avi
+925320b74c7dfda5dc8378dd879ae2c3 *./data/out.yuv
+stddev: 20.00 PSNR:22.10 bytes:7602176
+2e50b590f32bf98bde82dbfaf180007a *./data/a-asv2.avi
+1454536 ./data/a-asv2.avi
+0b310840a6d3970595983491687669df *./data/out.yuv
+stddev: 18.82 PSNR:22.63 bytes:7602176
+4478bd22d09ae383b5cff05100437727 *./data/a-flv.flv
+649017 ./data/a-flv.flv
+40281942d6ee254f7d3027b8593b19be *./data/out.yuv
+stddev: 8.06 PSNR:29.99 bytes:7602176
+f8f51fa737add17f7fecaefa118b57ed *./data/a-ffv1.avi
+2654678 ./data/a-ffv1.avi
+799d3db687f6cdd7a837ec156efc171f *./data/out.yuv
+stddev: 0.00 PSNR:99.99 bytes:7602176
+b12ff53580cd8a9ce92ab7a0649e99f9 *./data/a-snow.avi
+155948 ./data/a-snow.avi
+b3feb1bf17bb4e720da1f1e8b4da4c03 *./data/out.yuv
+stddev: 23.19 PSNR:20.81 bytes:7602176
+265c4e0c45b2313817fa4d86dccbe6ba *./data/a-snow53.avi
+3519574 ./data/a-snow53.avi
+799d3db687f6cdd7a837ec156efc171f *./data/out.yuv
+stddev: 0.00 PSNR:99.99 bytes:7602176
+2fcbcdc63816e1321bf4b6b5380338d2 *./data/a-dv.dv
+7200000 ./data/a-dv.dv
+c2082cd8adf417c4ebc32654e446cba1 *./data/out.yuv
+stddev: 8.86 PSNR:29.17 bytes:7602176
+4f71942eb699bf3b12508a9e777a319f *./data/a-dv.dv
+14400000 ./data/a-dv.dv
+fc866b8879a34f5b440647e5135e4bfb *./data/out.yuv
+stddev: 8.45 PSNR:29.58 bytes:7602176
+6860534864cad0de4b1af9f987aaf9bf *./data/a-svq1.mov
+1365791 ./data/a-svq1.mov
+fb0a97094a89d6f379535f615783d00c *./data/out.yuv
+stddev: 10.98 PSNR:27.30 bytes:7602176
+21f8ff9f1daacd9133683bb4ea0f50a4 *./data/a-mp2.mp2
+95712 ./data/a-mp2.mp2
+83f8df5d5f84480566af548bb037fceb *./data/out.wav
+stddev:9330.70 PSNR:16.92 bytes:1054720
+stddev:4396.13 PSNR:23.46 bytes:1052672
+dd68110cb7e5388392f89d5160d3a825 *./data/a-ac3.rm
+98203 ./data/a-ac3.rm
+9e6e66847a568ef4f1f229b0939d2aae *./data/a-g726.wav
+24268 ./data/a-g726.wav
+a719ab6d47d8d601520edb13bf6136b4 *./data/out.wav
+stddev:8459.88 PSNR:17.77 bytes:96256
+ea2efb8ba20695a35ab0d71a7ee86f22 *./data/a-adpcm_ima.wav
+266288 ./data/a-adpcm_ima.wav
+60178d48204f5662d91776e36eddc82e *./data/out.wav
+stddev:11441.89 PSNR:15.15 bytes:1054720
+d2eee867856d2bdb6d08e936d4ceec0c *./data/a-adpcm_ms.wav
+267308 ./data/a-adpcm_ms.wav
+91a84bb4f319a3a0bf0c0441b3d3a529 *./data/out.wav
+stddev:1050.18 PSNR:35.89 bytes:1054720
+48ae9fcb043a44e316998b85043b61bc *./data/a-adpcm_yam.wav
+264236 ./data/a-adpcm_yam.wav
+e92cec8c07913ffb91ad2b11f79cdc00 *./data/out.wav
+stddev:18312.68 PSNR:11.06 bytes:1056768
+c3382f03ce2efb5d475240d288a33898 *./data/a-flac.flac
+353368 ./data/a-flac.flac
+c4228df189aad9567a037727d0e763e4 *./data/out.wav
+stddev: 33.31 PSNR:65.87 bytes:1040384
diff --git a/contrib/ffmpeg/tests/ffserver.regression.ref b/contrib/ffmpeg/tests/ffserver.regression.ref
new file mode 100644
index 000000000..7350fa481
--- /dev/null
+++ b/contrib/ffmpeg/tests/ffserver.regression.ref
@@ -0,0 +1,10 @@
+418d0c843a6605a7d48f285804a69df4 ff-test_h.avi
+ad4605187044d5e169f32a2ea205f937 ff-test_l.avi
+34f4f0c160efcdb37c0999680e07c41c ff-test.swf
+fe6de6234a8bb4ace52b86763741cecd ff-test_h.asf
+68e7c8a8ff64c4016a8d4e5ea27353f3 ff-test_l.asf
+cba8663681bf3ef89c95c09bb90204db ff-test_h.rm
+0a14086c708a7b8f4604e705a2bb7087 ff-test_l.rm
+353a9a8fb3d5bb2334ac991b5c60f6d3 ff-test.jpg
+7cbd46a6174d8203c26be333980595b5 ff-test_small.jpg
+aaa459e638cd1dc2c120760fa4e1e384 ff-test.mjpg
diff --git a/contrib/ffmpeg/tests/lena.pnm b/contrib/ffmpeg/tests/lena.pnm
new file mode 100644
index 000000000..9fa0c923f
--- /dev/null
+++ b/contrib/ffmpeg/tests/lena.pnm
@@ -0,0 +1,109 @@
+P6
+# CREATOR: The GIMP's PNM Filter Version 1.0
+256 256
+255
+[binary 256x256 24-bit PPM pixel data omitted]
8J§?N©BN°CP²DJµGM±KT°CK¯GN°FN°DL®GP°HN¯HQ²GP®BNµEPµKR¸NS¼QTÀSUÂSUÃSXÅZ\ÄVXÄVXÃVYÅW]Ç^^È]YÈ\]É\\Ë_ZÈ[WË]\Ï_]Ê^^Î\YÈ\ZÌVVÌ]]É`cÎ`^Ð^_Ï`_ÑabÑ^^ÐabÐ^ZËZYÍ`^Î`bÌ_aÑ^]Ò`cÐ`bÒ_aÏ`cÐ`^Ï\\Ó^\ÐbcÏ\[Ò]]Ô\[ÒZ[Ò[YÏZZÌZZÍ\]ÈYZÌTTÌ\^ÊXYËVVÊXXÊ[XËZZÇTSÉVTÉTXÆVVÅTWÂZ[ÅY[È\^Ê\ZÊ\\Ì`bË]`Ë_dÈbhÊbeÎehÎacËabÐ``Ë^^Ê^^Ì^_Ë^`Ï^aÑ``Ï`^ÏZ\Î^aÌ`_Í\ZÊ]_Ë^\Ê[^ÌZ\Ð]XÍ_`Î[XÏ[\Î]\Ì]bÌY^ÉZcÇV\ÄUXÀV_¼Wa¿Wd¸U`¹NVµJX¶HY¹KY¾R`Å]fÌchÒhk×pnÔwsØzsÚzqÜ~oÝ€uÜ€uÝ„tà„vß…xÞ†sß‚o܆tàƒjà‚oáƒhàƒpà‚pß‚láƒmá‚oà~jà~kâ‚jã†rá„oà„là€oÞƒsà€nßnÞ~mÞ~lÚzkÛzjÜzlÙyiÚwf×ufÖqbÕpaÕrbæ–pô¼„øË”úÔžùÙªúÚªüÛ¬üÜ©ûÒ™ñ¾ƒÎƒhªJR¦DO¯OX³PV·PT»RYÀUZºW`¾Y[À_dÃacÈdfÌhe¾W[>Nn@Y<X@U>Y?V=\Cb"Od'Pe#Kj%Ld$GdDb>d@b>_?^<`CåŠwâŠpåŠnä†päˆjæ‹päˆqæŒvètè’uè“véoèpé‰oèˆièŒkçŒfçˆkçˆjæ†lä†næ†må‡qçˆjéˆhè‰lç‚gä}fàp[ÖbVÈUP¸AJ¦:I™0Fœ2G£<N¦:K«BP±HR²JT²FP±AP³BL´EP°EP®DN¯EQ²EO°DN¯EN°GPµEMºFN¼NT¾PTÂQSÄVWÁX\ÃVVÅZ\ÄYXÆ\YÁ\^ÄWZÉ][ÉVTÆYZÎ]ZÍ]YÈ]^È\`È^`Ì[ZËZZÎZWÍZ^ÏYZÍ[]Ñ^`Ð\ZÔ^`Ñ^^Ñ_aÒ^ZÐ\ZÐbaÎ`bÐ^]Îa_Ð\]Ð`dÐ\^Ða`Ñ^\Ð\`Ñ^aÔ^^Ñ^aÒ\^Ó^bÐ\\Ð\\Ð^^Ð[[Í]aÊ\[ÌXYÎ[WÏZUÌYTÈTPÇTTÆWVÈWSÆWYÅYUÆVVÉVQÃXZÄVVÈXXÉWWË^\É``Ê]aËabÊefÍafÌbdÊbfÌ_^Ë]`Í_eÎ^]ÌZ[È^cË^`Î_`Í_^Î^`Î][ÌYXÎ^`Î_`Ì^^Ë^bË^ZÎ]XÌ\ZÍ][Ò^^Î]\Ê\^È[`ÈY_ÇUZÂTZ½R[¿S\¾S\¼R\¼PZ»OZ¹L\»NYÄT_ÆZdÎbdÐflÕopÕrnÙwsÚwoÚ|tÝvÝ€uÜxÞ‚vÝ‚tÞ†sß…sÞ‚oà…káƒlß‚oá€iÞ‚tà€ià‚làjà}iß~i߀lá‚pá…nâ‚rál߀jàlÞ~mÜ}nÝ~jÛzjÛziÛ{iÚvjÚubØtbÖtbÕp_Ön_Üdî­y÷ÈŽùÑ—øÖ¥ùÚªûÛ©ûÞªüפøÈå©z¾f[§EP®LR¯OX·MS¸QYºRZ¶T\¼Z_À\`ÅddÊd_Ã\\¥?Lt?Z=V=X@X>\>Z@^@d@fAdAa>b<`@`<a:_>_9Z>\>å†mäˆpå‰oæˆpä†käˆpäˆpçsé’yè”réxçˆjç‡lç†hèƒfè†cç„få‚eç‡jèˆnå‡räƒhæ‡oæ†oè†jçˆkç†låzfÜp\ØgXÇYU´EM¤7Kœ1H4I£:N¥<O«DT®DO²EP²DJ±HS¶HO±HS¯GO´LR°AL±JP®DO¯DM²IO¶FJ¹IP¼MR¼NQ¼RXÄUXÈXTÇYWÆTTÆTTÅZ_Æ\[Æ\[Ì]ZÌYXÉXXÌZYÎZZÌ\bÎ`_ÉYZÍZZÍ\\ÐYXÒ_^Ñ^bÎ`aÑ^bÐ^bÒ][Î\_Ñ\YÐ\bÎZYÒ`^Ó[ZÐ`]Ñ_`Ð^^Ð_bÒ`^Ò``Ò]^Ñ^]Ð__Ñ``Ð]\Ï\^Ò^^ÐZZÑ\ZÔcaÒ^\ÓZXÐ[WÐZXÏ[UÊWVÌZVÌXTÃXVÈURÆXUÄUVÄWTÃSRÅXRÂTR¾XWÆVUÈVWÉ\[Ç\]Ë^_Ê\`Í_bË_dÈadÎbeÍ^aÍ\^É\aË_`Ë^_Ê`eÌZ\Î`gÍbdÍehÎ[^Î\_ÏbfÍ^bÌ][Ê_bÍ\XÐ\YÎZZÏ]YÐ^]Ë_aÉYZÄY[ÈZ\ÈXZÄRWÂX]¾RV½S]ºQ\¸R]¶NY¶KX¸MZ¿R]ÄXdÎ\`ÑjjÒlj×qnÖtrØxpÙypÜzpÛ~sÜ€wÞ~pÜ„w߃tà…tßjß‚lá€jànàƒnÝ€rÞ‚lÜ€qß~gá€kà‚lß~lâ€nà~jâ‚nâ€qßmà~lÞ|jÞzfÛwfÛxkØvfÚtgØvjÙtbØsbÔqdÖq^×o\Ös_ä“oòº‡ùÌ”ùÕ¢øØ¦úÚ¬úÛ§üܧüÔ›ð¾…ÑŠl®MRªGP®LU±MV±OV´RY¸SYºZ]À]bÉheÁYY¤>Hw"A^>V<V=X>Y>]A_BaHg"If&KdCb@_DaD`B]?^<^:]>\;ä‰pä…käŠläˆlãˆläˆrä‹qè’uê–ué’vèŠmæ„hæ€eæ‚gæ€hæ‚bå€eä‚iç†gè‹dç‡må‡hã…nä†læŠnçˆlè…nå|fÞub×bWÉVS±DM£=L˜/E3F£=M§=N®AM­CP°DQ´EN´EO±GR²EQ²BN²HR²HO°FL®FP±FO²FPµHPºLR»IR¾PTÁU\ÅRVÆVVÆVXÈVXÆXYË\ZÈ^[ÈZYÊ\YÊ][Ì[ZÎ\XÌ\YÊ^`ËZYË\[ÎZ[Ê\^Î^\Ï^bÑ]]Ð`bÑblÐ[^Ò^^Î\_Ë[]ÍZXÎ_bÐc`Ô^^Ð`_Ó^_ÑbcÒ\_ÒabÑ[ZÓ^`ÐZ_Ò[YÐbbÑbbÒZZÒ^ZÐXYÏZYÒ[\Ò_\Î[\ÎZ^ÎX[ËZ^ÈXcÆdqÄhrȇÇuzņ‘Ày~Àv~¾jp¾kr¼[`½X^ÁWVÅWVÆXWÉ[XÃXZÈWZÇ^^Ç\_ÇZ_Ë[_ÈX\Í^aÍ^^É^`Ï^`Ì^cÍ]`Ð^aÌaeÎbcÍ`dÎ\\Ñ]]Ï]^Í\`Ì`cÎb^Ñ[UÐ^ZÌ]ZÎ]`Î]^Ç\^ÊZ^É]\ÈY_ÃS\¾T^½Q\»R\¸VaºPX¶JT·JZºLY¼P\ÅT`Ê^cÐhgÔlkÕnfÖvqÙtiØwpÚzrÚ}sÚ|vÝ|nÝ€uÞ‚tÞ‚rÞƒqßoÞ€l߀mà€mÜqÞlß‚pßjá€jâ‚kâ€iâià~hàoáià}hßzlÝzgÜzjÚwjÕuj×qdØwhØqeÚrcØtbÖrcØsbØp_ÖrdÜ{dì©xöÅŒùΘùÖ¤úÚ¨úÚ©ûܪýÙ£÷Ìæ­}ÀhZ¨FR­JT°KS²PX¸U\¹RZ¼^bÁb`¼X\£@Nv@[>X?W=X@X>\A_AdCa FeFg"IgFaEa@dAb>^9^<`;_>]<æ‹xä‰qåŠså†nä…låŠtçŒnê’uìréˆhæ†hå€hä|cæ}cäz_ãz[ç|bä€cçˆlèŠjçŒpç‰lä†qå‹uèˆlé‰pêƒbå~lát_Ùj^È\[²GS¥;K™3Jœ2H <R¦=N­>O®CL²FQµFN³HSµKT´LU°GN²FR²GQ²FM¯EN°FP³HN¹GN¸PW¹NU¾RY¾QYÄUTÌXUÅWXÈZYËVXËZXÉZVËYUÍ[ZÊ[YË\ZÐ]\Í[\Ì\`Ì^cË[\ÑZXÎ]aÏ^]Òa\Ð\\Ñ[^Ð`bÒ^bÔ`bÒ]]ÎZ]Ð\]Ï\^ÒljÒ_aÐ``ÒbbÒdmÏ]`Ô\`Ð]_ÒYYÑ_`Ñ[[Ó^\ÒhfÍ_eÒY\Ï\`ÌZ\ÎbnÈ_fÍz‚ÈpuÅuÂs{¿x…Å„‰ÊžÏ”‰Î šÔœ•Ρ¡Ðš’Ѩ£Ô¤”ͤ¢É œÀ€†Áu}¾[]ÁVYÂRTÅTTÂVVÁVZÆXZÈ\aÌZZÌZ_ÍZ]Í\^ÎY]Ê_bË]aÏbfÎbfÎ`hÎacÍ_dÍ_cÉY_Í[]Ë]aË\\ÑZZÐYZÌ__Î\^Í[ZÌZ]ÌY_ÇYYÅV\ÂT_ÀR[¼Q\½S[ºR^¼LV¹KT¸HT¹NY¸UaÆU_Ì`eÒgdÕklÔqnÖsmÕup×toÚulÙxrÚ{uÛzrÛxÝ€tÞ€o݃rßlÞ‚pÞ~jÞ~mÜnÝ}oÞ€lÞ€náiâgâ€lâ€ià|hà~ià|iß|hÞyhÞvfÚxfÚvhØwlØvdØvjØrc×tg×udÕpaÕvdÖsdÕqaØqbãpó¶ŠøÈ’ùÓœúצúÛªûܪýÛ©üÔœóĈזt°RW­W_®OX³SZ¹U]¼Y^¼_a¼VV˜8Jq 
B\:V?ZBZAY<Z@]@^BbDgGh"Fe#IcDfAcE`=]<]>]=\<^?\<æŠuäˆnå‰kæˆlä…nèŠmè’yê”sê–pé‰læ‚eå}eäw]âw^âu\ßxdâw]âjèŠnæ‰hæˆnå‡mä†pä†réŒrçŠpé„hæ}dàs_Üe\Í]Z¶HO¦<Mœ6Lœ2J 9K¨<L¬=O°@L°BQ²FR²FUµFQ±JX®FP³EM´HO´FM³CN²DL´FL¿FL¹PS¿PVÀNQÂLPÄQVÈUSÇYYÇVWÇYXÊYZÌ\[ÌZYÊ^[Ì[VÌ\YÏ^ZÌ]^Ì^^Ð\^Î^]ÐZZÒ\\Ñ^\Ó\\Ð^`ÑcdÒ\aÏ]^Ð^_Ï[_Ð^`Ó]_Ï^^Ë]_Ð]\ÐabÐb`Ò`bÒ\^Ò^\Ò^]Õ_[ÒX[ÒZZÐ]^Î\bÐirÍivÇr€Æq}Æ~‰Â}ŠÃ}‚¾|ˆÀ~ŠÈ‰ŒÊ––Í’ˆÏ”ϥΡ›Î£›ÎŸœÓ­Ñ¥˜Ö´¦Ù¨˜Ö°§Ú³¢Ó¤šÊ§¥È˜™Àv|¿`h¿WZ¾UX¾VY¿X^ÄV]Ê\_ÈZ`ÈZZÊ\^È]_Ê`dÉehÈdlÉagÌdiÌcdÌ`bÌ\^ÏaeÏZXÍ^^Ð]^Ï[^Î`_É[^Í\^Î^`ÌZ\ÇY^ÉUVÃX^ÁV\ÁNV¼OZ½NZ¼P[¶LX·IV·LX¸P]ÂX`Ì\cÐed×kjÖqjØsnÖqmÕtlÖxpØtl×{xÛzrÜ~pÜ~vÞ€pÞpÞqàqÜ{mÞ~nÞ~ná~kà}mÞjà€lâjägâ€gß{eá|hßzjÞ}fÝzhÝxdØwfÙtfÖvj×tdØvh×rb×paÙr`ÖscÖrbÖsdÕobÖubÚxeë¡wöÀŒøÍ˜úÔ¤úبûÚ¨üÜ©ýÚ§üÏ—ì´Êxa¯NU«KT¶UZ¹U\Â_\¼TVŸ@Nq@X :V<V>X:W>Z:`@`Ae!Ga@f$Id: lena.pnm,v 1.1.2.1 2006/12/02 01:20:05 dgp85 Exp $Jb"Gd"FbE^>[:Z:\;a<^;`<`=æˆqæˆræ…nä†näˆtçŽqì”sì”uë‘qè†häbãt]ßt\Þp^ßn\ßqcáv`ä‚jç‹oèŽmå‹næ‰lçˆqæˆqèŠlè‹nê„iæhÞscÜgXÎZV´FL£:G™2Hœ6F£9I¨:G«>N¯?L°BR·FP²ER±IQ²DN¯DM¯EN³GP´HP¶HO´EN¸FK¾JO¾MNÁNVÁOTÂQVÇTVÈY]ÈWVÊXZÊVRÌWTÌ\YÌZTÎ`_ÍZTÌ\XÍ\YÊ[ZÊ_\Ì\ZÎ[\Ð\[ÑegÐ`bÑ_`Ò__Ò^`Ô^bÔ^^ÔacÒ\\Ò]aÑY^Î]`Ð^]Ò_`Ôa\Óf_Ô]\Ô\\Ó^\Ô\YÑZXÏZ\Ì[fÌivÄw†Ç{„É‚ˆÇ}€Æƒ‡Çƒ†ÀƒÂŠÂ‹Ê“Ò —̘͌”͘‘Ïž˜Õ¦šÔ­¨Ñ¨›Í£žÒ¦›Ò©ŸÐ§–Ѧ”Ò£˜Ö¯¢Û¶§Ú´£Ø³£Ï¥žÄ”—¼pz¼]f»U[¼NWÄWYÇUXÈZ[ÆY[ÆX^È^bÌ`dÈdjËahÌ_eÐ^bÎ`cÌ\_Ï^^ÍZ^ÎZYÎ\_Í\ZÏ\[Ñ[^Ë\^ÎXYËVWÅT_ÆVZÅS[¾W`½SX¾MW¼OZ¼MWµLY°J\·KV¸P\ÄV\Í_fÒfaÓmlÔrlÙtjØsjÖsj×umÕuqÙvlÜzmÚzrÜ{nÞ€sÜnÞ€q߀nÜ~nà€l߀p߀màkÞ|hà~oâ|gâ~gá€iàzfß|hàzhÝvfÞubÜwgÚudØrd×qg×sb×pfÖpfÖpaØpb×p^ÖpfÖn^Ön_×pbÖveß„nð°‚øÆ‘úКøÖ§úÚªüÜ­üàªþئøÉŽâ¡v¾c[²PXºX\¿]]ºXXœ=Lq>[;S:U<Y=Z<Z>Z?^?bDcCcBfCdBdEbA`EZ>X9Z:^8a8]7_=^;æ‡tæ„læ‡kçˆræŠmëpì’pê’lêŠmçhäx`àr\ÝlWÜiZÜm]ÝkYàw^ä‚gèŠlèŒhæoæ‡på‰lèŠoéŽsé‰iê†hå|dár\ÚfWÏWR¸EM¦8Gš2E˜3F 6K§<L©8L²BO°DN³GT´FQ±FL±IR­DR²KO³DM¶CLµGQ´GP·JP¾HN¼HPÁMTÃPRÅRVÉTSËVVÍXWÍZVÆUVÊWTÍZXÐZXÒ]YÐ]ZÏ\VÐYVÌYUÎZZÎ\]Î^\Ð_^Ò_\Ð]]Ô`]ÒZZÑ`aÖ__Ô^^Õ_\Ò`_Ð^aÐ]^Ñ^^Ï\[Ð]^Ò[ZÓ^XÓ]YÑ\`ÒZ\Ð[]Ð[aÌfrÆt‚Ãv€Æ~‚Æ}€Ê†ŠÃz‚¾s|Â|‚ÄzÃ|ƒÊŠŽÒ–ŽÎ™Ì›‘Θ’Ηϣ ÔªÕ§›Ô§˜Ð¥œÑ¬žÍš‘ʔ̟–ЪŸÕ¨”Ó©šÖ³¢×­œÝº©Þ¸¥Ø³¢Ð¥˜Â„ŠºdoºPWÀRXÂSZÃY_ÆX^È[^É\aÈ`dÉ\aÌ[`Ï]`Ð^`Ê\eÊ]cË]`É[]Î^_ÌW[ÎYYËZ^Î^^ÇX`ÆT[ÄVaÉUWÂSX¾T`ÀRX¾OX¸P^·QZ´JX²IV»IV¼MZ¿X`ÎbcÔgeÖnjØoiØxuØsnÕtqÕroØupÚwoÚwrÚxqÛ|wÜzsÝ‚rÞ}qÞ~nÞqß{lÞ{gÞ|là~jß{lß{ià|hÞzhá|hÞxjÝzfÞsdÝweÝscÜreÚtdØpeÖn`ØqeÖo`Öod×ocØp^ÔrdÔsc×scØn\ÕpcÔqdÖudç–qô»†úË’ùÓ¢úاûܬûß®þà®þÖžóÂÖrºYZ¾YZ½XY£GQuA\<R=U >X>V>X=[>Z>`@dDfDbCb>d@`>`@\@\@[>^<`9b:\8Z9]8è„hæˆoç†iêŒsèrìoì’rêŽnè‡håeàsXÞlYÛ`RÚ^RÚeZÛl]äu^ä…gèŠjê‹iç‹mèŠnè‰pé‰lìkèŠpê†eæ€jãrZÜjZÎVP¶FL¤6F—.Cš6I£5J§:K¨<L°?J¬AP±CPµFN²AM±BP¯FL±FN¯FP³EM¹IQ³CL¶EPºJN¼MT¿RZÄVZÃRUÊTWÌVTÌSUÍWVÉZXÌYYÍYXÐZ]Ð[\Ð\YÎ^ZÑZWÎZXÎZ[Ò^ZÏbaÑa`Ð`]Ñ\\Ñ\\Ñ\bÒ]\Ö^`ÔYYÔ\\ÑXYÓ^ZÏUVÑ\\Ð^^Ð^]Ñ^\Ô\\ÐY]Î[cÊanÉ_pÀfxÆy‚È}†¾qÀz„ƆɄŠÀz„½u~¾tyÇ‚…Æ‚ƒÈ‚„ÌŒŒÐ“ŠÉŒ†Ì””ИŒÐœ™Ô¢•УѮ¦Ð¦ Î¤ŸÎ¡šÌ¢Ð ’У™Ô° Ò¦“Ó¬ŸÖ±Ú³¤Ù°¤Ý¼ªß· à¸¢Òª¢Á|‚ºU^¿PVÃUZÂUZÈRWÅ\_Æ\bÈ^hÌZbÍ^dÌ\bËbjÊ[^Ë\cÈZ\Í^bÌ[aÆY`ÊZ\ÊW[ÉY`ÉSYÅX]ÅTYÀR^¿PX½R]ºO[¹QXºPX³JX¯IY·P\»NZÂZ`Ï``ÒhfÙlgÚshÛshÚtk×rlØsnØsnÚvnÜvnÙxtÚzsÛ~xÜ~uÞ~sÞq߀nà|lÛylÞ{nß}mÛ{lÜzfÜvfÝvfÝzfÞvdàxcÞubÜtcÜrbÜp`ÜvhÚsd×ndÖnbÙod×lbØn`ÖocÔse×od×pd×o`ÖnbÔrcÓkbØv`íªzøÄŒúИùبûÛ«ûܯþà¯þÞ¬ýҘÉuf¼\Z§DOz"D\@T?T>XAXD\D`@`DbCcFdEd Eb@b>d@^>W?X>\FbHb=`:_?^9\9`<é„mæ…lçˆpêŒqë“pí–qì‘lêŠhè€eäxZÜmUØcSÖXOÔ[RÕ`YÛo`â{bæƒfê‹léjèŽkê‹nç‹lêuëpëŒpé‡læ‚iâubÚfXÍZTµBI 6J–-F–.D¡4E£5Hª;H°?K¬>M®@L¯AM±DO²@L²DL²FO±ER°BN´FP´IRµIQ¸FO¼IS½MTÂTZÅOQÈTVÊSRËUVÈWVËYVÎUTÎ\\ÌXZÒ\]ÐWWÎZXÑYUÏ[YÎZYÍ`^ÎYZÒ[ZÒZZÒ^^ÑZ\Ï_`Ò]\Ò`cÏ\^Ó\[ÑXZÐYXÐXZÎ]]Ï^`ÊbnÍeoÍpÌpyÅjw¾`k¶dvÃtxË€Äv~¾zƒÅx~Å|~Év{ÇzxÆzzÅ‚ŠÈ‚Æx~Ç‚‡É…Ȇ…ƖʇƒÎ’ŒÍ“Ì•‘Ó Ò¤ŸÔ¨ Ñ¢™Ò¦–Ï¢šÒ©™ÕžÒª¡Ô¯š×§–Ó« 
ÚµžÝ´¤Ø´¥Ûº®âÀªá»¦Þ¶¦Ìžœ¹jr¼PYÀPU¾T\ÀX`Æ]gÉ[bÆ`iÉ`hÇ^fÌ\bÈZ_ÉZbÌ\`Ï\^ÈY`ÈZ^ÌVZÊX^ÆXZÆUXÂS[ÂRW¾NV½QZ½R\¼NU»R]¸JS´O[³HV·Q^»NVÆY_Ð^_Ôji×poÚndÚshÜvmÙukÙriØtmÙuoÛwoÚyqÙwqØ}xÙ{vÜ|uÝ|mÞ|qÞ}iÛylÝvjÞzgÜxhÜweÜvcÜubÝxcÛwfÜt`ÜthÛraÜrbÚtcÜp`ÙpeÙqcØk`ÖndØm`ÚkXØpbÖo]ÙpbÚp`Øo^Øo^ÖpbÕl^Ôl`á‰fó¶„úÌ•ùÖ®üÚ®ûܬýß®ÿà­ÿÝ©üÌ“ážy²MR~#CX<W@S>U=W;Z?Z@_CdBgCe?cEcAb@cAaBZ@Z?`F`B`B]:\>Z9Z;_<`:æˆoçŠmèpìoí–zì“mê‹mèƒlæzbàrXÚdTÐVKÍSOÎRN×dZÝvhâ~lä†gèŒlìŒléŽiêŽlè‹léŠlêŒnéŠné„hè}`ãvdÜh[ÌVPµCL¡4F—*B”/Fœ2EŸAX¨>M®@O©>M­>L°AN°BK°FS±@I¯CM²DL±?L³@JµJS´JPºFO¹JR¼OT¿NSÄQRÃTWÆTTÇTTÌVTÉXXÉX\ÊVWÊZ\ËZXÅZ\ÌXUÊW\ÇXTÍ]XÌ]^Ì\YÑ^\Î^^ÐZ\Ñ\_Ñ]^Ò]^Ò[ZÑZZÒX]ÍU]ÌalÉZkÍkxÎ~ŒÍ~ˆË~„¾rz¾fm¸Xe¼Ydºix¼hrÄnsÀpxÄrvÄx|ÅrwÈqwÅvzÈ‚ƒÊ}€É„‚ÉxzȂʆ†ÈŒÇˆŠÊ”ŽÎ˜–Ì™‘Ï”‘Í–Ò¤—ҜϗŽÑ¨œÖ§”ϜҬœÙ¨–Ó«¢Ú¸§Ø®žÖ²¦Üº¦Ý¶Ü¶¢Ü¼®Þ¼­à¼¦à¹¥Öµ¨¾~ˆ·T_¹NV¿V_½[dÃV_ÄY`Å^eÉ\dÇV\ÇV[ÆXcÉ[`ÉY^ÇX]ÍZ\ÌX\ÇW\ÃTZÀRX¿R\ÅQWÁNW¾MXÀOUºLV¶MYµGT²FT²IW¸MZ½OVÅR\Î]`ÓfdÖmlØrkÜvnÚulØulØtjØsmÚsmÝtpÙvpØzw×wuÙxsÝ|pÞ}rÝ}nß|nÞzhÝugÝwfÛtbÝxhÜxfÚsbÜr_ÜveÜteÞudÛucÚqdÛp^ÙrfÚpcØpbØpeØn`ØpfÖl\Ùm`Øl^Öl`ÚoaØo^Ùo`ÕsbÔmaÔl\Ùq\ìŸsùÄúÏŸûתûÜ­ýÞ±ÿß®ÿà¬ÿÚ ñ¼ˆ©HP\>RAV@U?XAZAZ?X>bAeDf>d@bBd@eBdD_C^D[@^>aD`>]=\A]?\:_:b9æ‡rèŠmë’xì”ní•vípê†jæ€fãzfÞiZÐXPÆJJ¾EJÇOLØ`XÞrbã|dæ†iêmê‘pêŒkìŽoèŠmémëˆkíŒjê†jèƒgät[Þk\Ð[PµDM¡6H–3G“.G˜5M;N 7J¦=L¨CSªFP­GT¬AL¬AK±AJ®DM­AL²DK±EM´FQ³IS³HR¸KSÁJUÀMSÂLQÆTZÅSTÇUUÆRUÆXZÈZ\ÌUTÉW[ËTXÎWUÌ\_ÎYWÊZZÌVUÍ\[Î[^ÏZXÒ]`ÐZ[Ò]^Ð^\Î[^ÐUXÒX\ÑTZÕelß›ŠÐy€Ñ„Ê}|Ãv{¾ow¾biÀbiÂ\c¾\i¹_n¸dlÀlwÀlvÀnsºfm¿kp¾jrËzxÇqwÅutÈ„È|{Ê…ˆË€~È„…ˆ‚Í“”Ò…Ï’’ÍžšÑ›Ê”ŽÌ˜—Ò¢‘Ô¤—Ò«¡Õ£‘Óž×´¥Ü®—Ö¬™Ø¸¨Ü±–ܰŸÚ·¨ßº¢âº¡Ý´¦Û»°ßÀ«ä¾¦ÝºªÇµguµR_¸Xc¼PXÂS[ÂR\ÀWZÂX_ÉYZÂV^ÅX^ÆX^ÇV\ÆV]ÆX^ÆX]ÄV]ÂT[ÂPTÁPXÂPVÀQWºNX´L\·LW³KU²DR°EQ´KX¼OXÉRWÎ`hÖedØpiÚriÜtnÜvnÚul×tiÙrjÚqjÚwjÜwmÛvnØsmÚvrÜ}qÜ|rÞzkÞ|nß~rÞzlÜtdÚqcÛtdÜqeÚpbÜrbÝr`ÜueÝtcÚvgÜqbÜr`ÜpbÙqeÙpb×pgÙo`Øl`ØpaÚp_Ûn_Øo_×pfÙrcÛo`Ørd×o_Õm^Õk^à}dò±‚ûÅ‘úÒ üبýݰþá³ÿä³ÿÞ£ò¾€‘.CM 8P>R=W?V@\@Z=\>dBhCc@bBbBdE`=]?Z=\B_@g#I_@^BW<^B`?_6d=f:çŠrèrë“pí˜vì’qêŠjç‚håx`ßp]Õ^OÄKM°@J´@JÈUPÖdXÞugä~fæ†dêtìlëŽmêŽjêmêŠlìŒmê‹oë†iè~hãvaÝf[ÎXS¶FO 6J”,D0F™2J›8LŸ6H£:L§=L¬=N©@Nª@Nª?K­AN¬AM¬BI²FM²CL°CL³FO¶EL¸JQ¹LV¼NXÁPVÄQVÄTVÄPRÅTYÆWYÃZ^ÆXZÅTSÍXXÊVUÌZZÏXVÉYZÎZVÐXXÎ[\Ñ[YÐZYÐZ\ÐXZÐXXÎY\ÐXXÎw‚Ïb`Í`dÇaeÈmqÆllÀgj½cj»^jÀbh½\g¼[j¼\h´`r¿gnÁnzÂlp¾nvºfr¹erÂhpÅw¼htÅrxÆy|Ê~}ʈˆÈ„ňŠÌŽŠÐ–ÍŽ†ÍŽ‡Ð˜É“Í ›Ðœ—Ц Ö¬Ñ–Ò²¤×²£Õ¬œÖ± Û¯”Ö¥–ײ¢Ý¸žÚ®™×±¥Ü¼­Þ¹£Ý¸ªÞ¼°àÁ±âÁªÞº¥Õ°¨½‚ˆ¯Xf¶NZ»NXÀPXÀV]ÄT\ÃSZÄT[Å[`ÆV]ÇW[ÆUXÇU_ÆWYÂV[ÀUZ¿TZÃOW¾MS¾PZ¶KW³JY´LW²FS²GU²@J·KW¾KUÇT^Ö^`ÖhfÜlbÜqhÞwjßvnÛtmÚthÙslÚsjÜtnØwrÚvoÚwrÚunÛvnÛ{uÝxkàylÞ}oß}qÜwhÚuiÛrbÚn`ÚnbÝraÝsbÜsbÛvfÚtcÜqaÛsgÚp`ÚrbÚufÚpeÙpcØodÙpbØpbØo^Úp_ØsbÚqaØthØqfÙl_×oaÕpfØsfè–sø¼ŠýОüتþÞ°þâ¶ÿâ¬ýÔÑ|_Z8P>Q=T>W>Z?\@_?bBfBeDdAdCeA`<[<Y>Z>`@d!Db?]<\=X;\9`;b:hAf=éŽmê”ví›sî—rëvê„då€dãw^ÚiXÌQK¶BF¤3C²9EÈRNÕbVÝsbä€fç‡hìŽnêŽhêmèŽléjëŒníŒnìŠmêŠnè}hãv\ÝfZÏWO¶CIž4G+BŽ-C–2F›0E¢4G¤:I¦?N¦?L¦@O¨=J¬CN¬AK¯DJ®CM­BL¯FS±FT·FN´HSµHN¸MV»FK»OVÁRYÇQRÄTWÄS[ÃVYÉSRÈUVÇRUÊZ[ÊWZÇ[]ÈZ\ËXWÊW[Í^_ËZ\ÑZZÎWYÎ[^ÏWUÐXYÎV\Ög_ìšrÊNMÀV[¾_d¿\d¼_c»`e¼X]»]g¼\e¹\f¾`n½^cºbl¾ms¼nvÁlp»ip¸doºhpÄptÀsxÂprÄpwÆtwÆ|~Â~…Át|Ë…†Ë„~Î’‹É…‚ĉŠÌ–’͌ӧ—Ó›ŒÐš–Õ¦˜Ù®ŸÙ± Ö²¥Ú°œÖ¥–Ô¯£Ú²˜Û²ŸÛµ¦Ú¶Þ°–Ú­¢Ú¸®à»¦àº¨Ü¸¬Ú»±Ü¾«â¾ªÛ·¢Ã–™´cm²LX³NX¸NW½NX¾T\ÁYd¼T]ÅU\ÄSXÃTYÅS[ÇSXÂTZ¾S]ÀTY¾Q\½NVÀOV»MX´P_·GT´LX±FS®FT¶FR¼LZÁS\Ï_aÖffÚnhÜtiàvjÞyoÜujÝtgÛsjÛsjÜqiØtnØrjØtnØsnÚupÙtnÚunÞ|kÞ|qÞyiÜxoÚpdÚpbØnaÚndÞo`Ûo^ÜpbÞqaÜvfÞp`ÜoaÙqdÚscÛrdÚpeÚo`Øk`ÛpbÙn^Øl_ØpaÚueÙrfÚse×tjÚrdÖpcÒqe×n`Üykï¤|ýÊ•þצÿß²ÿà­þÕ•ê¥p‚)AL :Q>Q 9U=X>YB^@dCgGfCcBdBhBb@[:Y<X<]@a@c@`BY=Y:\:]:c<g>c9\8ì’pë™xîœrî”mì‹kéƒiåxbßq`Õ`W¿JI¢:J–+B¯>FÇQOÖe\àtaæ~eèŒkéŽnìkêŽlëjçqè‰jì‰kêŠnê…hçzbãrYÞf[ÐVN¸FNœ2E-FŽ)C“.G™4HŸ8M¢>N¥;L«:Gª>L¨@K¯>K¬@M­CO«BQ¬EP®DO®EN²HT´BN¸GM·FR¸DNºNTÀNVÁPSÃQUÀRXÂVVÃQTÅPSÅTXÊ`cÊZ[ÈXXÌ\[ÉZ[ËZ^ÎZYÏVVÑZXÌZZÌZ[ÌXXÌQWÏXZåœ}Îo\ 
:T¶NY¼\cÀZbº`i¼X[À^e¸X`º`jº[eº\e¸Wb¾biÂjpÁflÃko¼eg¹fp¼jr¾lvÀnuÁkoÆs{Ànw¾x€ÄrtÅz‚Ɔ„ŃƒÄƒˆÈŒˆË‰‰Ò¥˜Ôž”Л•ΣšÔ¤™Ô¦šÖ°£Ø®šÕ£–Ø´¢Ú´£Ù¨”ر¤Û¸¤Þ®—תžØµ¤à¹ Ý¶£Üº­Þ¿°Þ¼¦à»­Ú»¬Ú¼®Úº¨Ýº¡Ô®¡¸t{­N[¯FT´IV´OZ¸U`ºPW¿NS¾RY¼RXÃR[ÁTZÂRXÀT\¾PYÁLV¼OY¸JU¹LY´LZ³KX¸HQ®FR°AQ³FVºNZÅW^Ñ_aÙjfÝmgÞtlâwlàxnàylÞxlÞtfØuqÙrmÖspØtlØqlØrm×pmÖpi×upÙunÞxpÞ{kÞ{nÚrbØphÚlbØn`ÝnaÜneÜp`ÞsdÞtbÛteÚpdØoaÚrfÚodØqaØpeÚo`ÚpbØohÙoaÚqeÙsgÙteÙp`ÚtdÙugÖrf×ufÖteØshâƒmù¾ˆÿÒžÿÛ¥ÿÒ–ï«xŸBHT=N>Q>T@T@V<\Bb?gCf Fd@f EgAhC`BX>Y=]?cBbA^@X:Z?[>`;d>k?d>T2I2ì“rî–wî•wìré…jæ}fâtdÚdVÊTR«<F—*B–*B¨;IÅRMÔbZÝo\ä|`èŒlêŒkì’jëqêŠlêjèŠmìŠlêŠlêƒiè|bâmYÝcUÏUQ¶CK4D)BŽ)B˜2F›6H :K =L¤@N¬<L©BPªCR«@N¬AO©DO«BP«DP¯GT°CL®DP´FN´HPµKT½LS»NU¾PUÁPSÄNP¿OUÅRTÄRRÄVZÄTVÆWYÇV[ÄVZËUWËZYÎZ^ÌVYÉ]_ÐZXÎZZÐZXÐXXÏ\\ÐXVâuXÂRT²IZ½PX¿V_ÄVYÃ^c»V`½Xb¾_iÁei½[h·Yd¸\dºdp¼dnÃilÀkr¸fq·fq½go¾jsÂouÆrvÁjnÄknÅpuÅxvÃx}ÄvyÆ€|ˆ‡Ì‰†Ð’†Ð“‹Ò¤ Ó¢Ó ›Õ¤˜Ö¤Ô¦šÔ¬ Ø¬˜ØªØ¯ Ø°šÞ°ŸÙª¢Ü¹§à´ŸÜ¯œÜ´§ß¸¡áº¢Þ¸¬Û¼²ß²ݼ¬Ú¶¦Ú½³Ü¾µâÁ¬Û¾¨Á–š¬Wc±DT´MX±JU¶GP¹JS·LW½QX¾OWÀQZ¾PXÀOXÂOU¼N[ºLU¶OZ´O]²HX¶JW´JV±HR­ET´GU¹NZÄP[Ò`eØjdßpiÞvnâzqâ{mÞzmßxqÞwlÚtjÙtnØrjÖro×mhÔtmÖnkÔokÔmfÚnhÛrlÝ{mÞzpÝsdÛtjÛpeÙndÜo`Þr`ÞraàsdÞugÜteÛpdÛr`ÛpfÜoaÜqaÛpd×nbØqhÚpgÙpdÚrjÛqhÝtfÚnbÛseÜtdÚukÚugÝxlÛxnßxjó¢~ÿÈ’ÿÌŒï§q£HNY?Q;T>V@U>V<[=\=dAa>d@cBfBgA`AZ@\A]@`BdA_AV=Z@[>\=bBi"Cg!DW9F3D3î•~î”pî“oìŒkç„hçx^ßlYÒZT¸FNš.>*C–0F¬?LÈRNÒc\Þp\ãyaç‰gìŽlì’jêrê‰nêŒiìqìŒné‹mæƒkæ{`álXÜfXÍVQ³EK›/D(CŽ(B“2J›8L¢4G¢;R¢@RªFW«HVªGY­HX©BP­GR¬COªAQ­EP¬DR¬IU³DR´KV·JT¸IT¼NRÁRXÄTY½JPÁMQÃSVÁSXÂUXÂUZÅVZÇUYÆUVÊUZËX[ÊXXÌ\[Ì[\ÐYXÎWZÑZ\ÒXYÏZVÐpg»NR»MU½RZ½KVÂV]ÂW_¿V]½V_½Za¾Z^¿_h¾\e¼\g¼\eÀdmÅlpÃnqÀks¾en¾ku¾jsÄovÆorÄirÃacºktÀt{¼krºjtÈ{Êyż…Њ…Í’ŠÓ—‰ÔšÓ¤—Ø¢ŽÑšÒ­ Ø¥‘Ï –Ó©£Û¯˜Ö£”Ô¦žØ¸«Þ¶£Ü±ŸÛ¸©ßº¥ß±žÛµªÞ¾¯á¾¥àº§Ý»¬ÛÀ²Ý¾¦Þ»ªÜ¼®ßúâóáÁ©Ë¡šªYg¦CUªBP¬HS¯HQ²JS¶JR¼MT¼PWÀRX»NX»PY¹NX¸NZ¶MV³K^´IVµJU³LX¯IY®AN²FTºJXÅR\ÑZ]Ùihßnfßwnâzräznãzjâynß|qÜyrÜwnÚrlØmiÔjlÕlkÔjhÓklÒgd×lhØoiÛwkÝylÝvhÝthÜpbÜpbÞp_ÞreÞofÜsfÞt`ÝtiÜpaÛo`ÙpiÚqbØpfÚpdØoeÛsiÜrcÛrgÜteÚpeßsdÝtgÛtjÜtjÜwiÚujÞznß}pà{nî’uù´}æŽi›<FX<W?WBW>V=X<Z>]>`<d<`@d@cCd?a@Y@ZBcFeDg@bB^EX<]A\@d>n(Jp,ObAI
+3?0R9î”zï”tïlì…eèfâr\×cWÄTS¨;H”.D(Aš3E°BJÃQSÕbXßp[ä|hé‡jênì’pë’têŽtêŽrê‹në‰lê†gé„jç{bàoZÙg\ÎTP²BI™-BŒ,BŒ(@”1E6JŸ:K¢>P¨?O©DV©DU§AR¨L^§DT®CNª>M­BN°@M¬EQ¬?N°EO²GR´KU·LXºKR¼PX¾NXÂTZ¾TZÀRUÃQYÂTWÆRTÈXYÆUXÄTTÍUVÊWWÌTXÌZZÐXXÑYXÍUVÎV[ÏSW×ppÕdWºMU»Q[ÂSVÀVX¾RW¼PW¶MVºSaÁU\¸Vb¿^dÁ\gÃbiÁ^bÀ`iÃjmÀ^a¾gn¿mpÁjrÆljÌmoÈnmÀeo¼enÁrzÈnrÉ|yÇrpÈ{€È~|Àw͆Ά‚ΖӘŠÐ‘ˆÔ£•Õ™ŽÓœÖ­¡Ø°Úª–Õ¨ ×´¦ß¸ŸÜ±¢Ú¶¨Ý¹¦Üµ¢Ûµ©ß»ªà¸¢à¹©Ü»®Ý¿®ß»¥Úº©Ù¹®ÚÀ´ÝÀ²àÁ³à·áõãÄ­Î¥š«bož>MŸBN¤>J¬BN®DNµIR³LW¸R[¹KR¹PW¸LX´JX°HW¶JV³L^¹PVµIU²ET±BR¯FV¸IXÁS\Ð^e×jjÞqnàvhã}nã|kã|mä~pàylßzoÞtlÚsjÙmfÔhdÑfdÍfiÎcfÐb_ÒbaÔieØpiÜxpÞymÞviÞrbÞobßp`ÞpeÝqeÞtgÞuiÞsdÞq_ÜpeÞpdÚp`ÛoeÝobÙrfÝqfÛshÛrcÛsgÜrdÞvfÞqbÜvhßxjÞyiÝxjÞwjãnålç„lÖo\’2C_AU?ZBVAV=Z@[?a?`=b@f>e>f@f;b>[<U;^Bg$JdC`>ZEV@Z>bEbCq)Ls.QfBT;D6K;E]ï•xî’rì‰nèƒlæxdÜl^ÏVS·DH1B“-D–*Bœ4F®BOÄVXÕdYÝn\æ€hè‰hìlî‘nèqìsêsê‹mî‹oêŠiè„iæxaâr\ÛbWÊVU°CK›,BŽ*D*@—2Gž6H¡:M¤@P¦=M¨BTªAO«DR¬GS¬CQ®GSªBR°DQ¬BQ«BO®DP±ER·HR´HR¶MV¸KT¼QY¾PWÂPVÂPZÁNTÀPUÂSWÆVUÌVZÈTVÇUVÊVUÈX[ËX\ÈZZÏXYÐYVÎWWÍW[Õf`é§~ÇPQ½MX¼SWÂPS¼OU¹MV¶KW»PW¼S]¾V]¼Y[¾ahÄfk¾[a¼`gÆhe¿_d¼]bÂgjÇlpÄjlÄopÆnp»jwÃkoÆuxÁmsÉppÄlsÂwzÃqxÀ{…˃„Åz|ȇʇɖ‘ÏŠÎŽŒÐŸš×¬™Ø ”Ö¨¡×°ŸÜ¨—Ú­¢Û³¤Ý¹¡Ý´£Û²§Üºªß¶¢Ýµ¨Þ¼±âÁ¯à·¢Þ¼¨Ü¼®Ûº¤Þ»©ß¼«Ü¾²Ü¾±Þ¶¢Ú´¨Ý¾µßƺҲ´¬~Œ©t¢Sbž<K¤@O«DQ´GR´O[·KV¸OX·JS¶JU²JX·HS³ETµJZ´DU¬DU¬BT¬CX¸KWÁN^Ñ^d×npßwsáznä|lå|lå‚sç€rã~oÞxpàwjÜxn×ohÕebÏbdÉ\`Å]dÈZ[Î\`Òbg×njÝrkÞzkÞvlßseÞpeÞn`ßrcâucâudàxjßwbÞl_ÜnbÜpbÜn^ÚodÜqfÛpfÜseÜtdÚshÜreÜtjÝzoÝtdÞvhÞxhàweàxjâzlã€pè€hØkb™7J`@X>V=V<Y=V:V9Y?aAg Ch!Cf?d<`=_:Z=X:^>dAgBdF\GZAYD]BfDh$Gr+Ng"EV:J5O;„:SËurîvìŒpê‡læ~fäq\Ù`TÃLN¦5G˜9M•2J“,D˜4H®CNÇUSÔdXÝq]æ{bèŠhëiìoìjì‘pënéŠrë‰kë‡iê„fæ|cát`ÚbUÉVR±@I›,DŽ*D*D˜1Fœ4F 3I¦@R§?L«GY¨@M¬>P¬@R¬@MªDR«CR­CO­DO®ER¯DO°FNµHPµJQ´LT³OTºMT¿NSÄSRÄT\ÀQTÁQVÃRVÆZ[ÄUVÆSVÄXZÈSXÊX\ÍVXËSVËWXÒ\\ÌYXÐTZßwpÜmUÁNUÂQP¾NSºIP¸JS·IP¸P]¿QV¾X`¾Y]¾T\Â]a¾X_¾ciÉbeÂ\aÀ_cÂflÂlnÂei¼hrÆchÌnnÍquÅpqÈprÃjl¿lxÃsz¾ekÇxzÌzÈ||Í~zÇ€|Í‘ˆË‚~Ëš’Ô¤”Ô™’Õ¦Ù­–تŸÚ©ŸÙ³ªÜ²œÚ¨Ú±¢Û¸¤á´œÛ°žÜ·¬ß¸§àºªÞ¸«Ý»°à¼§Þ¸¢Ö¶§Ø»¬Ú¸¥Ó¬¢Ñ°¯Ò½¼ØÅÇàÎÈæÔÍëÙÐæÛÐà×Ëξ¸«`hœ5H¦;M¬DQ±MZ¸IR±KW³LW³HP®DT±HTµGU±IV°FX«FXªBY³EV¸K\ÀS^ÒbdÖnmßsmá{på~nä€månæ€nâ~sá{nà|sÝxlØodÓd`Ì\_ÂV^¿PZÀTXÅT\Î`_ÖedÚpiÝulàxkßufßpdàrdßsgàrbâxgßvjßtdÞn^ÞneÝpcÝp`ÛpdÜpcÝrlÞrgÝrdÝqfÞreÞukÜqeÜrhÝuiàwiáxjâ{häoæoÞwh¸KPk@XAU@V?W=W<V:\>bDc?fBc@h@f<d:\>Y=^AeCe DfG]BXC\A\@`Dh!Fr2Sh$HS;J<N<y0Nºioà‰{êŒvê‡mèiäwdÜj\ÌVR´?G™.D˜3G›6I™1E/F¬AMÈTSÕbZßqbæ|cé…hëŽnìsì’pîpí‘uìnìŠhê†jè…dæ~dár]ÚfYÊTP°ENœ2F(D/F–2FŸ:L 5J¥?M§FT«H[¬BQ«@O«EQ«FQªBQ¬DT¬AP®FT°CO°DO°GR´GU·IPºNY»PX½TYÀU]ÁTWÂNTÀPRÂPV¿UXÀQXÊXYÈTUÆV]ÈV\ÉYZÎVZÊVXÌVXÒXXÒVXÒ\Väx^ÅUWÀSS¼LR¸JT·FO·LX¸KS»U_ÁW]¼X]½SX¼U\»PXÃbfÂZ\Âdl¾XbÀbgÅhm¾dlÃdh¿afÂgmËlnÁilÄnsÆln¾jrÉrpÂrxÂsxÇz|ÌzzÍywÍ€Ì|zË‹ˆÎ–Ò”ŽÒ—“׬–Ý©šØ¦›Ø´©Ý¶ Ü¬›×§œÛ·¢Ü±Û¨šÛ·§Üºªá·¦Ú®¤Ü¿³à¼¦Þ³¡Ú·©Ú·¨Ö­žÒ­©Ò¸¼ÚľäÎÃèÎÂêÐÃìÓÅìÓÇïÖÆòÝÏòàÑòàÉÒ´­ŸDV›4LŸ8J¨ER®FT®LV°HP®EP®CQ±CO°FU±GV¬FX¨FX©>T²GX³K\¼O`Ð_cØlhÞukâwlå{næ€pä~lç€pä|nãzkæ{lß|rÛriÕgbÌX^ÃPX´DN³JV¹IVÄTZÐ]^ØkjÛoiàwnàxnÞtißr`ÝrhàrcârdàteàsdÝpfÝn`ÝrhÞpcÜreÜpdÜodÞqfÝqeÝpdàobßtgßrbßvlàwkàwlãznâ~qæ€nä|gÂVVz'HXCU@X>X@V 8[AX<`@`>d@h@c=c<`8[:Z<]<d?jEb?_HVDX@^D^Bf$Kq.Py7Yf FO9Q@m'N¡YiÓ~täxë…fè†næycßn`Ô^W¿MO :K™.A˜2G˜3Hœ5Gž3H¬FRÉWTÖd^àtdå|fé‡gë‹iìlê‘nìŽnìpëŽmìŒjèŠmæ„læ|bàq]ÚaVÍRL°@K›5F*DŠ&B”2H4J¤8M¤<L§?Sª>Nª?P¬@O«?N­FR¬ER«@N®ER¯FR®DQ¯CO³ET³JPµLV¸MVºLSºNU¾QXÁRW¼PVÄPUÄV[ÄX\È[\ÊX_ÊVWÇ\bÊVZÆ\ZÈX^ÈTTËTWÐZ\ÐZ]ÔgbÐ[VÄQR¼KR¼HPºMXµKT¹NW½O[ÀT]À^^ºRX¼RX¼X_Æ]cÃ^dÃ\c½\d¿]bÂ^b¿em¼dl¼cjÀ[bÀjlÅdoÂlrÆhlÊqoÅjiÁisÃhlÂrv΂ÈllË|xÏ€}ËŠŠÎŒ‡É~‚ӟء֠”Ú°¢Þ²šÜ©žØ¦œÛ¶¨Û«šØ§œÙ²¨Ý¸¤Þ°žÝ´¤Û¸¬à¹¦Þ² 
Ù¯žÙ±£Øª›Ó°²Ô¿½àËÂåÏÃæÌÄè̾èÔÆæÎÅèÎÀçÑÆêÓÄîÖÆïÞÒôáÎïÚ¾»|’1Hš4K¡>M©CR¬GT®DQ®HV®CT¬FT¬FW­BS¬DT¥DV¤@V­DXµN\¾Q^Ï`cØgbÞsláwiå}nä~lä}kæ€kä}nå|nâ}qà|oÞrfÚg`ÌZY¼JP¬?J¤;M§;M³IXÅU^Óca×kmÞtpâxlàujàsdÝoaàpbào`àtcârdßpbÝpdÞncßqdÞodÞqhÞuiÝqgÜtiÜreÝrdÞrfàsbásháwháthâzlå€næzkÐ_Z.JZ>VET=X?X<\>^>`>c?f Da>e@f>b@]AX=Z=cBjBhC\AVAXBX=^B`Go-R|:[t5XdHW=o)K XhÅxzÛƒuä’wéˆvèhâtcÛdVÊQP­@K™.Fš0Hš4H™4Gš0Hž5H®CNÄTUÖd\àq`å|bê…fìŒkì’kênìqì‘rêjêtè‡pèƒkæz`Þm[ØaZÌOP³>Kœ0F+C+Fš2F 6I¤;L¨>Rª>L¬<O®GT­DT«DP­DP©CP«BP¬BN¬AO®DT®HQ²HS²ITµJRºNT¸OZÀOTÀNV¾RTÀRVÀPTÃRVÅSUÄXYÉVXÆTXÄZ`ÍWZÈWXÈTYÎYXÌXWÐ[\Ò\]Ñ]VÈQT»IN»LQ»NTºLU½NU¼T_¼U`ÀZbÀWa½RY¼[cÂ]`ÀX^¿[fÀ[^½Z`¾Z`¾_d¸T]Àcf¿`dÀhnÂckÆru¾bkÂlqÃbdÄnrÉhkÄlpÊrqÇprÉopÇimÐ~zÉzw͈„Ô•ˆÔ…Ó˜ŽÚ«—Ü® Ö¢œØ±žß³œÚª ×¬¢Þ¹«Þ­šÖ§žÚµ¨Ýº¢Û®™Ö§›Ò ˜Ò¨¡Ò´´ØÀ¼äÌÂçÏÂèÏĿɏåÌÀæÌ¿æÌºçÐÂçʼèͼéÒÃêÐÃîÙÉïÞÐðÞÈØÄ¸¤^l0Iš7J¢>N¨FS«BQ¬ES°ES­DP¦BS¨FVªDS¨BV£=U§BV´KZ»P\Î`dØjhÝrlâuhäzjåmä{iæ~lå|mâznä|oàzrÜskØljÏ_`¾NW¢8J•1H–2J¡6MºJXÊ[_ÒhiÜnjÝtkàvpßseÝrfàobâreàufár`ÝpbÛoaÝofÜrjÝpbÝndÞpfßqhÞvjÜrgÜshßshßsiâulâviãviåtæ€pÛmc¢<O`@T>T?U 8Z8Z8[;b>c>d@d@d@f=b>`@W<Y<b<i>i?`BU?ZBYB\AcFi(Or-Ts0Tn+P`?p&J›SdÀprÒ~wÙ‡vß‹uè|dãybßm^ÑZQ·EO 5J”.Dœ4K4I5H6J¢8H±DOÆTTÒbVßq_å|dè„fënì‘lì’lì‘pìŽnênëŒoéˆmédåz_àn[Ø`TÈMQ®MLž5J+E,F˜2HŸ6H¥8J¦?N§@N¬?QªBP¯@N¬BO¬CT¬@P®L[®HW«DR®FS®AN³HU²BO¶JRºNV¸OWÂQT¾TX¿QTÃQXÁRZÃVYÄSVÆX[ÊTXÊWVÆW]ÌVVÊZ[ÊVYÌ]XÌYYÏ[\ÖaXÉPRºHP¸MT¹IQ½KRºLT¼OTÂX`¿V`¾V`¹T`À]bÂ\cÀVZ¿Zb¿Zc¾]b·T^¿^]ºXb¼al¾`gÄfgÁgnÊjmÈlnÌroÈipÊvvÄcfÂprÇklÃqvÇrsÄjnËz|ÎvwÍ€Ò‹†Ï†…Õ™×›Ž×œŽÜ¯£ß®˜Ü©šÚ¬¡Þµ¦àº¥Ý°¤Ù²¦Ú´œÜ¦Ô¤–ΜšÏ¤¨Ñ²¶ÞÄ¿åÌÀæÎÂèʹæÌÀå˽äɾäÉ¿äÍÂäɼæÌºæÍÀäÅ»æÊµéÐÃéÓÈîÛÐïÛÍðßÄд­—BT”2F˜7LŸ<M¨>P¦>MªDR­@O¬@Q©DV©@Q¨AU¨?R­>T³KZ¿PYÍ]d×efÝnjàxläpä€mâ|kä{mãznã~pä|páxpÝqjÙleÍ_\¼NW™:J„%C€"AŒ(D¨@RÃTZÎceÕhiÛrlÞvjàujàtkàn`ârfàveàtiÞrgÛpdßqdÞrhÞn`ÝndÝrfàthàobÜpdÞqfÞsjàtjäxiâvmåylénßvi¹LPmCU=U9T :V 7[:]@b?d@f@fBc>f@c@`<X>W=_@d>iBbAZ=W@\?^B`Bg"Mq1Wv6[l)Pg%Jz1R˜Nc»mrÎxÏ}tÖ~uÕ†yèvaâr^ØaRÃLL¤7G›.Bœ6L¢5H¡:Lž9Jž1D¢;N®BLÅTVÔdXßo[ä{hçˆnëŽhîpìlìrëŽrëŒnêŽnêŠmè‚däzbàn[×bUÉTR°FMŸ6J‘-E2L˜3G:L¤8L¤:L¦<N©@Q®@M®CP®BN«BP¯FQ®JX°DQ°BQªAR®DQ²DN²HT¶JRµKR¼LS¾NUÀQTÁPQÆPOÅPUÇRTÃRVÄVZËVWÊUUÊVVËVWÊZ^ÌYXÌ\[ÌZ\Ð]YÇW\ºLT¸FP¶HR¸HNºPZ½RV¾T^¿X`¸MX½XaÂ`hÂY_¼X_¾[bÀZaºU\ºZa¼U]¸V^½^fÄbfÅghÄkoÄbbÆntÎttÆhnÌolËlnÇooÈilÄrvÉzyÇosÌwuÉkqÏ€|Ö†€Ò€€ÔŒƒÔ‘‹Ö˜‘Û£–ݪ™Øš”Ù®›Ýª“ܨšÚ¯ Þ·¢Ú­žÏŽ‡Ï–”Π¥Õ¸¹áÉÄäÌÃêÓÈèË»æÈ»åÃ®äÆµäɺå˾å̽ãǺäÈ»äÆ¹è̺æÏÂâÆ¹èʶéÓÇëÔÆîÛÎìÚÌêØ¿º”—”:N”-Fœ2F¡;L¢>O¥CU£@T§AS¦F[¢@R£?T¦:O©@T´IXÁPXÏ[_ÖfcÝokàznä~pä~mázmä|kä|lâ}oâ|qÞzsßqkÚlfÑbdºQWž7M~=q@x=ž4I·LWÊX\ÒfhØlmÞumàvmÞvgßriàocÞofáreßsiÜndÜmdßtlÝoeÞphÜpgàrhàpfÞnbÞodàpfäneæpfäxoê~lè|mÈc`†%@[:R 9U8X 6Y :Z:^=eBgCgChCdBgCaBY@T ;\Bd?hBa>\@Y@\B_Cf1Vh2Zm'Rr6Zp1Xn*Rx2T™Lc·lsÉ|xÐwÌzuÍzsÖ}râq^ÜfVÊTP°>Jœ2Iš2Eœ7K 8I¢8G¡4C2Dž7K®EOÃTVÖe[Ýscä{bæ†péŒnìŽlëoìŽqëŒlêqêqèŠkèeãy_Ýo\ÖbXÈRO°CMž5I‘,F’(C–2Gž3G¥:O¤;N§@P«;Q¬>M¬?R°FT«FR¯FS°HU­FU¯EO­CT°DO®ES¶KS¹KT¸LT¸KS»OVÁPVÀSUÄRUÁT[ÄQWÅVXÅW\ÈVVÌVXÊXXÍXXÈYYÈXXÍ\^Ò`\ÉRR»OXºJQ»JO¶LXºHP¼PUºS]ºNV½PXºU^ÆZ\ÁY_¹UZºQXÂ`g¼TX¼V`¶U_ºU`ÀW\ÃcgÅ^bÆgjÂ^gÂlpÊkmÊorËjiÇdjÌqsÊfjËnpÐtxÆmrÏrrËtvÎzÑwtÎ}wÒ†Ñ~|ד†Ø”†Õ“Ø£˜Þ©–Ù¡˜Ö¡”Ú³£Û¦ŽÒ•ŠÏ”’Ê•—Õ´´áÀ·æÍÄéÍÂèÌÁêʸåÈ¼ãÆºà¼áÅ´åʼãžãÈ»âÈ¿âÄ·âɼåǺèɹæÎ¼âƺå˹åÍÂêÐÃìßÒíÚÈäÓ»¸ˆ‹2G“0H›0FŸ9N <P£BS¢?T¢AT£@S BV¤>RªAS·GTÂPZÎZ\ØgdÜqnáznä~qå|lå~mç~kä{là|pâxnÜytßvkÚljÌaf¾V[ @R‚"Dh=p>Š$@¦;LÀQXÊ\aÖhgÜpiàuoàzoátjßpfàpfápdÞthÛqgÜogßpbÞqfÞpgÞqiÞrdàshÝn_àreàqfäqfæuiç{qé}jÜl_5FbAT8V?Y>Z;Z<`@dBjBgDdBfAf@`@\AU@Y>h Dj"Bd?\=X?YB^Aa#Je&Oh)Sl-Vt8az/Z~3Y–Pi³iuÈzxÍ~ЂzÈwsÇrqËulàfRÔ^N¼FJ¡7G˜2Hœ6HŸ8J¢6G¡:GŸ7E 
6HŸ4E­EMÃQRÔ`ZÜq_äybèfêŠrëŽoënìŽnêpêŒlìŠnèŒlèƒbäycànZ×`UÊPL´DNž2G‘(B,E˜4Jœ;N¢<P¤?P¨ATª>Pª@S¬DV®GU©CN®HV±FPªDN­ER«CP­CR²HQ´IRºJP·JR¹LT»PUÀPSÀSUÂTXÃPTÄTXÇWVÈZ`ÈZZÉXZÈQTÌZ\ÈWZÉXYÐ^YÉXU¼JQ¶NX¸GN¼KQ¸OXµKWºQ[¾RY¶MZ¾[`Å\aÀRW»X_¸T_¼Z[¼W\²QZµQ^½Z`À^dÁ[\ÄdjÄc`¼X_ÇfjÆkoÇvsÆgfÂnpÍtrËlmÏrnÏqqÌuxÎvtÏrsÒ|xÏzzÌ|}ÌyvÏ|ÔŒ‰Ó†ˆÑ‰Ù¤–ܪ™ÚÙ¥–Û¬š×¡•Ì„~ƔϦ¯Ý¹±èȺæÈ¼ê˶åËÀãĵßļâ¼âȾäʸÞÀ¶æÈ´äÊÁâÈ¿ãǽáǺãµäȸàżãǹåÎÆãÐÇäÒÌèÔËíÝÑîßÔíØÂÜÆ¹¡[kŽ/E“1I—6Fš8Mœ7M>NCU¡?R¢@R <P¦?R®FU¼Q]Î]_×ddÛnláxoå~tå€tä€sæoå|jã{mãznà|sßxqÜolÓefÂV_¦AP„%Bd8^9y:˜1G³FSÃR[Ñ__ÚkhÞumÞuláwkàrcßpdàrfÞpeÜqfÛmcÞpgÞnhÞrjÜnfÞqnàrgÞpfÞuhàvpãwnçxnç~rÞscµBNk@VAb8\`0S^IZ<\=eCfAgAe@gBfBcB^@X@X>b@jBfB]AY>[@\?`=d$Lh%Nm+Vt4_u0\‚:`–Kh­fwÅvxÍ|zÐxÓƒwÂqn¾nl΂zØZNÈQN¬=H›1I›6I¡8L ?Nž8J¢:M5G9JŸ6G«>LÇTSÕ^XÞr`ã{gæ„hèŠrêŒpëoì’têŒnêŠlìŒnêŠgè…dåz`àlZÖ^SÈRR°@Hž6IŽ,G+F•8L›8L£<N¨?Q¨AQªAP©;N¬?L«DO¨@P­AM¯DQ«@P­@N¬BN®FS³HS´HN¶FQ¸IQ¸QY½PT¿PVÀSVÃRUÄRUÇTTÈUWÇUVÊTWÇ[\ÆVXÈWXÉYYÌZ^Î[S»MR¸JQºJO¹KR½MWÀRZ¼QZ¼PZ¼NX¾W_ÃW\¾V^½W`½V\¾U\·V^µN\¬M\µTb¿]eÆbcÂehÂaf¾ekÂhmÄhmËwtÌmnÌsrÌvrÆptÐwsËnpÎxxÔvuÏwvÐtkÈruÊsrÎqsÎvvÏ{vÓÖ’†ÛžŒÙ™‹Ú¡”Ü©—Õ–ŒÏŠƒÈƒ„Çž¦Ü¾¸âĸãĹæÀ±çŴ俵âÂ´ÞÆÁäÌÃæÐÄäȺâĺâIJ῰åïáĹâ¶àÁ·ßĻ߯ÂâÏÇäÎÄéÒÂèÐÊéÓÄêÔÈìÜÒïàÔìÜÒìÛÀͳ­—ASŠ-CŽ1G“0F˜6J–9L™8Mš;Pž;P¡<P¥@S®GXÃQ]Î]_×ihÞqlázmç~mæ€qä|lå}oå|lã}mã|qà|vàskÚppÑglÂZc¨BT‚$Ab9W;fB†,H¢DUµKVÈX`ÕgbÝojàtláwpàthÞpgàsjÞpgÝndÞqfÞocßncàrjÝohàrcàrjàqeâpdâulæukê}pærÍZV€ @Y@V@V;Z>Y;\>a<c>h>hAd>gBfA_>Z>X?a?n Bm$E]=Z@]D^D_@^Ab"Gh*Xl.Yv7b:_”Jg«at¼t|ÊywË~{ÐxÎ|xÀon΀wÔ‹ÌRP³DHŸ2F›2Fš7H¤>N¢>L£:I¢8H:Hž2Dž4H®AMÃQQÔc^Ýq\âxbæ€fåŠfëŒnì’sêqêŽsêuínêˆlçƒjä|dÜn`Õ`TÇOMµ@HŸ3GŽ+FŒ1J4O˜<P¢BR©AR§BP¯FS«AUªBN®BN¬AS¯DM®DSªCR±FR­CO®BQ³JT²HN·GN¸JP»MP¾QYÂPT¾RYÂUWÄRQÆTSÄUXÁTYÆVXÆV^ÈZ`ÎZ[ÍZZË]_¿TX¼TT¶KR·HQ¾NSÁSZºPZ·LW¼T`½R_¾W^»T^¼V]ÂTV¶JT´S\°HY³P\¹Ta¼Zc¼V_ÊehÂblÁfnÆabÇghÌlhËijÌrqÍgjÈtwÎvsÒrsÑwrÒ{zÎzvÍnlÀhoÎwtÈlmÍyyÓyvÏ{zׄ׈֒‡Ú©šÖ”„ÑŽ€Å€}½ŠÐ¤¨âÁ²åÇ¼æÆ¶åȹâ÷ửâÄ¸áÆ¿æË¼æÌÁæÅµãǽâÀ²ÞÁ¶Ý»²Ú¶¬Þ¼®àƾàùäʽèÒÆéÔÄçÎÂèɹåÌÀäɼå̾èÒÃêÔÅîÝÍîÚÆïÚÁ¸…†“@M†0F…,D3G3H˜7Kš8Lž7Kž6O¦?O¬HXÃRXÑ^[ØifÞphâxnå}pæ~pã}lä~täziâ~sâ€vá|rÞvnÚphÒb`ÀVZ¥<J‡%B_ 6P8`@v$F˜<U®GWÄV^Ñc`ÚjgÞpkávlávmàujàpeÞphÞldÞqfÞlbÞpißphßndÞqhàpjàqhâseåthèxiê|mÛmb£7F^;R:R :W?X<]@bAh@fAg@g?j!De@`:X<R 9^;fBpBfBV<[@^>aC_AaBfLi&Tr5b~8b•Pn¨bwÀv}É~|ÈÌzwÎ|sÂqrÊ{qÔ‹|Ø{¹CK¤9I™3Fš5N 6G¢:L¡:K¤;K¦9Lž6Fœ1DŸ8J¯=IÃPTÔbYÝnZâw`æƒièˆjéŒvêpësêuêŒrê‹nê‹uè‚lã~iÞn^Ôe`ÆRR²LT 2F)E4I–5K;O¤<M¨BQ¦?M«DP¬ET¨?O®AP¯AM¬@L®CQ®DP°BN¬HQ­FN²FS¯FM¶ER¸JRºPT¾OTÂQRÆX]ÂUWÀRTÃSVÉQVÆVXÈVXÈVZÈW\ÆZaÍbaÆYZ¼X_¼Q\¶PZ½U]¸IU¼RZ½NZºNX¼R\¾S\ÁX\¾U^¾SU¼OZ¹RV¶Xd¶V\¸Zg¿X\¼\iÂ`b¿\dÅklÁ_f¿hlÈeaÅotÊlhÄbdÊrsÎolÌtrÓtnÒyvÓzvÉloÄrxÊssÊosÌrrÐtwÏyyÖ‚{Ò€ƒÔ”ˆÚžŒÕ‘€ÈxzÀt|•ŸÙ¬£âøã¶ä´âµã÷ã¸àûäôæÉ»à½²åıßÀ¸à¸«Ý¼³Þ¼²áÈÀݸáúçʹäĸäÂ°åÆ±âÁ²à²ÝÅÀ߯ºàÊÂàÉ¿âÎÀäÐÆåÐÅèÔÈîÚÈÜÎŶŸª™`tŽ>O‰,B‹.BŽ0B™2Fž:G›4K¤:N®DQ¿RZÏ]^Øhbßoiâxkå}så~mä}qå~räylâ{pã~ràynßwnÞphÓdaÄY_¥<K†">c9UBWFg Gˆ3M¢BTºIUÊ\\ÖbbÚmhßtnâtnáufârbßnbÜndÝmbÜmfÞmeÝoiÞrlßqjãrhásjäxjèylêzläti¾NPq>Z?Q
+9V 8W;W=`BfDjEj"DfAe@dBb;^<Z<[?iBnBiA[=X=aE`BbDd Fj*Qf&Qg"Nu3]IjªdtºryÊ{|Ë~{Î{ÂxvÂutÊ~uЈyÖŽ}×’¨;Lš2F›6J›6J¡7H¤;K¢>P¤:G¢;N 9L 4Hž4G°DPÁOSÔ^UÚmZâyhæ‚féˆhë‹mêŽpéŒréŽtéŒrëˆlé†lç„käydÜj[Ö^VÄTS²DL7K*CŽ+B”+Eœ9L¡;Q¥@P©?R«BR©CRªBQ­DUª@O¬CNª@O¬BO°DO±AO³FPµFR±JO´JQµJS¹MU¾OZÅRVÄTYÀSUÃSVÄNQÆT[ÇVWÍXXÊV]ÊWYÌ_aÔe^ÂQV½NT¼LVÀQW¾TZ¼PV¸OWºLT¸RX½QW¾T[¸V^ÃT\ºOV¼Yc¾Xc½\d¾X`¼WdºZbÀYaÆeiÆfhÁbhÃjnÅ`fÀlrÎtrÇkoÌtrÌfmÂnsÒurÊjjÑvmÎnnÇsrÍnlÉinÎrrÐpmÎ|zÐxtÎvwÓƒÙ–‰ÔŠ|Ãpp·o~Ì¥¨ß²¢à»±å¹¦âÄ·àÀ¹ß¹«ãƺæÉ¼åȾ᾵⺬ßĻܾ»Û´¬Ú¿¼à¸¦ßøæÊºãŶིྶ޼±ÜÁºÞúàúà¸äÊ»ãÌÄäËÂåÎÂãÎÀæÎÀæÎÄèÒÅìÖÉðÚËîÞËÖÎÀ¬„“…4F‡-F‰)A’0F•3H–3H£:L®BOÁOZÐ\^ÖicÝolÞwkäpå~pã|lætä~lâ|nãzrß}vßztÜqlÔdaÃVX¥>M‡&Af:X=X@`<p82Hª?LÀTZÏ[]ÖgeÞoiâvlãtgâseàqdÞqdÞmcÜmfÞkaßnfßphàriárjátkæxlêzlèxhÑ^Z‰*FZ;S:T ;V ;W:\@gAl"HjDhAhBg>f?]<Z:W>d>qCjC`@W@[BbB`CcD`Ie"Jh$Rl(Sƒ;_¦[n¾rzÅzzÊ{z΀|΀vÁvxÅ|zÓŒ}Õ”€Õ}דš:N—4H7J <J¢8J£9J¢7H¢8J¤<Nž8NŸ5GŸ6H¬?MÂPOÐ^XÛpbâu`ç„jè†nëŒpèŽjéŒvèzèŒsìŒtèŠrè…mäzbÞpaÕbYÉRSµBMž5F’+C,D—0C7H¢9J¦=N¨@Q¨@R¬GS­CP«BN©DP¯EO­@M¬FP®AO°HV®IQ°DP²HN¹HO¸MS¼JS½MSÀPWÀTVÁRUÁSVÆV\ÆTVÆTZÇX[ÈTUÉW\×i`Í_\¼HT¼IR¼HR¼KTÁTZÂPT¸N\¸NU·NX¼NV¼QZ¼TZÁV\ºNX¿Xb¿W\ÄXZ¹XbÀX\»_hÃdmÆbcÁfnÆba¾`jÉnuÉfkÍorÐppÉfkÊnpÐpoÊt{ÏrpÍptÌllÌnmÀirÇlkÄlsÈrtÌoqÊxyÐ~{ÓŒˆÐ€{ÄimŒױ§á½®Ýµ¬â´ Ûº¶ß¶¨äƶ࿴弧æÄµå´àÀ¶Üº´à¼¯â¹ݼ´ãǶܶ¬Ý¼²Ý»±à¿­ßÄÀàļàļãŹæÈ»äƼâǼâȾäʾäËÄäʽäÍÂåÌÂçÍÂçÒÆèÔÈêÕÉñÚÈòÞÈàÖıŒ•¢FP$@‹)D/F”2Gž6K®BO¾MTÎX\×hdÞrkàznã~på~pã|nå~sã{mäykâ~sâu߃ÜlgÒdfÁRX¨@L‹'Dj<Z>U:W 9f9~;™4J²FVÈV]Óa`Úgeàthâtgàteârfán_ÝlbÛlbÞqgàpgàndáqiãtläulçxmê{kÝj`¦8Ib5T 8T 8T9Y<\<^<h@i@jDl HgBfBfA\?V;`?kDlA`>^?\D\=fFbCc I`Lb!Nm$R{9^Ri¼nxÆwxÆy|ÈxvÎyÈzxÄxzщ~×~Ö“€Ö‘€Ø‘~˜<Nš7Kž9L£<I¤<K¤:K¢<M£=K :L¡9Jœ6K 9L¬ANÁNMÐb\Úpbâvdæ‚jèˆpêrèŽxèŽté‘zéwêŒrèˆsèƒiäzdÜo`Õ_XÇTV´FN <O‘*DŽ(B–0FŸ2F£:K¦=M§<K®BP©@N®@N¬BS¬CQ­FR¬BO«DP­FQ±EO­@I±FR´EJ¸JN»LRÀLQÀNSÂTVÂTWÀVXÂWXÈTVÂVXÂUXÅUXÆWZÎd^ÔcYÁRU¸LT·PW²EQ·LW¿OTÃRW¼OXºRXºNV¾QU¶R\¼NW¸PY»TZ»U`¼RZÁWZ¿Yb½^fÈfgËhlÈgjÃ^dÅlsÍpoÌmpÐwwÐsrÈhqÏzwÏjlÌnnÌonÈkrÏpoÈfhÆlpÊhjÂbmÌrsÌprÌtnÇnnÀkqÄrp¿nxÉ’‘ܱ¨âº©â½®ß¼¬Ú¶²ã¼£Û½´à² á¸§Üº°ß·©Þ¹¬Þ»´Þ¼°ÞÀ¶äÁ®ß¼µßµ«Ü¾ºàÄ´âÈÁß¼±æÄ®åÌÃåǸáÈ¾äÆ»ãȾäʼãÊÁâȼäʺãÊÀäÌ»è̽åŰçϾèÏÀçÏÀêμíÔÃðàÊâÖ³}†}%A"=ˆ'@.Cœ6L¨?O½MVÌ`aÕfdÜocàxsä€rã}sä|oæ~pâzpâ|räzrå€pÞxrÞldÔb_ÀRWª@LŒ'Dl:^=W:W 4`;m:Ž)C¨<LºGSÌ`aØdcÞohàuiâtgàtfÝqeÜm`ÜlaÞkbàndßriâtjátkäyné|räsd¾PTr;Z>T:V<V<Z>`Af@j Bh@i@h=fBeDa<[@_AgAjBf H\>\>c"Gb@cBaD`I]Hb Nv3\˜Vn´lxÅyÇwxÃw|ÊzwÆxxÁxzÎ…~Ó}Ö”‚Ö”€×’ÕŽ}š4Jœ8H¡:L£<M£<N¤<K¢<K¡?JŸ<Jž;Pž6J >P¬BNÁUYÕaZÜo`âuaç‚næ†oè‰pèŽréŒuêŒvêŠrèŠpé‡oè‚jæzeÞpbØbXÅRT¯BM <R’*DŽ,H–/Fš8K <M¤?N¥?L¦AM¨>Q«>M®FU®IV´BL¯@M®BP¬BO®EM­DQ²DQ³EL·IPºLS·JQ½NV¿PVÂRXÂTWÄVZÆXYÁVY½RXÄPUÅ\_ÝŠuÊSTºHM±BP°EP®EP¶EPºNRÂRX¾V[¿RX¹KT¿SU¾OX¾TX¾U^»PV¹R\¼X_¸Vb¾X`Ã`_ÆhkÈadÁfrÈrrÎijÏuvÒwuÎnoÇrvÌljÌnpÏvwÌqyÅntÎurÉflÉmrÇlrÈlmÉhhÆmrËrlÆkn¼em½js¿|ˆÑš”ܦ–ᶡඦ࿴࿰ྰ߯¢ß¸¥Ü¸²Û±¦Þ¶£Ü¸°Ú´²àº®àº­Þ´§Ý¶²àÀµâÅ¾çÆ·á¾¶èÆ±äɾ߽¹èƲäÈÁåÊ¿ãÈ¾ãÆºâȼåʾáøãŸãÊ»ä̾ãʾâȼæÎ¾æÒÈäÏÄåÑÃæÎÁê×ÊðؼÜξ›Vjv:~"@Œ)A˜4H§>K»NVÎZYÓd`Ûngàyoãzlä~på|oäzmæ}päzrã|oã~uãwnÞnkÒdcÄTZªBNŒ-Hj<\8[9X6X5b 5v6+E°>JÆWZÒ][ÚfcÞnfàriàteÞndÝk_Ük`ÜjbßjaàtjárhãtlæznèygÑc\Œ(F^;V<V;\=Z>^<d=i>i>h@f@eBh"DcA^@[?dAh Fc<\;V:^?`Bc?h$IZB]H\Mp(T”NjµlxÇ{|Ê|ÆxvÃxyÂquÀsvÇ~xÒŠÖ”Ö”‚Õ“‚Õ’Ô~›6Kœ4J¡:K¢>P¢<L£;Kž>N¢@N¤=K :Lž:N ;L¬GSÀQWÒc\ÛlZávbä‚må†nè‹pæŠpæˆrèŠtéŽsè†hçŠrç„jå{fßp_×f`ÄRS°DOœ6L‘,G,F–/Fš5K¤:N¥<Nª>K¨<O«?L¬?L­CR¬FU®CPªDT¬@L¬@M¬BN®DO±FN´FN¶HPºKR»MT½PW¿MVÁPVÀV[ÂQVÄQU¿QUÀNVÂNWÞ„pèmÆNR´AMµDP·IU´DO·JQºNZ½MR½QY¾PT¼MT¼OU»QX¼QVºMW¶P\¹PZ¼W`Ä[^¿^hÄZ\Ã\fÄmuÌjmÇ`jËsrÒssÍsuÌrnÆ\`ÄnsÎqnÐptÌyzÏnqÐqsÊrrÂenÂioÀ^hºbnÅllº`j¾blºdrÍ•’Û²§Ú¨™Ú¢˜Þ­œá»°æ¼¤à¼¯Þ´¤Þº©Ý±¥ß¸¦Ø´²Ù°¨àº¦Ü³§Ú¦šÚ²ªÚ·²âÀ±âļåÄ·äÄºáÆ¼á¼´çÆ·ãÉÁÞÀ»æÆ²âõãÇ¿áÄ·ãʼå;ãȺæÈ¸åʼãÊ»äɼàÈÂáÆ¸çÒÅçÐÅèÑÃèÕÆéÚÒéÒÄîØ¸É®¨~*D~ ?‡#B”/D¤;H¹HRÌ[ZÔd]Üofàvlå{næ}nä}lä~oä~tä{rá}rá|wàvpÜomÓbdÄXY¨FRŠ,En=]<^<]>Z7[ 
7f5~>›2F²DNËUSÖ`ZÚh`ÜobÞviÜraÞk^Ýj\Ük`àlbßreärhåwpæzmÜma¥>Ib:X>X<X=Z<X>`;iAj>hAgBi!EhAdC`@Z?^>gBfD`CY>^@dAfB`>_AVA_Kj%R…?a¬cwÇy}Î}È{|Ãrt»nv¿psÉ€ÒŒ}Ò€Ô’Õ‘~Õ”‚Ö“~Ö‘ <Jž8I¡:L£?L¢;L£>L¢9J 7I¢:JŸ9Jž7L 4F«CPÂSXÑ`WÜnbâzeå‚mä‡pç‹oêŽoèŠsè‹qè‰té‹tçŠræˆoå|iÝpcÔcbÆRS³EP<SŒ,I,G•3Jš5J¡8K£<L¢<LªAN¨DR«BO¬BP­CR«CN®FP¬CP®AO°BN®EM²DP¸FMµGO¶KR¹NSÀSYÃQTÄSX½RXÀPNÂTVÂORºPVÀTZê©„æƒbÂEL±=JºDN·KT³HV´NZºRZµLT»R[ºLVÁOT¾NWÀPXºMX¾T^¼U]ÄX^ÂV`Ã`fÆ^d¼YcÈbbÌ]cËjqÌjpÑotÒztÍgh¿ajÂlrÏleÅioÔurÌjsÍorÍloÃchÃmn¾`j¾`fÁ`bº`j´\f¿t~Ó–Ý ‹â¸§Ý¬¢Þ®žß°¤å´›à²¤ß¶ŸÜ±¦ß¶¨Þº«Ù­¬Þ¸ŸÛ²¨Ú¨›×°®Ù´¯à½±à¿µà½¶ä³âÄ»äÄ¹äÆºáÇÀãÀ±åÎÁàÃÀ⽫áÉÃåÉ¹áÆ½äȺäʼáǺáÈÀâȽßÄ·àĸÞĺÞĶàÊÂäÏÂæÔÈçÑÅéÚÎìÚÏëÞÊãÑ¿£ft˜;J„ BŽ*Bž8HµFOÊUVÔbZÝnfàvjä|lä~oã|mä€qä~qãztä~tàznßxoÜlgÓcdÇTV«CR+Er>^;\>^9\7[
+6^ 4m5‡"=Ÿ8F»IOÊVUÒ`XÕj`ÚqbÛpbÚj[Üj\Üh[ßn`ÞqdâtjåwlàvkÃRTt=V<V9U<Y=[=`Ed<j?f>h<gBfAf@cCZ>^BcBiDcBZA^DaCjBiF`@X@R=`Iƒ<d§`wÄy€Ð€€Ì}|ÈxzÁrqºnsÇ~|͆}ÔŒ{ÕÔ|ÖŽ|Ô~Ô“‚Ö‘}Ÿ>P¡<J¡:H¤<J¡<M 8L ;L¢>M¡<Jž6Hœ7Kœ7K§APÀOTÒaZÜpeâweå‚lç†néŠnæŒqèŽoêoéˆpéˆsé‹xçˆuä|iÞrdÔe`ÈVT²EQœ5MŽ,HŒ-K–1J˜7Mž4F¥;L¥<Lª?MªAO«AL¬BO¬FSªDS®FT°FS¬DQ®DO­DV²FR´LX¸NV¹NU¼KR¾PTÂMU½OVÀSX¼T\ºPX¸JU´K\Îmdô»ŠÚuY¸BK¯BL·EO´HU¯KW¶KV·JV¼LT¼NXºKU½PU¾RW¾PWÅZ\ÂZ^ÄVZÄY_ÃZ^ÀZbÅdeÇ^d¿alÇhlÍopÒppÓwnÈbcÃhpÆjiÈilÃhsÎtmÎjoÎqrÐmjÅflÆehÂahÄde¹Q`®Pd³ViÁ„ŽÖžšÝ™†Ü¯¤â°›àÀµà°¢á³¢Ü¦šÞ®—ܯ§ß¹£Ü¹¬Þ´¢Ý¯œÕ šÚ¶¬Ü¸µá»ªß¾·Ü¶¯à½­ãÀ³ß»°ãÁ±áżáÅ¼ãÆ´ßÀ»ä°ãÊÁÞÁ»äIJáŽáŸßżâĵàµàÀ¹ÞµÜÊÄÝÇÀáÊÀàǺåÌ»äÎÆæÒÆçÓÈèÔÄëÜÒîßÏëáÊÑĺžXft=Š'Cš2H¯HRÈZ^Ôd`Úmfßugãzlä|näzmä~rå}nâ~wä|rá|tÞxpÜniÔb`ÄSZ©DT.Jr?b>`<_:`:\ 5\ 5] 5l5‡$> :G³HNÂVXÌvyÒ“ÖŸ Ö£¤ÐŠÙi`ÛndÜ„}ãzjäwfÐcZŽ'D[=YAW;[@Z;`Bb@h?j$Ig?f=iEhDdB_C^Bd>fDeE\?\BbBhBi!Cb>X=P 9T@r*S›Ut¿t|Ñ}~Î}~Äx{Ápw¿prÈzuшÓŒ}Ռ֑|Ô‚ÔŽ|ÔŽ€ÕŽ|ÕŒ|¢<L¢@P ;L¢:H <Nž>O¢=J =Ož:Mž:Lš0Fœ4G§>P¿MPÒd_Üpdáxcähç‡nëŠmêŒrê‹oé‹séŠsêŠoéŠvçŠtä}iÞr`Ôc\ÇSV²FTš4N.JŽ+G”2Lœ6L¡7K¦>N¨>N¨:L©>K¬<J¬BP¬AO±FR°IV«BQ­DQ®ET¯GX²MZ²LY¶GQºLS¸PW½KRÁQZÂRVÀRV½LR¼LT¹IX¹Q]誊õ¯Íb`°?R²IU®HY²FSµFV¹GOºNWºHU¼PWºNTÀPTÂOR¾PUÅSVÄV]ÅYaÂR[¿[bÌaaÆ^gÄblÉioÊinÒsqÐhgÄfnÊklÌihÄhrÉnkÍlhÆlvÍruÍpuÅmrÈbg¶^lÂce²P`ªOd´`p̘”ݦ™Ü¨Ûª á¯•⯣㷜ޱ¨Þ¦œÛ¯¡Û¨œß¶žÝ°¤Þ¬™Ô ™Ó££Ü´¨à¾´ä¿²Þ¸²á¾®Þ¾¶Û·°à¶¦ß¿¸ÞÀ¸äĶãǾâÇ»âöÝÀ¸âÆ¹âÆ»àÄ»åÆ¶àÆºÞÁ¶Øµ¯ÚÀ·ßÄ½àÆ¼äÌ»äÎÁàÊÃâǸàɾâÍÄåÎÁäÐÅåÐÇêÖÆéÜÓîàÐîßÄ˰¬}"B~ C’0IªFTÈXXÐccÚldßxoäyhâxläyjâ}pà|qã{qä~tã}sà~wÜplÓ``ÄX[¨FUˆ+FpD`;]>a:_:[7\6X 5Z
+0g7+D¤UkÉ ¤àÀ¶æÏÄëÔÄîÒºàÄ´Ô žÜ¯²ã÷┌Ömd¢8Hb?V>U<Z>ZC]@^>fBh@g@gBdBg!Fi"Jf I^DcCf@b@^>ZAaDhEgAd?`=Y@T@h"LIj·n|Ì~|ÐzÈvvÄpt¼pxÄvvÌ„~ÔŒ„ÔŒyÕŽ~ÔŽ„ÓŒ}ÖŠzÔŠzÔŠ~ÖŒ{ž<NžAQ 9Kž:JŸ:Kœ8Mœ5G 9Jœ4Gœ4F3D3G§>M½OTÓ_ZÚnaàygæjç†oè‹nèŽsèˆpç‹sèˆoéˆqè‰tèˆräzcßpdØe`Æ\\²KVž8L,I-H˜0Hš2E¡:J¢<M¨<K§;K§>L°@L«BN¬CP¨@P«BP©BQ©>NªEV¯GX°N\³IT¸KS¸IP¸LS½JR¾OW¼NT¿NTÀLP¼KT¶FVÈ_^úÅ‘ïžwÏYW®>R²GT³HV±ER´GR¸HRºMX¹NXÀPX¾PX¾LSºJUµGV¹NZÄX^¾T_Å[_È\aÄ_cÍfaÊafËhlËfhÄejÉnnÍvsÐpoÉlkÎjeÇdkÅlpÏljÑqnÆ`c·P[º`j¿\b´Ve¯Pc¼u„Ô‘ÛŽÝ«¢Þ²¦àµ¢ß¤–⬔ڣœß«•ݳ©Ý³©Ü¹­Ù¡–וŒÖ—د¨ß² ßµ¬â·¦âº°à¾²Þº¶â»¬ßÁºÛ¸±â¼®âÁ¸ã¼¯ãƸà»ß¼ÞÀ¸ßÄ¿àÀ´à·޼´Ü½·ÞƹÝÇÁà¶áõáǼàÈ¿ãÈ»âÏÃàÊÂà²âÌ¿âÌÅå̾åÒÆæÓÇêÝÐìàÑï߯äÓ»CZy>‘2H­AQÄWXÔf`ÝndÝvpâyjâyjá{jä|nã|qä|mä}näxmá|tÞokÔcaÂZ_ªBPŠ0Jp?a<Z<_=\8Z:T
+2O 4N 8\Bœj~ؾºèÐÁîØÃìѹâÀªÞ»¯ÞºåÍÂíÑÂîÕÂæÖÀÄ–lBVARAVAX?\F[>cBi!DfAgBe?hBeDf$HbB^@fDdE^?_?^Ad@h Eb@\<V:\Fk$P‹Af²gzÈz~Ï~zÎzxÆvxÀpvÃxz΂Ò‰€Õ‚Ô~ÔŽ}ÓŠ~ÔŒ|ÓŠ~ÒyÒ‰zÖ‹{˜8J™<Lž:Jœ<Lž:Kœ;Lž9JŸ<J =Mœ9I›2Fš4F£>LºMPÏ\TØnháwhå€jçˆpéŒtèrçŠqé‹uç‹vç‰wç‰ré†mäzeào]×dXÈSS²CQ™9Nˆ$B‰(F–.G˜2H 7JŸ6K 8I¦;M§<L¨>Oª<M¨?O¦AM¤@R§>L¨DW©BS«DR¯HR°DP´HR¶GT¸LT¸LT½NU¼LT¼OTºJP¶BP°BXÛ“€ûÈ’é‹cÀPV¯>P²HX´ER´BRµJT¸JV¼LVÁRVÈTX»KVÁNVÀPX»MX¼R\¿X\ÂS\¿P_ÉaeÊajÈclÍhhÐnrÎknËgiÊdkÑssÌjiÇZ^ÃfnÎjhÐopÎkfÄ[^°Ub´\f»W]²Sc±\nÆz‚Ù›Ú¡–Ø™–୕㺪ᰜۨšÚš‹ØŽØ¢˜Þ±£Þ¸«Üª˜ÒžŸ×¨ Ü´¬à­™Ü²ªß²¢Ýº°à¸ªà»°âÀ³Þµ­á¼©ß½´àº®à¼«Ý¼´Þº¯â÷àļܽµÞ¼°Ù¹²Ø¼·àǽáǾãóæÊÀßȽ޽´áøàȼàÉÀâʾáÍÃÞÈÂáÊ¿äÍÂáËÄäÎÂåÐÄçÓÈìÞÔìÜÎêÛÀ²ˆx@Œ*C¥?NÂRVÑcaÚlißwnâzmâ{lå~lå~nã~lä~nä~qá|rà{sÝpnÑcb¾VZ¬ITŒ/Jn>\?\<\<W8U 8P 6L?XEŸ|ÝɼíÔÂìÒºæÀ¨Û²¥×¼¸àÊÃåÎÆìÖÈìØÏíÙÌîÓ¹Üijj,LLARDTBVB\C]AfAi"EfDbBh&Lg F`BbBbBcDfDdE_B]?`Dj"Fl(JaAW<P<^B~5Z¦_xÅxÏ€Ï~yÆv|ÂtzÃtv΂€Ò‰~Ô‘‚Ô•€Õ‹}ÕŒ~Ô}ÔŒ~Ó‰~Ò‰{Ô‰{ÔŠ{œCV›:Kœ:L;L8I6I¢6G 9H¤8Fž6H2Dœ4H¦:I½JOÒ`XÙh`âxfælæˆné‹téŠnê‹nè‹pæ‰xé‰mætè‡näzdàp`Öe\ÃTV®CRš/H†&BŽ)E“/H–3Jœ2Jž8J¢;L¦<O§<N¬AN§@N¨>L¨@L¨?N¦<Lª@P¬CO«DN¯HV±GV²HW´IR´HS·HQ¼NX¸KR»NR»FN°CV¸NYñ¼—ù¸~ã}eÄRV²BT¯@P´BO¸HR¸HU½KT¶M[½LQ½RXÂPTÀPYÆRVÀS\ÀU[ÂX^½YdÂZ\Æ]eËdcÌegÊ`dÏnmÐghÇclÇmkÉ\`Â]dÈjkÎljÐolÍhe¾[cµ]e¹X\­GXªI^¹j|ÏŒ‹Ø‘‹Ø–Þ¬ŸÞ¬žß¡Œà±£ÚœÙ¡”ؤšÖ ™Ú¤’Ù¢‘טפ¡Ý³¥ß²§ß®™Û®¦Ü¨˜Þ³¥ß¹¬àº°Þ¶¨à¸ªÝ»²Þ³ªâ¾¬Ú¸³Ü¸®à»°Ü¾¶Þ¼´ßº¬Ö´¯Øº¶Ý¾¹àÁ¸ãƺâȺáÄ»ä´âʾàǽÞÀ»áÄ´àȾßÊÃáȽâËÂàËÃßÈ¿ãÊ»áÌÄä;äÐÄêÖÊìßÒêßÊμ·…4N‡(C 9L¾OTÐ`\ÚjaÝtmßxlá|nå|læ~nå{jã{oåqâ|rß{rÛqjÔdb½RV§BNˆ(Dm=\;W9X8R 4M8J<f4Y°™¨à̽íÙÄèȰۮ˜Ö·±àÉÂåÑÈìÚÎïÞÐòßÎõâÑôäÕðàÐèÝÂi‚HBNBSAYBXAbBg"Hi@c?dAi Ff EbCbDaBb?f@a@[BaBbBh@dCaAV@V>p#NœQm¼t€Ï€„Ï}}ÊwyÇtwÄtxÈ~€Óˆ~Õ„Ö‘Ô’ÕŒ}Ñ€ÒŠ~Ô‹~ÓŠzÓˆ{ÔŒ|Ô‰xœ6I™:OŸ8Iž:L =M¤=L¢<L£>N¦=J ;Mž9KŸ1Fª<L½PTÎ`Z×j_àubäjè†méŒtêpëŒrêŠqêŠsê‰rèŠrç„læ|fÜqbÒ_XÇVU°BJ—.J‡&DŒ&E+E’0Gš4I :L¢:M¨=L¨BP­?J®@L®?M®BNª@RªAN¬CR¬@P­AP±FT°GS¶FR¸HRµJR¿KR¿NU»PUºJP¸KU²?UÈf^ûΚô©vænÊXY°DT®BR±HU²FV¸LU¾NV¶FS¶PWÂSZÄLPÂR\ÀQXÄWY¼SaÄ[_ÂR\ÄaiË`bÉ^dÎhiÎa]Æ\bÄhlÈc^¶O]¹`lÏmiÎiiÐkhÊbcÂZ_Æhg¿S\²L\®M^ÂnwØ“‡Ù›–Ûž•ޤ㯜ܗˆØ‰ÜžØ¢Öž˜Ø¤Ù«œÖ’‰Ñ“‘ל—Þ®›Ü¬¢à°žÙª Ý°žÜ´©Ú®¤ß¸¬à¸ªÜ´©Ý¼µà¸¬Üº±Ý²«ßº¬Ú´°Ü·¬Ú²£Ù°«×´²Ý¿¸âÅ¿âÁ¹à¹âÂ¸ãÆ»âźßÀ¸á¾¯âÇµßÆ»Ý¼²àÄ´àǼÞÈÀáȺßÈ¼ßÆ¼ÝúߟàÇ¿âËÄäÓÌêÜÒëÞÐáת‚‹‚&Cš4G½NQÒ^XØnhÚ|qßznâ~lä|hãnãpã~lä|mâ}qÞ|rÛskÑc_¾RT¤CMŠ,Eg:V:R9O8I9K?{Mm¬²æÏ¿ë־⼥ћ‘Òº¹åÏÂëÕÈìÙÉïÜÊïÛÊïÙÉòÚÌôßÍøæÒóæÌŬ­LFNDP@U@Y?^?bBfDgCiBh>`@^>^>b@c@bAX>[AcBh DfAfD[@ZAj$ODeµj|̃ӀyÍ||ÅuzÂvxÈ||І€Õ‹~ÔÖŽ€ÕŽÓŠ€ÕŽ|Ô‹‚ÒŒ€Ò‹~ÒŒ|Òˆ{Ò‰{Ÿ:Lœ:KŸ;K ;J£;G¢<L£<K¨>Q©?L¤8G ;K¡5F§?JÀOPÑ\XÚiXáveäfèˆréŠpêŽuêtéŽrépéŠqèˆpç„mæ~gÝpcØbYÊQQ­=I—2E‡&D‡'C(B’,Cš3G¡;L¦AL¦:Jª@M®@M«BP­BM­DR­BP°@M²@L®CQ¯@M°EO±HP´IS¹DN¼FNÀLPÀOS¾NU¹NW´GS´BXÓˆ|úÍ—ö©xáuaÄKR¯ER®?Q«BTµDP¶LVºDOµJVÀRVÆX`¼KTÀPUÂNZ½V\¿TZ·P`½`iË]bÎffÐhgÄXZ»VbÄda½PZªN]ÀdhÒigÒkiËghÉccÄadÄ]Y½QU´MZ¹P`È{}ØŽ‹ÙˆÞ¦–ݦœÞ¢–ݛؑ…Õ‹†×ˆ×š”Û´¨Û¡”Ô•’צ Ü¥—Û©œÜš‹Ù¨“ܪžÜ³¤Û¶®Þ­›Û·®ß³£Þ¸ªÚ­ªáµ¦Þº¸Þº¯Þ¹®Û²«Ü²¡Ö¬¦×®¥Ý¼·äÁ¶á¾¸äÁ³áÀ¶àÁ¶âÄ»âĻᾱàÀ´ÜÀ¶Þ»¯àÀ¯Û¾´Þ¾°ßǼâȾÞļÞĽàÈÀÝÈÂÜÈÂáÎÅáÉÂâËÁçÕÍéÞÕèÜËÖǼšQf˜.F¹HJÏ\X×jfÜnlà…zâ~uå~qç€jæ€oå~nä{nâzoßxnÜohÍcc½UY¤=K|=]5N8I 4I<RF“j†Ò¼èÐÀé̲֩•Ì•“ÚþåÒÄìÖÇíÕÅëÔÁìÓÃëÓÄëÒÅîØÈïÙËôàÎöèÌØÌÁ`(LLEOAV@^C`CcBeChCgDa?dA[@cAf@bC\>Z@\@dBf>fC_DZBdL„8\¬cxÆw€Ð€~Î~~ÇwzÁqvÆ{|І€ÔŠ€Ö‹€ÖŽ~Ö‹~Õ~Õ~ÓŒ~Ó‰zÓŠ~ÓŠ~Ò‰{ÔŠ|Óˆz >Rž:M¢=L <L¦:G¦<N¨;IªAN¨=N¦:H¢7H¤4E¬<J¸MSÎZUÚj_ávcåfèˆlëŽqêrìnéŽvêrërêŠpé„iå|fÞqaÕbXÆPO¯@L—.EŒ0HŠ-DŽ.E‘*B›5L:L¨>M¨@P¯BO°DOªBP®@J°DN°?J²BN²@M±DN®@N³DNµEP´GP·JS¸HO¿KQÀPU¼U[¸V_¶FQ´GXüÆŒñŸzÜo]¾HT¬=P¨BS¯FV³GS²FRºHO½LUÀOSÀVY¹FN¹LWÃ[`ÀJR¼VbÊ^_ËccÍfkÇaaÁXc¾`jÂY_¸Pc¸\gÅcgÐhkÏhbÊ`^À\fÆc`¾RT´PW³LX®I`ȂەˆÜœ‘Úš–ß§˜ÞŸ’Ø”ŽÚŠØ“ŽÔ’’Õ—–Ú¦”Ö…Õ’ŽÚ¦›á¯œÜ«ŸÛ¤’ל“Ùš”ßµ¨Þ²¬ß¹«Ø°«Ý¬˜Û°¤Ü¬£Ý¶¬Û¬«â¶¡Ú»³Þ´¤Ø¦¢×«¨Þ½µÞº´äº¨á½³á¿±Þ»´à¹§á¼°ä¶à¹޼µàÁ´à·޿´Ü¾¶Ü÷ÜÀ¼àźáÈ¿áÊÁ߯¾ßÌÄáÌÁàËÅâ˾áÈÀàÉÁåÐÇçÕÍèÙÌåÙÄÀ¢¦˜6L²CNÍ[VØiaÛtjàtã€ränçƒmæoæ‚oä~oá€rÞzpØlfÎc^¹NRœ<Jv:W 6H 6C<[(N¦‰ 
ÚƼéÒ¿äÄ­ÐœŠÆ›¡×¼éÓÅí×ÅìÒÁêпç;çÏÁéÓÃêÍ¿èÆ·ëÐÄòÛÈ÷æÑãÔÀxC`JDRCYA]B_@a@c>e@b@cA`<`<d<^:W:V=`Da@iGiDcC`I\Dv&PŸRnÂtÒƒ†ÑyÊzxÇvzÆwv̂ՊÔŠ}ÕŒ€Ö‹y׆{ÖŒ{Ô‹|ÕŒzÓ‹}ÔŠ€Ô‰xÒ‹{ÓŠyÖ‡x =P¢:N¢>L¤;K¥=L¨<L©?K¨>Iª>L©?K¦=L¤6Hª@L¼KRÎ]YÜiXãv`åfèŠlê’sêqêŽrê‘uërìŽnêsè‚jæ{dÜqdÖbYÄTT®EM—0F‹'@)B‘,D•0FŸ5H¢;N¤>N¬@L¬CP¯@M¬@M®CL²EN³BL±DM²BL°DJµDK²EP³EL·HP¼KTºOWÁLPÁNR¼NT¹Q]µHY¹OXô¾™ú¼…óžkÙoc¹AMª:L«@M¯FS®DS´JT¹JU¾PX¼PU´@L¬@P¿W[»MZ¿X^Ì\_ÊafÈ^`Ìb`À[bÀZ_¿OY¸VeÂ^fÑjmÍrpË^`ÂTZÃ`aÎfbÂU[¸PX¸LX´ViÎ~‚ÚŒƒÜš•ޟܧœÚ˜ˆØ…ÖŽ†×Õ–”Ü£šÚ–ŠÔ’Ú¥ŸÜ§Þž‘ܤ—ܦ’ئ›Ø£žÚ®¨à§•Þ¼´Ü®¡Þ¹«Ø®©Üª¢Ý§ Ú®£ß·¦Ù©©Ú¢–Ö£¤Û²ªâ¿¶ã¿µãÀµà¹²â¹¨Þ¼¶á½°Û´®Ý®£â»ªâøà´޾µà¶àúÝļÜĻ߯߯ÀãËÀàǼàŻ߯¶ÞŸÜüÞÅ¿ÝÇÁßÉ¿àÉÂâÍÆäÐÅèØÅÝμªev®BKÊYUÔhaÚulà|lã€mä~jç€oæ€jæ~læ€hß}oÞtcÖojÆZZ¶HJ—:Gg6J8F>o>a¶Ÿ¬ãÐÄìÒ¼Þº£Ê…Ê¢¨ÜÈÂéÓÆìÕÃëÓÀèϽçË»å̾è̾åɸáÅ¶áÆ½èÑÇïØÊöäÌçÛÅ„McKDP@ZAZB^Ab@b?fBbB`<`=`;b<\>V9]Ab?c>iAbB]EYCgHŽ@d¶j}Ђ„Ô‚~ÏzzËxxÆx{Ë~|ш׌‚ÖŠ|؇zÖˆyÔ‡|ÔŠ{ÕŠ}ÕˆzÓ‰~Ò†|Ô‰xÑŠ~ÒŠz׊y¤<J¤=L¥?N¨AN¦=LªAV¨BO¨?L¬@P¨?K¬BO¨=M¬@MÀQPÓ^VÝfYáu`ä‚hèˆkêpê’qêqê“xêrëqéŒnéˆiåmàt`Öe\ÃSS®EN˜4K*B.G‘-F›1F 8J¥:L£>N®>Mª>N®@O¯CO®CR²EM´AL²@L³@J²DP´ISµHS¸GP¹IO¼JQ¹JR¿MSÀOUÀPTµFR²@RÅ[YüМø¹†î•qà|aµHT¬:M­GT±DR´HW¸MW¸GS´HR¨>Q®GUÂNUÁU\ÄY`ÄX`Ê\_Í]`ÀT]Ä^fÃY`µN`»ZkÉ_cÈ`gÏhgÈdfÂ]gÈbfÏjdÂ`dºRWµGTºYeÔ‹‚ÞŠ…Ú‹†àžŠÜœÙ”ˆÖ“ŒÖŠ‚Ò„†Ù›’Û£˜×Œ†Ø—˜Ý¦žß§œÞªžÛ¡”Û›ŽÙŸ—ݪœß²ªß°œÚª¤Þ§˜×±¬Û©œÝ´­Û®¤Û´¨Ú¦žÚ ˜ÙÕ­¨Þ¸ªÝ³¬á¸¨â¾µà½´âÀ·à´¯à¶¦á¿´á¾®Üº´Ý²ªá¾®à¶ß¸߽°à¿µá³àÊÃàľ࿰޿±Ú¹«Ø¾·Ûľܯ¼ÝǾàŸÞÉÀàȾßÉÁàÊÂáÍÅäÒÉâÖÈ̶¶±htÇXXÖcXÝtfàyhã}hå~iæ‚læhä€jå~lâxeÛscÔhbÁRQªBIŒ-D]:]#JŽfʸ¸èÓÃèÌ´Û´¡È––ΰ´ßÈÁéÒÂêоêоèλèλäǹݻ­âŸáÄ·ãȺéÎÂêÔÈïÚÌõãÈèÜÄ‚K`JDR@\C\D`GdDdC`8\:\9]:d@`;[@]A_>eBfBd>Y<U>[F|*Rª\tÌ|‚Ñ„ˆÒ|Êz~Èt|Ê~шÔŒ‚׉׉|ÖŒ~Ö‡|Ô‹|Óˆ~Ò‹ÒˆyÔŠ~ч|Ô‡xÔ‡}ÓŠ~Ö‰y¨>M¨@N¦>Lª@K§BO«@J­DN®AK¯@L¬?K«>Kª=J­@L¼NRÑZXÚncát`å€jæŠnêŽnê’xêqê‘wépëlèlèˆpè‚gâveÙfYÆSW±FO4G0F’+E—0Fš4J¢8I¡>O§?L¬BO¯DN°AL­DP±BP±AL´CN´DN´BL±@L´EMºIPºFPºHM¹MS»MRÀMPÀPR¾MP¸JR³BUÑrhüΞøº‰ê’gÑ`V¼KS°@N²GR°@L´JT²GS²EN¤8Q­JXÀTb¾VZÄNYÆY\ÌabËTXÄZbÉ\]ÁR^»_jÀ^dÄR]Ä\fÎhgËdlÊrtÐnnÌghÆ`aºKR­DT¼ajÑxyÛ…wܛےŠÚŽ‚ÚˆzÔŠ†Ô‰„ؘ’ÖžžØ‡zÔ‡ƒØ–૜ݦकޣœÙ£œÕž™ÛªœÜªœß² Û¨£Þ¬œØ«¤Ú¤—دªÝ¦›Û²¦Ú£˜Ö˜‘ÒšžÚ¤ŸÞ¸¨Þ­¢ß±žÝ¶«ä»¬â¾¶â¼°Þ»°Üµ°à´¨ß¼²à¼¬Þ¼²Ý¶°â¿«â÷ÝÁµÜ¼¬Ý³¢Þº©Ö§˜Õ¶´Ú¾´Ûº´à´àÄ»áĹ߯»ÝÁ·àȼÞžàÈ»ßÉÂÞÈÁäÎÄãÒÏåÒÆÒÀ¾Éˆ‘ÖbYÞp\âzdå{bæ|aç€gæ~dä€já}fßwbÚugÒaW»PP 5D~&F†Ig¶œ§ÜÊÁêÒ¿â¾£Ôª™Î¢ØÀ½äÍÂìÔÃéÐÀè̽ç˼äÊ»äÆ¶ßÁ¶ÝºáÄ·áĶäË¿êÐÂì×ÊòÚÊõßÇåÙÁ€D]HBPB[C]EbHf"EfEa<_;`:`8]6Z8\=`>gBf<c<^:O 9M
+:fL–DcÃu‚Ò‚‚Ò€~Îx|Êw|ÈyxцԌ؎|֌։|Ö‰~ÕˆxÓŠ|Óˆ|ÒŠÒ‹ÓˆzІÓ„yÑ|Òˆ|Ô†|©@N§DO¬BL«>K¯@L°EP¯AL°?I°?G«<K®>Iª9G¯BJÂJOÒ`[ÜjXáufä€gçˆpêxëŒpêŽréxë’qëwípêŒtècâxhØh[ÆRRµCN¡3F’+E•.E™.D5G¤=K¦?L¨?N®CN¬CP´?J¶GN´EP³BMµDP·JO¶EO³AJ³DM¶HPºKS¾KR¹LR¾LRÀRTÃOT¾PV·FR³>Wà‘yûÊ÷½ˆâz`Ñ\T»IV«=L¨>L®@P¯DO²CR®@R·KWÀO]ÈVZÁMRÅ[^ÀOR¿TYÇa]ÆR[´HXÁZaÈW`ÅZeÄ_gÎ`bÍgnÌceÄZbÆ]`Ç`_¸HT¨<R¸eq؉|ØŒ„×€|ߌzÜœŠÔ}€ÓxvÒ}ÕššÚ “Ô„}ÒˆŠØ˜Ü•ÞšŒÞ¬›Ü¡œÜœ’Ü­ Ý®¦ÙŸ“Ú ™Ü¦œÞ°¤Û¨£Ú¦–ש¤Ü£šÚ°¥Ö˜’Ò”“Óšžß¦–ß´§Ü¶ªÜ®£Ü§ Þ¬›ß¶¬àµ¤Þº¯à¼´à»²Ýº²ßº¯Ýº³à¼¯Þ¾¸Ý·¬á¹âÅ·Û¹­Ñ–˓Դ´ß·Ý¶ÝÀ·Ý¿¹àÅ»ÝÁ¶ÝúݼÝÁ¶ÝļÞļÞøÜÃ¾ÞÆÂáÏÊãÏÊçÔÆÞÎÂÔŒÝjVâw^äydä{aå{eå}fã|eâydÝvbÕm^Ë[S°JUœD^¬y‰Ò¼»éÔÂæÄ®Û¶¢Ò¨ Ïª¦Û¾æÑÄèÏÂèξæÉ¹æÉ¶æÈ¹âĸÞĸ޿´à¶âĶäȼèÏÂêÒÆìÔËòÛÎôÞÅâÕ¿u9RLBTDZB\DbCgBe F_=d>`<b:`>]<bEcCjBhAc=T 8H8S@„5]³fxÏ{~ÓƒÎ}€Ëv|Èw{ÎÕŠƒ×‹~ØŒ€Ö‹~ÕˆÖ‡xÔ‡zÔ‡}Óˆ{Ó‡{Ô…xÔˆ{ÒŠzÔ†zφ{ІyÔ‡zªBPªAK®BP®DM®BL­BN­EP¬EOªBL°@L«<G¬>J°@HÄHKÒ\TÜjZáxiä€hæ‡tèrêqêsê‘tênísí‘pìŠlê€iâvdÕhcÆVT´CJ£2F‘*B”0E˜0Gœ5I 5F¤>Oª=K®@L­BN±BO³BL±BN¶CMµHR²HN´CN±AL³AH¶DM¸OV¹JM¾LP½OP¿LMÂPR¾QT¸ET¶CVꬖüÊ”ô²~Þq[ÊSO¹OT¬@M§=K©>O¬EN±GT¿LQÂNZ½IQ¾KVÂPV¹AJ±LY½][¿NW·IYÅ^bÎ\fÆXeÉ_dÎ]dÈ_cÈ[^¼R]»Y`Ä`^´MZ©F]¾fqÓ|wØ‚zÜŒ†ÙŽ€ÒvtÔyoÓƒƒÑxyш֒Í|ƒÔŽÞ ˜Üž–Ú˜‘ݘ“Üš’ܨګ§Ü¤˜Ý²¦Ø ˜Ø•ÚŸ’Ø£ Ú¬žØ¤¤Û§›Ú¦œ×“ŠÏ×¢žÛª¢Û¦šÜ¤“Ý´¥Û²«Ú°¦Ø§¢Þ²¢Ý´®à´§Þº¬ß»²Ý²«á¾°Þùܼ²à¼®ÞÀ¸àÀ´Þ·§Ôž”Ë’–Ò©¦Ý¶§Þ¼°ßÀ³ÝÁ»ÞÂ¹ßÆºÜÀºÞºàùÞúÝÀµÚ¾³Ø¼¶ÜÀ¶ÜÄ¾ÞÆÀàÌÄâÏÊçÒÁÞ¸Ü}nãv^äfädå|`ãyfâybßxbÚo[Òe]Ä^d¸sƒÎ¬°äÏÀèӻརմ¦×³¤Õ´²ÝÈÁåÐÂç;çͽæÊ¹âŶàĴ䯻ßÀµßÁ³á¸âÅ·äÇ»åȽæÎÄíÓÇìÖËñÙÍòÛÃÞͼf$DOET<ZB\@_?bB` Gd!He@b>_>^;^?eBb>h>f>_<P 4L
+:lL¤TrÄu~Ò„†Ò‚…Í|€ÈvÌ€€Ô‡|׋‚Ö‹ÖŒ|Ô‹‚Ô‡xÖŠ~Óˆ}І{Ó…|Òˆ}Ó†zÔ…|ц|Ô„zÒ„|Ð…{Ð…~«FRªDP©CM¬DM®FP±@J°BL­CN°BL®>G­@Q®;I´ALÅNOÖ\TÜk\ávdä€gè†lê‹rèŽrêpë”wê’mìŽtìnìŒkè‚jâydØi`ÊVTºIK 2F’)A”,E˜/C6J¢4H¥<Lª@N±EP­FR¯DR²DO²DO³EL²?L±HP´CK°?H¯@H¸BL¹HN¸GLºJP¸NT¾LO»PVºIQ°@Q±AQó¿“ýÉŒðžrÚfUÅSVÂRU¯ES¦<P±DQ°FUºHR»IP½HT²BP½LR¶BO³KUÄVYÁTZ¾V_ÆX^ÇT`ÈXbÎ]_ËbdÌafÊYZ»U^¿^c¿VZµNZ®M]ÄivØ|uÚ„×…Ù†|×…€ÓurÎpwÏusÖš”ÊzwÆlvÕŒ‚Ý˜àœ‘Ü›–Üœ“ß –Ú¦žØœ–Ü£ÖŸžÜ™Ú¦–Ø¡ŸÙš”Ú¨™Ú¡ŸÝ°¢Ø¦ Ó†ÍŒ’Ø’Þ²£Ü®¤ÛªŸÚ£šÜª Ý³¤Û´¤Û¬¤Þ¬¡ß¶§Ü²«ß°¢Þ·ªÜ°ªß¸­àÀ¸àºÞÀºàÀ²Þ¸ªÎŽˆÎ”‘ر¦Ú¹ªÙµ®Ù¸³Ü¾´ÞÁ·ß¾¶ÞÁ¸ßÁ´àºß·ÜÀ·ÝÀºÛ¿·Ø¾ºÚ¹°Û¾´ÚÁ»ÝùÞÈÂáÉÀäζܡšãu`æ~`å{bã{câu\ßyfÜveÖmdÒ„Ô¬®ßɽëÒ½æÁ¥Û² Ø´¤Öª ×»¶àÉÀçпæÍ¼å˺äɹãķ߸ãŸáÁ´ÝÁµá÷âÄ·âĸäÊ¿æÎÂèÑÈêÒÇíÕÌðÙËðܾӸ¨QDNCVF^E^C^B`B^@b@c@^7^9Z<[<b@f=e>`>Z<O
+9]BŽ9`ºfvЀӃ‚Ñ}|Éz}É|~ЃÖŒ€ØŽ~Ö‰|ÔŽ‚ÔŒyÕˆ}ׇ|Õˆ|Óˆ|Ô†zÑ‹|Ó‡~Òƒ|Ó†{Ò„zÑ…yÔ„yÒ„xªBTªCN°FQ°EO®CN­BO«CM¬AL°DO¬@N¯@N®BK¶GLËOLØ]SÞmXâwbâ{hæƒcèpéŽoì‘tì–sì”rí’vì“rí“tègâxgØf[ÈWT·FLž0D“.D˜-D›0Fš6H¤<L¥<K§>L°@M«AL±AN³FP²EM²EN¯@L³EL°DM°AN´@JµDK²DN¸HM½HLºJO¾MP¹KO³GQ®<R·LUö̦ù½ŒçŠfÖg^Ñb[ÀV[µFS®IU®AR´FRµIU¸>J®GX³AN¬<MºOSÄOSÀU`ÃTV»HU¾Q[Î^`Ë]fÍagÉXZ½KX¸TaÃZ^ÂS]¸T`´L\ÁkpÔpk×}Ü…xÙŠ€ÔspÐwvÔyxÓ…„Î|ÄgfËy|Öˆ‡ÚŠ…Ý”…ݘښ”Ü¢œÜ¢šÛŸ×˜‘Ô”•Ø”ˆÖš–Ø™˜Þ±¢Û®®Úž–Ú¥’Û“ˆÔ’‘Ò™›Ü§”Ú¥ Üž“ܬܮ¥Û¥žÛ©–Ú£›ÛžÝ²¡Û­¤Û œÝ§—Þ´¬Ü´°Ý¶²Ü¶°áÀ·â·áÀ¸Ù«¡Ò›”Ѧ¤Ö¬¤Ú´¨Ø²©Ý¹¬Û¼´Ü¾´Þ¼°Ü¼°Ü¼´Û¹­ß¿¸Ü½²Þ¾¶Ü¾·Ý¾·Û¾º×½·Ø¶­Ù¼¶Ö¼¹Ø¾¹ÝÈÂâʹ߾³ãxbå|bæ}dãxZßu_ÜpaÛvqØž¢àúêѾæÇ®Ü±–ض¦Ú´£Ô­¨ÚÃÀâÌ¿çξä˼ãȹâǶàõß¶à¶ßÀ²àĹàøâĸãÄ·âÊ¿æÍ½èÐÆêÐÃìÔÇìØÌðØÉðÚ¼À˜’KFPATA^F]B`B\=]@dAc>_<\=\>\:b>g=e>`@]>hA€.U¬YtÊz~ÒÒ€€Ï{{ÌwxÎ}Ö‡׋€ØŒ~ÖŠÕˆ~ÕŠ~Ôˆ|ÕˆwÔˆ~Ò…zÒˆ}Óˆ|ЇzЈ}Ò†zÒ…yσ{Ô…yÒ„x«CR¬FO¬BN¯?H®BL¯@L¬CP¬BL¬FT¬<J®BI±?G¼HJËNLÙ_Vßl^äxlá~lêˆfêpêlë“sî”rî–tí”tí”qípè†iãzgÛj\ÊXT´FL 3D•+@’*B™0Dœ9K¤?L¤9L¨@Q¬BOªDN­AN³EK°DN´@J²DO²EM²DP°BK²AI²EM´DM¸HNºIN»IN·GNºIO¯FRª<TÆb]üÒ ø¿‚àƒpÞr`Ð]VÈYY¶FR¶GR³HV³JZ°BP³DQ°?P©@R¶NX±>Q¼NV¼IR®@T¸R^ÆX^ÂT`ÆY^ÆRZºJX¼W`Æ\a¾S_ºR`ºUeÌsv×yrÕwvÚ{oØv×|sÏuuÐvvÔ‚Òˆ€ÀdiÈy~Õ‰ˆÚ…ٌՀژ‹Û¦¡Ú •ט֒ؓŽ×”Ô’•َܪœÛ¦ ß°œÙ¦£×‰…Òƒ„Ö™•ܬ¡Ü¡’Û’Ù¡›Úš’୘ܪ§Ü ”Û¤™Ø¤šØš’ݦ“ج¨Øž˜Û¦–߸®àº­ß´§ßª–ß°œØ¦ Ð˜•Ö¬¢Û³¨Þ¸©Ú² Ú²£Ø­œ×¯¦Û¸®Úµ¬ØºµÚ¼µÚ»¶Ý¶¨Ý¸°Û¶¬Ú´ªÛ¼´Ø¾¸Ø¼µÕµ³Ö²¤Õº²Ô´¯Ø¹±Û»Ýź݄yâz]âzbãxfÝxk؎ܲ®âʼæÍ¶á¶œÙ³žÚµ¢ÙªœØ·´áʼæÎ¾æÊ¸åǺáŹà´ßÁ¶ÞÀ¶àÀ´áÁ´àĹâÄ¸âÆºâÂ¶âÆºäÈ»çÌ¿èοêÏÂîÕÈîØÌðÚÅïܼ§v|KCRJX"I^H^E`AbAa@b>\:\<Y9Z<`?d?fBfFeEmJ~$M›HhÁn|Ñ|€Ñ€Ñz|Ïy|ÎzxÒ„Ö‰~׋ÖÖŒ€ÖŠ|Ö†Ôˆ}Õ†zÕŒzÔ‡{Õ‰yÓ†~Ò„{ц~І€Ò†}Ñ„zÐ…zÒ€z¬EN­BP¯BN³@J®@L¬@N«BO®BP±FO­@I®>I°@K»GJËNKÖ_WÞlZâ{hä„nè‹qëŒsì“mì–vî–rë”tî–}î–xì”vé‡jä{gÛiZÍXP²BL 2G’.F”.D—3I›;O¤<M¤<L«<K¯EO®@K¬@L¯DM¯BO±CN°CL²AL°;J±BI¯CN²FN²EK·JQ¼HO¹KO¶HN·FK®AO©7QÖ‚rüÑŸõ´~êkÝq\Ó_WÈ][´DN¸HP´HT²GU¹KX¼LW°HY¶ET´DU·LS¾PV´JYÀRYÈRXÉ[_ÃX_»O\¾Q\Ä^cÈ]dÅ\bÂU^¸PcÌsv×€€Ôwt×€{×stÕslÒrtÑvqÒˆÈoj¿bkËx~Ø‚ƒÙ†~Ùˆ†ÚˆÙ”‘Ö’ŽÙ™‰ÖœšÚ—Œ×—ŽÖ‘’Ù’‹Ù •×¢¤×Ž†Üª™ÛšŽÖ’‰Í€ˆÖ–ŽÛ¦™Û —Øœ—× šÙ›”Ù¤šÛŸšÞ¡Ø§¤Ú˜”Û­¢Ø£ ×˜’Ú©šÚ¯ªÚªžÜ´¥ÜŸ’֙ь֟›Ø¢œÚ¬£Ý´£Ø©–Ö¦Õ¤œÕ¬¨Ô®­Ö³ªØº²Øµ±Úº°Ý¾¸Ý»´Ø²°Ú®¥Úµ¬Ø´®Ù³¯×¸°×·´Ôµ²Ò¬¡Ô³­Ð²­Ó´®×¹²Ü²ۧ¦àqbßxd݆„Û°®à°èɴả޷¡à¼¨á´¤Ü±©ÚÁºå̾èÍ½åÆºâÆ¸á´ÜÁ¶ÞÀµß·ßµàµß·âŷ䯏âĸâĶãĸáÄ·äȹæÌÁèÐÅî×ÊîÙÍñÙÄìÚ¾‰RdOJRGXF^E`FbE`AaBb<Z<W 7^;`B`=eCc?dFp"Lz(P’8\±\tÎy‚Ò||ÒxyÎv{Ðx|Ï€~Ôˆ‚ք׈~׈€ÕŠ~Ô‹‚Õ~Ôˆ|Ôˆ{Ô†}Öˆ|Ö‰~ÒˆzІ|ц|уwÓ…xσ|Єzу{­BP¬FO¯BN³@J­CN¬DP®CN°AK¯EL±AI²@I²>J½CGÊPMÖ]VÝm]äzfå…lê‹nëŽmë’rï’pî–sì—wî•uî—~ï’rê‹pâ{fÜn^ÎUQµFL 6D‘+D“.C–3F?O¥@P¤8E©<K®@L¬DO«EP°BM¯@K±AJ±@L°BK±<I®@M­FR®BM²FN´FOºFOµFL¸GNµCJ¨;N¨:Oæœ~þΗ÷µémß~cÓh\Ê\X¾PU´BM²ES¬DS´IQ¶IX¸GRµFW¼R\µ@Q¹P\ÀLW¼ITÄY[ÄV\ºKX¿TbÈZbË_fÌ]bÀOX´K]ËjrÖxtÕxvÔ}~Ù‚zÖqoÏhhÒttÒ„ˆÎpg¿aiÍt~Ù~uÚŠ„ׄƒØ‚ƒØ“‹Ú™‹ÓƒƒÎ…ØŽ‚Ø”’Ø—Ú ”Ø™—ؗݦ•Ô›–Ó„wÓˆш†Ù¤“Ü –ÚšŒÚ ŒÙ£ Ú£™Ü¤šÚš”ÜžŽÚžÜšˆÙ¨ Ú–Û¨—ר©Ù¨¢Þ³ªÜ®¥Ý³¥Ô›•Ò™˜ØªŸÜ®£Û¬¥Û³§×¬¤ÏŸ 
Ö¤šÚ´­ÙµªÛº²Ø²®Ü½¶Ù¸²Ú¶¯ß¸§Ú¶«Ø¬£×ª¢Ù°¨Ù´¬Ù³°Ö¯¤Ö´°Ò±­Ð­«Ó®¦Ò¶´Ó¯ªÔ°±Ö·«Û¾ºÜ’Ûš˜à¿°èϹäªڪ‘Ú³žß±ß³ªãÀ·åǺæÉ¸æÉºäƸâŸàÄ·ÞÀ·ß¶àÀ³ß¸àÁ¶áÃ¹âÆ¼ãǼãŹáĸâ¶߿±ÞÁµæÊ¿æÍÂéÏÄîÖÌïÚÌòÜÄäÖ¿v6QKDVH\D`BbD^@`FbG^?]=X:^=c?c<dBdGjJt%Pƒ1W£PnÄrÓ}Ó|Íy}Êv}Í~|Є‚֊Ռ؋€×‰~ÓŠ€ÔŠ‚ÕˆÒ‡{Ôˆ~Ò…|ÒŠ}Ô†|Ô„xÐ…}Ñ…zÒ†xÒ‡xЂyц~Є{¯@N¬AL­CR³DN¯FP®AK«<I«CL®DP°?H±?J¶BHºHLÌQLÕ`UÜl\ä|dç‚fè‹nì‘qì•tî–oïští™vï—wî–zî’xê‡gäzdÞk\ÌWP¸HM¡4E‘,A+A–.Dœ5F¥6H§;H©?K®:H­AM®AN°FL°@J°BN²>I°?K¯>L¯DN®>L²AK³BL²DN¸FLµJP¸FL²BK©9O®<Pï¼™üΕö«vì˜jàxjÙhbÎZV¿LQ³HQ²ERºNS¶LSµFR­BR¶JVÀHQ½NXÅP[ÁT`¾S\ÇS\¿OYÁT\ÌY_Ì_jÈY[¿MW¶K^ÈktÖz|ÔrsÖ|zØ~xÔvqÒpnÐqnÑvrÊnjÀafÍpyÖ}xÔ{}Öxx؇ؖ“׊„Ò…ƒ×…Ôˆ‹Ô‹‹ÚŒˆÚ¢›Û¨¢Û¨œÖ”—Ø‘ˆØ–ŒÏ{Ô––Ú¢˜Üœ’Ý¡‘Øœ—Ö•’ÛœŠÚ¨žÜ¡”Ú›’Ø¡—ÜœŒÖ•–Üž‹Ù¨×œ˜Þ±£Üº³à²£Ü§™Ö¢Ø¥¡Ù§žØ¤›Ú¦›Ü«™Û´ªØ´­Ø³ªÖ¬©Ú°£Ø¶ªÚ·®Øµ®×²©Ú¸¯Ú±©Ú¦œØ¯¤Õª¨Øª§×­¨Ø°¬Ö°§Ôª¨Ô°§Ó­¤Ð«¨Î£ŸÒ¯«Ð¬¬Ô®©Ó±«Û¸¨Ý¹«ãÄ°çÆ­â¸¡Úµ¢Ü¹¦Ü·ªÛ»²âöæÉ¹çƶäÄ´ãÄ·âĶàÁµàÁ³ÞÀ´Þ¾®ß½²àÀ·àÀ¶ãÅ¸áÆ»äŹäµâĹ޼¬Ýº­àöæÌ½èËÀêÌÀî×ÊïÙÉòÝÁÛŲ\AL@V@Y<]?^>`Ad#EdD]@Y;Z8`>d>a?dIk$Pq$P1\–Imºf{Ñ{}Ö~~Òx{ÌwzÎy|Ï„‚Ò‰„ÖŒ}ØŒ~Ø~׊‚ÖŠÖŠÖŠ‚Ò‡‚Ó†~Óˆ€Ó†|ÔŠ|Ò†zцyÔ†xÒ†|Ò…{̆}ÒˆzÒ†}­FUªAM«BP­BN¬DPª:L«@M­BN­DP²AI³DK³BL·HMÅQRÐ^TÛl^äyaæ…gêŽlì’rî–mî•xî›vð˜rñ˜mî—rî’rìˆiä{gÜkZËRO·IOŸ5G“,B+@—0B4D¦8Fª=L«:H®=J¬<L®@M¬GQ­CR±@M®>J¬BL®@L­AM¯CM±@I²@L´DM¹IO¶DN¶BH²AL«8N¸FRöÍ©üÇó®xêŸ|ãx_ÞzcÏ\WÌ]XÀLUÅVR·KT´DPµFV°CR»JVÀKP¾JRÂPVÍRQÆOTÈX_ÇbdÉV_Ì]dÊ``¾P_²F[ÂhrÔuw×|wÔwwÔsqÒnnÒpnÔxvÒ|rÂad½_jÌpsÕxtÓuvÔ|Ô€ƒÓyÖ”ŒÎ|{Õ}yÖ…‚֊ْטؑڞ‘Ü¥˜Øž“Ôƒ‚Ò€ÖŽ‹à›‰Üª›Ü ”Ø“‹Üœ“Ûž“Ú˜–Ú—‹Û¤–Úœ”Ü –Ø›‘Ö˜’הڗۨܦ¤Ý¨›Ø¤ŸØ¥žÚ¨Ü©žÜª—Ü«œÜ­žÖ¤œÖ¦ŸÚ±©Ù²ªÚ¸²Ô¯©Ú²¥Ø¶¬Ù³¬Ø²®ÜªžÚ«œØ¨¦×›”Ô¤Ö«¤Ö¯«Õ°«Ô¤™Ò®ªÓªŸÔ­¥Î¤žÊš™ÈœšÎªªÒª¢×µ«à±俨߰’Þ­Þ½ªàºªÞ¸°áöåÊ»é̼æÈ·â³⽫àÀ¶à³ÞÁ¶ßÀ´à¿°ß¿±ß¾°à·àÃ·âÆ¸áøâÅ·äÆ¸àº®Û±£Ü¸®áúæÉ½æÊ¿êξïÖËñØÂïØ¸Â›L>O =X>[<_@]:cAb>[>\;Y<`Ab=f=gDdFbEn%RŠ:c¬[vÊt}Ö~Ò}|ÐwzÌvzÑ~~Ò‡Õ‰€ØŽ×€ÕŒÖŠÕŠÖˆ~Ô†Óˆ€Ó†Ô‡}Ó‰€Ô†{ш€Ñ‡|Ò†€Ñ†~Ò†}Ò…{Ò‡{Ó…z°FW«AOª>N¬CPªDQ°AN¬>Hª@M¬>J¬=I¯@L³?G·ENÂQNÍ\VÙn`äxcæ‚hèŽsí”sî–mï™sî›sî–yï–rî™zï‘qê‰näydÜhZÎUR¶INŸ3E“'@’)C˜.Ež6F¢8H¥=LªAL°?I²@M°=J¯AK«@M°>K¯AL¯BN®@M­>I°@J´CL³BJ·EM¾IN´FN·BH®FR¨6NºLQøÐ£þʼnô¨qñ¡nä|cÞqcØj^Ò`ZË\UÌVTÂNQ¼RX¸GR´HXºGR¸GV¼MU¼KV®>N¼QWÊTWÂTZÄ\cÈY\¼Va¶N[Ã`nÒqsÖvrÔz|ÕytÍlpËhlÒwwÔ‹„Ã_aÄhoÒuvÕÒzyÒzyÔ~|ÕŠˆÆsuÌpnÕ†}Òz~ØŒƒÙœÚžœÜš’דԉֆ{Ö‡„Ô…ˆÚŸ™Ü¡”ڈݘ„ÜœŽÜ–ŽÜ™ŽÞœÜ¦˜Ú›”چܢ—Ûž˜Úœ‘ØŸ—Ö–’Øœ™Ú”Ú¢—Õ œÙŸ˜Þ®¡Û°¨Ùª¢Ø£Ø¦šÖ¨¤×ž™×¦œÙ®¦Ø®¦Û¸°Õ´°Ô®¨Ú°¤Ø²¬Ø© Øª¤Õ¡£×œ’Ô  Ôœ™Õ¨£Ö­©Ö­©Ô©¨Ð¡•Ï ¢Ñ¢™É–—ÊÇ™›Ô±¥ßòäêݬ–×¥˜Ó¦žÛº¬à¼°Ý¼²âŹæÊ¼è̸æÇ¸äIJ۸¬Ü¼²àÀµÞ¿µàÀ±ßÁ¶ß»°Þ¾´à¿³âøß¸âÅ»ãĹ寏äÀ±Ù¨ž× “ܲ¥ß¾²äƼçȽêʽîÑÄîÔÀéÕº–^fKCR ;Z=[<]=^Db>dD\BX=Y=]<c<hAfB]<Z?u)TžMj¿o}Ó|Ö}{Ðy|ÐuxÌy|Ò€~ÕŠ‚Ö‹ƒ×ŽÖŒ}ÔŠ€Ö‹€Õ‰|׋‚֌Ԇ€Õ‡ÔŠ‚Óˆ‚ÕŠ€Ô‡|Ò‡ÔŠ}Ò†}Ó…~Ò‰~ÔŠzÔ†{§AQª=Kª@NªDPªDQ¬CP«?M©AL¬@M¯AJ­?J¬=H²CLÀMLÐ\RÛn\âwaå…eêŒnëpî˜qî•rî˜uî”rð—wï–vî’oê‰räv`ÝjZÌ[U·JM 4H”,C“+C™.FŸ3F¦;G¨?Iª@K®<K°AL´>J¯BN­?L°BJ²BN®@K­?L°CK°?J³@K²FN¶DN³CM´HN´DJ®BM¨7N¿TXùÔ­û¿…ô¬wéštÞs\Þzc×h_ÖjbÈQSÌjbÆTSÂMN¶CN¼HP¶CP½P[¸FR°HZ¶J\ÁSXÃYdÀT\ÄPTºP`´L`Âck×rxÓjlÖwyÕzuÎlqÒnpÌvwÑ€w¾Y^ºZfÐprÙ|tÖ|yØyvÔ‚~Ô†€ÒztÊgkÐprÒprÔ‡‚ÕØÜ¥ Ù™˜Ø”ˆÖ‹†ÎptÖŠ†Ý¤—Þ —Þ ”ÜšŠÕŠÛŒ{ÞŸ‘Üœ”ÜšÞ¢•ݧšÙ›–Û–’Ü ‘Úž•×›˜Ø™’ׄטŽÚ—•Ü›ŽÙ§žÝ¦šÛ¦˜Û£™ÚžÖ¨¡Ü¬¢Ùª¦×Ÿ›Ú¨–Ø®¨Ùµ­Ú¶­×²¬×¨¢Ù¢™Ù¦¢Ø©¤Ö®ªÓ¢£Ô›”Ó£ Ö˜•Ö«£Õ©¦Õ¨£Ï–Ì ŸÉŠ…Å‘”Í¢ Ý¿³ä®޳œÕž’Õ£˜Þ¹®Þº®à¾²àĹãǸçÇ¶åÆ¸ãÆ¶ã²ܺ±Ü¼³ÞÀ´àÀ¶àÁ´Þ¿´ß¿´à¾´ßÀ³á÷àÃºáÆ¼äÃ·åÆ¶äÆ¹×¥˜Ò–ŒÒ•ŽÛ¥”Úµ«æÆºäÄ¶æÆ¸îÑÁíѺáÉ´o,HQ@T<X:Z:\<`Ba>c$L_&LV;`>a;eDhDcDY@ZA„3\®cyÏ|~ÔƒÑz|Ìv|Ïz|Ѓ‚ÑŠ‚ÖŒ€×Š|ÖŒƒ×~Ö‰€×‹ƒÖŒ×‹~ÖŒ€ÕŠ‚Ô‡}Óˆ‚ÔŠ~Ô‹xÓ‰Ô‰|Ó‰|Ôˆ}Ò†}ÔŠzÕŒ{Ô†z¬<J¬DQªFT«?M«=Jª@M®?N¬@M¬@L­DP©?Lª@L³DLÄQPÏ\TÜn[âw_è…gìjî’nì—rî–uí™rí–uî–ví˜{ì‘tê‡kåzaÜl^ËXW´JS 7J˜,D’.G˜4J <O£:H¦=K¨:I®=J°CM²BL±DN±@J¬>G°BN°CJ®>I°DJ²>H°?L´FO¶DJ³GNµIO´DN¯@I¨7PÅ\Tù׫ü€ô¨zí•jâ•zÜoØpeÝr`ÓaX×]VÑZS»HN»HQ´JV¶DO³ET¶IYÂRXºGVÀ[bË\Y¹GR«AWµHXÈlsÔxuÖhjÔtpÖurÑkjÓsuÑrpЀr²T_»\hÐnlÖyvÔtrÓzvÕ€Õ|ЄÏopÍktÒxzÔ~ƒÑxyØŒzц‹Õ‡~Ùœ”Ò‚€Ñ}‚ÔŠŒÛ™’ߚݡڜ”Û”ˆÛ”ŠÕ݂ݞڞ˜Üœ’ޜߢ“Ú˜”ØŒˆÖ‚€Ö”ˆÙ£–֕ֈܖ‰Û ”ÚœŽÜ ”Ò֜ץ Ù¢˜ÜªžÚ¯¨Ø­¤ÚŸ–Ú« 
Ú±¬Ø¯¦×«¡×¥ ØŸš× žØ¦¢Ø¤¡Ù­¨×§¤Ö˜ŽÔ¡¢Õ˜ŒÔ¦žÔŸ˜ÌŽÆŽÇ––Û»°æÄ®á¹žÖª—׫žÙ¶ªÜ³«ßº±àÁ´æÆ¶æÆµäöä±ã±àÀ®Þ½±Ý¼±Þ¾±ÞÀ¶ÞÀ¶ß¿´ß½³ßÀµàÀ·à´ãĸáźâĹ䯹ãŶܰžÔ”ÑŠ‚ÓŒˆØ›ŽÝ¸©à¶¢Þ°Ÿä¼®íʼêήƕŒT<P>U@Z@]>`@_?^;]AZ?\>_;`9_=`C_H_Hn!LœJjÀn}Ô€Õ~}Ïy|Îv|ÎÒˆ„ÔŒ„ÖŒ~Ö‚Ö€Ù€Ö‹‚Öˆ׊‚ÔŒ…Ô‹Õ†yÔˆ|Ô‡|Ó‰|Ôˆ~Óˆ{Ò‡|Õ‡{ÓŒ{Òˆ{Óˆ}Ô†}Ô†z¬@L¨AJ¨AJ¨BQª>NªANª>L¬@M¬=I¯@K¬>I¬?H´GPÄNOÐ`XÞo`ãzbæ…nëlî–oì—uí–ví˜zí˜xì“tì–vî‘xê‡iä|jÞjZÎXW°FP 2H”.F”*E˜3IŸ9H¥>N«?L¨?L®@L°?M¬HS®BN¯BO­@L²?K°BJ®@J®AN±>H°CN¯BL´DM³AJµEK¶BI±AM£5Q½VTùÔ®ù¶~ô¦uî™lî‰géŠhàwaàr^ÞmXØj]ÌZ[ÁQTÆPO»IP¨2F¬BS¼MSµDP¿UZÇXYÄRXµHT°FXÆdjÕtsÔspÒmnÒfdÎnvÑmnÔ€tÆrr²P^»XfÐrpÕvuÙ{xÖz|ÑzwÓ‚|ÏvxÕ|{ÒryÒ|×…‡ÔŠ‚ÌrxÌnsÖ’ˆÎƒŠÑtpÎ}„Ú‘Œßž–Ýž’Û“ŽÙ‹ÚˆÚšŽÙ•ŒÛ”Ž×’‹ØŠ†Ü–ˆÚ –Ù–Ú‹€ØŽ‰×”ŽÖ‰ÓŠÚ€Ø˜ŽØœ•ؘÕ҈ГšÕ ™Ö™Øœ”Ø¤šØŸ™Û®£Ü®¦Úª¦Ö¡ž×§š×§Ÿ×¡šØ¢˜ÖžžÕ–“Ù§¤×¨¦Ö§¨Ø¥›Ò–šÔš‘ЉˆÐš–Ì’”È“’Ôªžä®æÀ¤Ú§Ù¬œÛ·¨Ý´¦Ýº¯à¸âĶä°å²âÀ²à¼®Þ¸¦Üº«Ý¹ªÜ½³Ý½°à¼°áÀ´ß¿µÞÀµÞ½²áõâ¸áÀ¶âÅ¸ãÆ»âȾä²âÀ±Õ›ŒÒ‹„͈ˆÐ‹‹Ö¤—Ø£–΀zØž–ḭêÄ®æÏ¸‹LWQ@T@]"J]C`A`@aB`A^@W=\8`<^9Z<^Cn&Vz0\Œ:_²cyÐy|Ó}ÒzxÏwzÏy|ЂƒÒˆ~Ö‹„׋،‚ØŒ‚ÕŠ׋|ØŠÔˆÔ†Ô‰}Ô‡~ÓŒ€Ñˆ}ÓŒ~ш~ÒŠ{Ôˆ|ӌҌӆ|Ó‡tÒ‰€Õ†x¬@JªBNª>JªES¥<K§@O¦BN©@J©@Jª>K¬?L±BL³BN¿NSÏa\Ýoaä€lç†hëlí•rë”uí“vì™pë•sí’vì•yìŽléˆkæz`Þk\Ì[V±GOœ6J’0F’.D˜.FŸ6H§>P¨>Oª>P®ER®DPª@Nª@N°BL°@L°@K®AL°@K¯@K°@L°EO°BJµBL´HP²@K²BL«?M¢7V¾`WúÔ¬úµpò®{ìfæ‰må€dÜ|jßpYØogÛdZØg]Ò\VÆNS±@NªBR¯<L¬:M»NTÃRV¾OW¿MV³GXÄjnÒjnÖnjÔnmÔnlÌdjÏjpÐwwÊzu¶R`¸VfÏjlÖpnÖyvÙxt×€{ÎwuËrrÍr{ÒttÕ„Ô…†ÎrrÌx}ÍzÑtÕ~zÕ~xÏvÚˆzÜ”à˜Þ›‘Ü•ŒØŽØ‰ÜŒ‡Û˜Ü”ŒÛ™’Ø–Ž×†„Ü€Ùœ‹Ø•Ù‹ƒÛ”‰Ù‹Ø•‰Ò–‘ØŽ†ÕˆÑ‚‚Ì…ˆÓÙ––Øž–Øž“Ùœ–Ø’Úš˜Ø˜Ú¯§Ù«¦Ô¥ Óž›ÕŸ˜Ö¢œÖž™Õœ™Ò–˜Ô”’×£ŸØ¬¢Ô žÏ™™Ê…~ʈɋŠÕ¢”ݺ¨ã½¢Ü¨ŽØª—Û²ŸÛ´¥Ü¹°Þ¾³âÇ·çÅ´ãÁ°á¾°á½¬Þ»ªÛµ¬Úµ«Ú´ªÛ¸°Ü»°Üº°àÀ´Þ¿³àÀµàÀ³Þ¿²à·âÀµâĹâùäÈ¼ãÆ¸äǺؤ”Ћ„ÍƒÑ’ŽØ¤—ؤ’ÀjhÆz€Ùž—೤æÃ¦Öµ¥^=TDR 8\C_"J` D`?b<_:Z:Z;b9d>\;\Bb Oy.Zˆ8d¦XrÈt€Ö|{Ó|}ÐvzÌvyÍ€‚Òˆ…Ò‹„Ô‹€×Œ€ÔŒ€ÖŠ€Ö„}֋׊ƒÖ‹‚ÖŠÓ‰ÒŠ€Òˆ{Ò‰‚ÒˆÒˆÒŠÔ‰|Ò‰€ÒŒ|Ô‡|Ò†yÔˆ}Ôˆ¬@L¨@K§;J¦>H£>L¨:L¥<I¥@J§>L§>Iª?L°@N°BOÅQRÓbZÜrbä{cè†fëŽpì’rë”uê“wì”té”sì“rê”vìoì†jæzcÝl\ÎXR±HPŸ8H”)D/H˜.Fž5J¥<K¦;Lª@N«?L«DQ®BN°@K¬@L¯BN¬>J®BM®AJ¬>H­@I±AH´DH´AJ³EN³AI°@Iª>MŸ4PÆl\úÌœõ±|ó¥lì–qëŽgç†héeÞobÛj\ØpfÛm_ÍXU¾FN³FS®@M¥8N²HQ¼LS»HP·EO²@TÇmrÔtqÒmjÎioÓifÐegÏlnÐppÊqo´P\¸ZjÈjnÓllÔllÖxt×€}ÐtsÐsrËloÑtrÔÎvpË~Ìx~ÖŠˆÖ…‚Õ{yÍjpÔ}{Û–Ö‚‰Ùˆ€Ü‰Ú‰‰ÝŠ‡Ü˜‘Û–˜ÙŒŠÚ’†Ø–ŒÜ’‡Ø“ŠØ‹…Ö†‡Ø‚|ß ’Ø”’ÙŒ†Ý‘‚Ù—ÕƒÊ~‚Ï‹ˆÑ•”Ô“Ö™’Ö•‘Ø””ÛŸ”ܞו”Ø—’Ô“”Ø¡–Û¬£Ø£œ×ž˜Ö“‘Ô™‘ÕÔžšÓ‘ÓšœÔ•–Õ££Òš˜ÅŽÅƒÎ”’Ù±¥åÀ«ÞªÖ¥–Ù­ŸÚª Þ²¨ÞÀ¶âȼæÄ´æÃ²ãÀ°äÀ­Ý¹­Ü¯¤Ú¸­Ýº­Ü¶ªÚµ¬Ýº°à»²Þ¾²àÁµàÁ¶áÁµßµàÀ¸âøâÀ¶ãȾäȽäǺäǵ⽭̇ˈˆÑŽ‰Ø£”Ø¥ÀkjÀnqÓ‹ŒÛ œß´¦èÑ´©v}P<P<U:X:]@d(K`$C^<[9X7^=h>c<\>_Fl#R~1\žIj¼o}Óz|Ô{Ôz{ÎvzËz}Є€×‰‚ÔŒƒÕŽÕŒ„ҀЎ‚Ö‡~ÔŒ€Õ‡}ÓˆÒŠ|ÔŠ~ҌՋ҆~ІÔˆzÒ‡yÒˆ}ÓÒ‹}ÔŠyІ{ÏŠ~Ò‡~«<G¬JT§=J¨@N¦>J¨BK©@L¨=J©>I©>L©@J­?L´DPÃSXÓ`[ßn^æ|dè†ièŽlërì–vë–rë–yë–tì–ví•xì’qêˆmæ|dßr`ÎYT¶FN 6H”*D“,F™0H¡8L¥>M¨=L¨BO©BP®BQ­AN¯@K­BN«;G¬>L®AJ¬@M®AN¯AJ°BJ±FQ°@I°AK¬@Jª?H¥8Kž2NÎziúΞú®jð xîškçŠoæ‚_Ýrfæ{^Ùm`ÜfYÖpfÑ[R¼JO¸GT­CT¶JU½JPºLWºET®EYÆdlÖrqÑpqÔkjÏjiÊY`ÍinÏtuÁaa±ZhºXeÆ`iÓqqÔwuÑqrÒrpÓtoÎrtÐnqÔvqÉryÈqjÂgrÐ|xØ„…ׂ~Ö‚ÎosÏ{ƒÔzzÜŠÙ‘ŠÒ‡ŒÓ{ׂz؇ŒÜŒƒÝ£”ے؆ƒÙŠ„Ø†€ØŠ‚Ù‘†ÚŠÖŽ‹Û†€Ý˜†Ù–ÒvqÌÑ‹‡Ò›–Ó‹ÖŠˆÖ”Ž×ŒƒÛ–ŽÖ—Ú”ŒÚœšÚ”ŒÙŒŠØš”ؔۘכ•Õ–˜ÖœšÓ’•ؘԜžÎ’Ҕˌ‘ΈÎȔبžã¼¦â±‘ÚšˆÕžÜ¯™Ü°¢Þ¾²áÁ²æÈ¸æÅ²å¯ßÀ¯Üº¬Ü¶ªÙ¹«Ü¸¬Üº¬Û¸¬Ü·«Üº®Ý¼±à¾´ßÀ´â³ßµ߿¶à¸àĸßøÞļâȼæÉ»æËºäɸ՘‰Ë†ƒÒ’ŒÚ¢–× ŽÂpnÀw}ʆˆØ™”ÜŸšã¶¤æÒ¼‚BTN 
S_‘7LŠ0F‘>QŠ1F…-G&@‡.F8N{(Ep>mKp H„†ÖšÖ‡zÚÌuiʆxʆyÊyÌ‹|ÉŒzÉŠ~ÉŒ€ÄŠȈ~ÇŠÃ…Æ„dž~Â…}ņ}Ċƈ|‚yÁƒ|Ä|¾‚º~z¸€¸}v¸{w¸|y´zx´wv±rr©lp¤ek£`l®imÞ¢ðÄ•öÓ§õ׬õÕ§öÖ¦ôÔ£ôУôТôТôÐ¥ò΢ò΢ôΠò΢ñΤòΤñϤò΢òΣòϧñÌ¥ñÊŸñÊšòÊñɘòÈœòÈ›ñÆ–ôÆš^:`;b:a<c@b@`>`Ft>•6H¨JT¬JS¦DR´LVÉ^_Ñd`×neÝ|tâ…qèvêœ{êšyꚀëžzížyîœ}ðžƒîœzêŽxæ„oÞraÐ[U·CJ—.F†BŽ*Hš7R¡LhHl¢Ji‡8fw/Zu5a€;aˆGmr2\`"OXJNFMFu@mx6gPIZNf"Vj)j~Pv˜qœQuB}„YŽŽdž‰]–•t¢’h¡’m Œ_˜†Tn9vl6n¡y¡°R~ÒLd±p˜r7bKx™Pv¶u†Ð¹¶t:RXJf FUDZB[E\A\CZC_;i$Eo'Fn<`AX<bAn"@`?b?l=c>]<`<f=n>l=n;r ?lAf<g<t"?o@s"=k=m>‡.A€";r6ƒ7ž-=¡2>®:B¶@HºBIÅJNÌRVÔVWØ`d×`cØacÖbfÙfjÛfkÚinÜorÚmtÜnpßtxàxxää…„á‚âƒæ„„å‰è”Œè‘‰çŽ…èŒå‡†äŠˆßˆ†Þ}~Üv|Ú||Ïun‹/IX>kAw$CTAUAXDTBbHb!Dm$FhAo%Ca;q$Bo!@b:l;x A O\“8P‹3I’@V:M€'C€'?ˆ.DŒ3Hv$B~&GkEr$LÂ…‰Ô•ÖˆtØ‚ÊtiÆ‚tLJxÍ‘|ÌŒxÉŽzȆ|ÊŒ~Ň|ȉ€Å‡|Ć{È„{Ć~Ń}Ȉ|Çzˆ~Ã…{‡}¾†|½‚~»€|·||¶}z·|vµ{y³xx´uv®tt©lp¤hpž`j²prଋòËžöÖ§öجöת÷Ö¦õÒ¢ôÑ¢óСòΡòϤòΠòÏ£ôЦòÒ¨óШóФòÏ£òΞòÍ ñËœòÊžðË òÉœñÈœðÈžñÈœóÇœñÆœóÔ_:]>]>`>_>]@]A\@t @—;L¦IQ­JR¨GT°GRÆ\^ÑhdØneÞxoã‡xé{꜂êžvì›|ìžwîî›|ïž}îšxëtè…qÞs_Ï\T´CL”+G‚B,H–4Q¢Nf–GhKiw&Rj)So*]h#R\ƒm0YZLa OUIOHr<ft2fQF]"T\Ts2r„M€dŽ‚XŠ`#Xb&`i3eˆa–h¨™{­—t°j‘fšQ’p8yžz¡£b–ÁHjÆ€žˆBfn5a•Ut£X`Ǭ¤i)Pw:U_LUIZH\H^G[D^H`@p-Lp$DnBb?X9d?m"B]?\<f>f@Y>`=`?f;k<p>k?r<j=h@q!Al>t%?t"Ah9€-Dƒ,Ao6p4€6&:ž0>¦<C²=B¼BHÇMPÍUZÔZ[ÓYdÖ^cÖbhÙdjØejÚlqÝsxÜsxÜqvàzyá}€ã†ƒåŽŒäŽŒæ‰ˆæ†ç‹ˆèˆæŠˆäˆ‡èŽäŽçŽ‹Þˆ„ÜxÜvxÖ}xÅgfj9W?v'BgBSBZFZEUF^F`Fn'Fk#@m @^<s*Ee<e>jAy$FŸVb•?P2L’DWŠ6G„(B…0G….Cˆ*Bv$@….JbBlI‰Œ×™Ö†~Ö‹€ÉykÈ„~ʇxËŽ~ÊŒ|ÈŒzÇŠzÆ…ă~ƆĆ~Ä|„À„}Àƒ~Ä|Æ{Ɔ{Áƒ{„zÁ{¿‚}¼|x¸}z¶{z¶zy´{x³vt³ss®qp©np hjŸ_m¹wvç´òÌŸöצõÙ®ö×§÷Ô¢ôÏšóΜóΠòΟóϤòΦòÒ¦ôѧóѤôÒ¥ôÏ¢òΠòËòÉ™ðÉšñǘñÇšñÆ™ñÇœðǘòÅžòÆšòÄ–ñÀ‘^=^<aBb@\<[<`B`Dt#D‘5H›<K BQ£CP¶HPÉ\[Òh`×leÞ|lã†qè“yì™ë›vëœwëšzîžyîžxï¡~ï›|ëpè†nár^Ò^X´EL’+D†"CŠ,J“7R G]¢Pp€0Vk"Nh#Rb!Tb&R—kŒj*XY!N`#OWLTNk4^t2cRJ^!RWQj,i^Š”j˜…X‹f&^Z S] \l5p„Z–u¨–s§†Z•”s¦’s«„X˜”h•ªnœºf“Âx–¯G`Fv…I`…@[l1Lh)Nz6QTLWI]Gl$F^D[D[F`Dq#Dr"Dr"CaBZ?dBq&D^>_E`>n>_;^<d=`>k @m?n>l:k?k@p$Bl<s"?t(Ch>z(Bƒ,Ar<d7g5u3†";š.<©0<°8DÁEJÊOTÎT\ÐZbÓ^aÒaiØcjÙfnÜhnÝmpÚrvØstßvzâyyä~}æä’åŽŠåŒŠè‹æŠŠåŠçŠˆè‘‘ç˜å”ŽÞ†„Ùv}ØzzÑzx´JQa?\>v%D^FWK^!JVJXJ_ J^Gn$Cr(Ef:`=j!<e<b>l=x#AžT_•=R5O’GZˆ2G…*D‹9H†0Fˆ.Hv&@‡/Kp"HjHƉٜ’؈|Ö†ÈxoÆ€wɉxÊ‹xʋɌ~ÈŠ{Ljz‚zÁ‚~Ć}Ć¿‡Á‡À…€¿ƒ}¾†~¿„Â…{Á…|½„}º‚€¶{º|z¶|{µzxµxx³zy¯vtªqq§nq¢ckŸcn½€x긎ôСöÚ©õÛ°öÖ¥÷ÒžóΙôÍ™ôΛòÎóФòТôÑ¥ôÒ¨ñѦóѧôΟò̘òÊ™òÆ–ñÅ–òÈ–ðÅ•ðÄ’òÓñÔôÁŒð¼‘í¶ˆêª‚^B\>`D`C[B\AY@[>mA‚.E˜:L¡BPªGRºPXÌabÔiaÙngß|nå†tç“xè™zêœ|뚀ìzížzîž|î |î›|ì’té†nâr`Ð^X²@K‘*F€ D‡(H’4N G^¤PbcG^GbLYRe.Z•_~€<dVJd$Pi)XVJo2`l*]RGZNSOTU]’œt¤’st2hZPl*jl8p{I„ˆYŽ‹d™’j£—z®–x­†Z“Œ_š›c–´tŸ´~“Lv–Dk‡Ia’N`^%No+N[#JX!KUE\FaD\EYBXDa@r @t@u$B^@V>eAx(C_@`B`Ai$FaA[@d@]:b>k Bn!Bh<l!>d=lAn#Ep =t'Df@r#?~+D„+Bh<a:a5s5‚9’(>¡0A²>HÂJLÈRVÌSZÎTZÐ\aÕ`i×bf×cmÛkkÜrtÛqvÜvtÞv|àvyä‚‚â‰„æ‰ˆææ’ç’‘æ‹åŒˆçŽŠäŽ‹â‹‡Ý€…Ørz×yvÆmlª@IY;[<s#HZHW"M^$LX$OX L]J_!Jo%At.Id;f?e>i@i<l@„,E¡Wb“>LŽ/G”CT„.Fƒ)@Ž6J‰/D0Gx"@ˆ3LjFgHȆ„Ö–ŠØ‡wׄÆ|tÅyÄ„uĆzÈŠyŇ|ň}Â…€Â‚}Á‡Ä„€Å…~†ƒÀ‡½…~ƒ|†~À…¾„|¿‚z¸~}»~y·|z´||²yw³xw¶xv±xt­rv¨nq§jk bl£dkÉŠzî¾óÒ¢öÛ­õÛ®öÕ¦÷ОôÍ›ô̘ôÏœòОôСôÒ£ôÒ¨ôÒ©ñЦôΡóËòÉšñÆ™òÆ“òÇ™óÔðÃ’ï¾Ží¹ˆìµ„ê«|äœz؇eÃgOa?\?]=\=[<Y=Z<\Bf@y$B“5FžDPªHTºTZËcdÒd^Úqià{ræ†tè’zê˜ë—xë–{ëœ|î›|ð~ð›~î’tç†ràuaÐ^Y¬ALˆ)Ex D{&GŽ3N¦[t€3Ub F\DWIj)[v>j€<]4^YKd&Sl(Vd"Rp.\h,ZSHUKUQJPR‚ ƒ²’sš‡P…_Vs9ro5q{D|‡WŽ€K†Žf Ša”œ†¶˜‚¶˜}°Ž^– s¢¸‚¦¾Ž¨x4cyAfŽMeŠIdZ"LRKT"LTJZJ\J\H\JYHd Jr&Hw+H~,HcAWBc>v(D]@^Dd CgBe D[@b@bA`?f?l!@j Bn%Ck%Dh>nBn!Co#DgBo"D‚.F†+By"Aj<e9f9n:|:Œ,Bœ.@«8@­<I²@KÀJMÅV\ÇU]È\fÉ\hÓbiÕgkÜjkÜprÝrxßvzâ}€à|~åˆ‰æŒˆæŒ‰è’Œä‡„ã†‚ä†‚à…„ß„„Ûz|Ör{ÐsrÆgd©@NZD\Ej$LY%OZ$N[&PW$O\$L_"L_ 
Is+Hr*HbDm'Ga@fBgChA„.JžQ]’?S‘;NARŠ5I†0F;NŒ8M3H~,HŠ8NmJl#LĈŒØ’…Ø‚xÒˆ}Äxp¼tp¸ur»|tÂ}sÀzÃ~wÃx‚{†‚¿~À†‚À„€Â„~Á„|Á€¿‚|„|Àƒ~¼|½€|µ}~¸}zµzw°yz´zx²uv³vw®ux¨no¦hlŸcl¤foÔ˜zïÃ’öÓ¢öÚ©öجõÕ¤÷ПôÍœôÍšôРóÐôÏŸõÑžôѤöТôΞôÍžôÉœñÈšòÆ—ôÅ•óÃ’ò¿”ﻋꯆäœpÙ†cÄfP¢FF€-@h;X<[@ZB^@^?X=W;YC`?p@Ž1FŸDP¦FR²IRÇ\_ÑfbÙukàvãŠxéxè˜|ê–{è˜~ꘀízî|îž{ï˜zì‘rè„pßq^Î[T®AK„&DiEƒ/T¬f„¦^„ˆ9a“V~WHNIp-ZŒJro,Tv0Xf"Mg%Se&Tk(Sl,Wc$RTFTHYRKRlDt¢Œ»˜w£^g(\n4er6pf*d€Dx`‘hž{F‡^ˆ™{«„TŽ’nª›t¨¯¸¾‡ª›gŸ£x¦¤iˆ¨n„h1VLHNFSFXJZF\H^F\DmB{&B.Fz&DX=X=g!Bx,E_A]A_>hAjA^@_=c>[<a?f>h@o"An BjCq(Fs&Er(FiDl D~+G‚+Dˆ,E|!>u>q>n:x<{7";† :„":‹&<–0@¡;I¨@KªMX¶TbÀZaË\cÏdjÕjnÔnwÚwzÞxyÞtzá~|㇆䉆劄ჂÞ{zßzxÞ|xÜ|zÕpuÐppÊkhÉ^S—0BX@ZB_FXJ\"MZLX$N`#Na!L]Jn,Jl!DcCiBbBa<gBo$H„.J O_“CZ’>R=S‹8M„/FŽ=P‘:N7I&@‹6Lo Kt)OÈׂֈ~΂x¹roµhf°ff¸lk¼rl»vn»uwÀzw¾z¾|{¼~¾„‚À„~„ˆƒÂ„~À…~¿‚~¾‚}½€|¾€y¶¶~~¸||´}|°yx°ux¯vw­su§nq¡dp `k­ouÞ¨ƒñÇ—õÔ¤÷Ù©öÖ¦öÓ£öÏžôÌ™õΜõÎôÎôÏŸöΞôΠõËŸõÌ›ôȘõÈšòÇ“òÂ’ð¼Šî´ˆëªâštÒ~h³ULˆ0?i8^<X@U>Y@[>`A]>Z<X:X>V>];q B†-DšBPœAO¤ANÁTVÑe_Úpc߀rã†rè’yé–|è˜~é—|ì˜xìš|ëœzðŸ„ð˜~êŒtã„lÚn_ÈTP¤<Iz H†7`ºšÓ°°¥Lm Ut±t‡UEOGc M~8`q0YfJl'Ph#Qa Np/Z]H\LWFSHWLMPa4e Œ¹~©Ÿ­ƒJ†€L‚z?ub$`p6m”h˜’w¢J„q;o…c–}S“o¢Ÿƒ³®’¶¢y¤ q¢­†¦·™·±«•fŠX)SQNRKUKXJ[JZG[Il E‡2L0Ix)EZAT>e<{-D_?`A\A_?hA^?`?d>]>c=dBf@n!Aq%Bo"Cp$Dr%Ds%Bo!Bh=p!@‚&A”6G‹-D~"=z@~#?Š/J.MŽ6O”5OŸ>[§H^®H`¯Lc²Kb³Qc¾UlÄ\lÌXhÑ_jÖdtÔdzÚuÜu~Ý|…ßz‚á‡â‹âŠŠà‚Šà~†à}‰Ý|ƒÖp|ÈhrÆipËlb½VUˆ4KYF\F[L\#NY'OY(SY La%M^"L^Go,Nj$Ff Dd@a@_@d=r$D€,HœN`“BV‘<P‘CR‰<R„4Lˆ4L8L6H}&DŠ0Kw*Px*PÌŽ‹×Ž{؉Èyr¯ff®`b«Z\¬\`´c`µjn´nn»ml·tu¼rr¶tx¸wx»~z¾~~¿|zÂ~z½‚¿…‚¼‚¸€‚¼€|º€}·|x´zz±yz¯uw®tvªuvªlp¤jqžcj `l¹xt䲋òʘõÖ§÷×£öÒ¢öÏœöÌ›ôÌšôÎõÍšôÍôÌ›õÌœôÊ™öÊšõÉ›ôÇ’óÂ’î¼î·‰æ¨~Þ’nÎvZªMJ€-Bc;Z<T 7U:Y;^>^@`=`A^>X>V<X9T<Z;q;‡-A—=G˜;Ož9J¼IRÎa\Ùqeß~oä‡yæxè–|ëš{é–véšwë™wêš}î™zî—{ê‹päjÚlbÃRP¢7S˜Agتܾ¬–JT|$XšRmŸc~TGMCUG~>gv4]aKj$Pj(Rf#Pm*S\H`JTFSETHNOb0`™|¨–n¢¥ˆ¨—jœt8dy9kf&b`!]~Hs—w›zC|n6t‹o¥o®t>x˜{§§Œµ ~¥‰Tˆq2[“Nm§†®‘´“ah0YNBM@RDVBZGZDlCŠ0H…4Hu(GT<R=h Cu$B^B_@]?^Be@c?\?b>c?_=e@`?g?m$Cj#Ei?o!Al!Am Ag=j<€'=‰,A‘-B0E˜1H¤?NµLZ·OZ¾Q`Æ\gÌajÒbkÐenÒptÐpvÏjmÌinÊlnÎpzÒnr×uyØuz܇܃Þ‰‚Þ„}ß„~Þ‰‚ß„Þ‹à“Šá•Žß”ˆÛ”Ž×•–ÕšžØœÃ–£av{A_e.MW#HY'PZ(NZ#NY$L[!H^"N^Gl#Dh"Fe D`>^BaA`=t%D|*F›LZ–HYŽ8M“FVŠ:T…2L‰8N‘:N0I}(B‚,H{+S~5WЖ‘ׇtÕˆÁmh¦X[¬bb¦RX¨V^¯ZX¯`f³ae°bb®fi¶on±hl²nt¶st¶tuµwx¹vu¾~xÃ|¾‚~»‚€º~yº}z¸}|´yx°x|®xv­svªqp¨mp hp`i¡_kƉ{ê¼õÏöÕ§øÕžöÏ™öË’öÌ–ó̘ð˘ôÌœóÊšöÊšõÊ–ôÈ”öÇ‘õÇ“óÁŽí¸ç¬†á~Ñ…o¶ZQ‹/@e;X;T:O:R<`<n<p>eCf DfBfA\=W=T:R<X;o<‚)=3CŒ0D’1E¶OTËdcØobÞ|sã…wæ“|è—|ê˜}ê˜|ê˜|ê–xé˜|î™}î™zèŽrã{eÖh`½Qb±UxР¯â¿¢GLˆNmr%TžftŒLaX KT MPD|<d{8an*Vi&Tl&Tt2`_H^Iq1]VIXIXJVQu>s™v¨’fŸ’hŸ€¬„N„u8fj.d[Ob%XwFswAƒZ˜žŽ·“qšv>|R‹žˆ³¨’ºšw£x@xb"Tu6`´”¨¾¢»¦ƒ¨f1aGCNFSFUE`Dn@.HŠ8Ks @T>T=o*Fy(B^@]@`>^>cCh?\>a>b>\<aAb>d Dg An"Bj<u&?j>f<f8c8t$=†,D’3D“6I¦AO¶KX»O^ÄZfÊ]`Ð`dÐ_fÓ``ÔcfÔlhÔihÓheÑjkÍljÔpn×jk×mrÜtwÜzwÝ}yÛ|Þ|{Ü{zÚ€€Û€xÚ‚~Û‚|Ý„‚Ýˆà“‰ä›‘æ§œè²é¸£á·¥Ð­¤²˜‡\{j9]R#IW$MUJY$M[E^EfBf BaAX@XCi!F`?s(F{(G—KY”G\‰7O‘FX:S…2LŒ5J;P’8L‡-F†,Hy&N‡:WÓ•Õ€jÖ†}¸ff¨QX´\\­Y\¨O\ªSW¨U]¯Z_®Z^­^c¬_hª[c¬bi©bg®fk¯jn®ln²ps·ss¸zzºzwºwv¶yz´z{´z{°xz±y}®ts¨or¥jq¢gnœaj¤dlÔ—ïÀ”öПøÔ¢÷ÒžõÊ”öÈ’öÊ—ôÊ•ò˘ôΜòÊ›ôÉ™òÈ”ôÈ”õÄ”òÂí¼Žç®Šß›|ÒŠu´e_Š8Hf:W>S@P:Z>e8w"=ƒ.F€*Di>i"Dl$Fd!Ed@^?Z>U:V 8l=z!=†/A…/F/E¯KQÌcb×lbÜ{må…vèzè–{ê–ê•~é˜|ê—|ìšì~î—{èŠrâzjÕklÊj€Û«®Ü°Œ’BH}0Ru&Pc K[xj*PL?MDNFw8^„Bhp,VdNl)Rn)R` M`Q~Dmd!Q] M[LXPp4j“k™•m Z’šxœœ~©|Fv^&UNK]#]‚WfŸ“x§Ÿ´€PwD|RˆŒe’ž¦”T|”iŠƒFzd P–_vÓ µ–«¢‚¬a0`MKSFVFbEs#Gƒ-GŽ:KmBXDXBt.Hx-I]C]CbF]B_@fB`@_=`>_>_@bBbBjBo#CkAl?r#Ce?d;f@m>‰-C”2GŸ:JµFSÀQXÊV^Ð`fÐ_dÐ^gÌY^Ë_cÏ`aÐbcÒfeÑedÌglÏihÓjoØmpØpqÛvuÜvtÚzyÙ|zÛwwÙyvØvtÙut×xtÙzwÛÛˆ‚ß”‡âŸŒâ¨–çµ¡è·¥ìº¤í½¦ì¾¨à¼¬À¢¥“k†n?aX,QW(PYH^HeEc 
E^BVBXB^DdEv.N‚1L—GU–Ja†2KŠ;PŒAZ†3M„0L:OŽ6N‹1H/Mw,U‹>[×™”Ö€n΀y¸bc©Zb°]e¶cd¬Zb¬V\ªYa¬R]®\bªX`¬V`ªYc¬Z`£Yd¨\b¦\g§ah©di¯hl­jp¯mp±pr¯pw°tv±uy¬sv°ux¬rs©or¤ktžfrœ`m¨dnØž€òÄ“öÏšöÒøÍ˜ôÆöÈ‘÷ÇòÇ–ôËœôÊšôÉšöÉ™õÈ—ôÃŽóÀ굌⦋ה|¿rj—DLoB\BS:W;a>k @})DŒ1GŠ4H‡/Hx,Hu*Jp(Hm%IhCfA`CV@W@f>{#?‚)A‚%=ˆ,B¦BLÄWXÔf^Üthå‚sæŽxè”}ê”|è–zè’|ê•|ê–}í™wì‘wè‰tâ~vÚ~Šß­¬Ó’u4Hx$H‡8[f"T|<_¯xˆVDLELCLDn*VˆDjv*TdLg&Ri'S^NeT|Ej~<he"S\M[Nf&`q>n‘j˜šr£†S…“m——w¨p7w`*fq9tƒP†˜w¤¡’¸k’h1jwJzF‚|FŠf”—p¤~Dn}@lt8v„Ii¬ˆ ¢jƒ¨Œ ¡‚¬T"MODVFf Es)P,H‘:LhEWCXDj%Fy/NfH\Fb D\CZ?cCh"G`B`>bCbDfD`@eAm#Fm&FjBl Ce@i Af<m>‰2G–7K¢:K·J\ÂT^Ê[`Ð]`Í_dÎZaÌ[`Ê]cË`gÎfjÒbcÏejÐhlÓknÖilÖgmÙnsÜqqÛvtÛvwØsrÚrtÔqqÔtuÖqxØrtØvuÝ{ÞŒ‡â™‹âŸŒãª˜æ´žçº¤èº£é¼¤ê¾¨íÀ¬î«⿭Ȫ¨œu‰sDg[,NV!G]G[H` HUCTCZCdFp(GŒ>W•IW˜Ob†5Pˆ=Sˆ<U/L†2I’=NŽ4N„+G…8Wx,V”H^Øž”×iËvr´be²bfºmoºhf·jj²[_°ag¯[^°Y]¬V\©U`¨T^¨T^¥P]¢T`ŸS^ V`¡W^¥T\¦^f¨^e§^g¤bm¨bi¬hm¨joªlo¨kp¤ip£fmžfnœ^j§dqÚ ~òÂŽöΘöКöÊõÅôÄõÈ‘ôÉ—õËœôÈ™óÊ›öʘôɘðÀŽí¸‹ê°‡ä¨†Ý–Èzj LTr=]9Z:_>gCr B+C0G’8MŽ5J†3I™NdBZ‚5L{,Ft%Dj>f>_@a?q>}$>{$>t<~"<–4F¾NRÐ`XÚtjãƒtèŽwé–zè”xê’{ê“|ê•wì“xí›yí“|玅䌎贞ٌr£EP€#Dx$I~0^t+V´‡šWaMBJ>J@K
+>\ H€;dy.YW@d%N_H^J`P~GkNys*[l)Zf%Sb$YUQm8fœx¦’a˜|Axm˜fŸ”o«˜t²˜w­¤”¶š†¨l3du>}s<x†Z€Jˆ~N‚”q—‹Vm.kh›£rž¸”¯£f‰w6]«šªh”RKTBcGnB~(EŠ0HaBRCUCr,H~1J`B\@bD[A\>^>i DaA_@bB_Ad=aB`>l"?l#Bk Cp@hChAf>k:*C’8N¨=K¾LVÂP[ËYZËX^Î\_ÐZ]ËZ^ÊZbÎZ_Ð^cÑbhÓdkÑjlÖhn×ijØklÚknÛpnÙrsÚqpÖkjÖrpÕllÖpp×rrØroÛ{xÞ€zà’„á–‰â¢ã¬•洞湣黢꾦뾤뾥êÀ¨ì¿¨ïÀ¨è¿ªÏ²©¤|u@\Z BR?Z>O:N>P;`=p!D‰=U‰8LšPb‚/J‡:T†8O‚/Kƒ.K’=P4K~&E~3Pr#JžQbÙšÔ~lÈtmµfi´lkÀqmºljÂql½ni¸ed¶fd³bh±^c°\`«U`¦Vb£PY£P[¥OW¢OWœLWšL[¡Q[¢Ua£X\ [hžX`œX` [b \d \i\e]j—Xc˜Uf§agÛ¡„òÀöË–öÌ’öÅŠôËóÄöÇ–ôɘôÊšôÊ›òÉ™õÊšóÈ›ðÀ’ë´Šä§„Ú“v¾fX–=Fq<Z7[5f=n>t"B€*E‹1G”8H’7L‘5J”?N¢kz cv›RdCW†5M}.Gv$Dj@jAz"@x:t<n6p8‰(@­BKÌWRÚoeâ‚pèxè“zè“yê–ë’tê”x薀ꗈꡞ갢朄à€lÆ[Z¤APˆ'I€,R˜Ko§rНnsi$ILBRBO?NBXCg$S:bn)Ux;fM :SB^Lr0[’R{|6fs1dj%Pr>lKENLˆd…qœ`Œs8ja)UtAiƒM|‘l’Ÿ®uFqo7vxF~‚P†x?x‰b—ˆ^–†YŠ[†P‰”f˜²ŒŸÅ–¢•Tn|7hvB^ª”¤o8cVAgBmB€)DŠ5K`@RBVCx-C‚1L_?^BbA\>\@Z<jCf>`>a@]?^?a?dGl$Hp&Ci Bs BkBj?g>f:~(DŒ5J¢<K¼MVÄY`ÁX_ÆV\ÏZ]ÎZ]Ì\^Í\aÏ[dÌ[fÐ`jÔjlÑhhÔfkÖgjØglÜjmÙlsØln×jr×moÙklÔnsÖpmØpoÚtqÚ|wÞ„}á’ƒâ˜Œä§æ®›è²œç¸¢é¸¡ê¼¤ê½¢ê¾¨ìÀ§íÀ«î¾¦ïÁªñĪîĭ̲¨ wf5TQ<L8K<M 6`:n@‡1Jˆ:O˜P`€(F‡;Pˆ8Qƒ1J‚,H9H‡*D€*Dz%Er$N¦XgÙ’Ò}pÈtrºlnºsn¾trºqnÁrlÄvrºke½jh¶hh³eh³dk®^e«Xb©Xb¦S^¢MW¢KUžJT IZœJU˜DTJVšOY•KX•LY–N^šLX U_žX^—R]GXŽDYžUbØ•yﻆöÉöÉ’öňõ‹öÅõȘóǘõÈ›ôɘôɘöÈ•óÆ–ï¾å¬ˆÚ–t¹f\„,A`<^<c>m>z&Ax&B~(D†*B‘8J”4G‘4J“9L–<N¦q€¦q€§k{ _n“I]Œ=P‚4Lv'Dv$B„,Fy<s9j8n9…&?¨>GÉTTÚncâ~næŒ{ê’zê’~ì”}ê˜zë–z蔂ç ð§Œìš|æˆtÛtgÌ^YªBTŠ,PMwª`lЩ£w(DT?M>L>L=O=VARFr-WŒPw‚ItG7L=[Nm&W“`ˆ†Drw2ew8exCrJ @J C^"TbŒx§•g›|JzCvv:pƒW‘p™^#Z€UŒƒVL††T‹ƒLŠg˜fŸŠ^’‹Y•a–f}°’­„Cj~:ct/e|Dež|Ÿe"MiChB€'D…/I`?T?U=y+C~-Ca>`@a@Z@\BY>iBg Bd@_B[A\?bBh BfAm$Dl"B{0Ip!Bl@g=b<q!>‰0F¤?JµHPÀPUÃRXÌXYÌY]ÊY\ÊV\Ì\dÐ[`Ñ_eÏ]dÐhkÑfgÚddØdiÙhjÚjlÙlnØinØouÖkn×km×om×rnØvvÛxuÞ}àˆ~ß•ˆâ Šä«æ¯—粖洜跚鶜ê»í¼¥ë¾¦í¿¦îÁ¨í¿¨ð¬òëóƯéȱÀ©ªWvR#CH<J <[<q&D€-IŠ<O–Ob},L†7L3L}.I~(D5H‚,Eƒ,Hy)Kr$O¬^kØŽƒÒzqÃvxÀvzÀzu½xt¾vpÀunÁzt¼rl¼qr·ooºlo¸jj´hn³fi°bkªYa¨Vc§U_£P\ LYHT›FT˜DR–JVHUAV‘HXšJW§W[§XZžRU?Q„0J–DVÑ‹töÈ’öÈŽöÄŒõÃŽôÇ”õËœôÊšôǘôÈ–òÆ”ò•걈ޗ|ÀufŠ5D]6`;pA€*D~,Bƒ+F€,E…0G3F“8I‘7J–8J•8L“7L©|„§u„¨t‡¦r} bpTf’AZˆ7N‚1Lˆ4K~(Dr=f>h;€(D¦>HÉTRÚncâ‚qç‹tè{é’wê’zì”yì”|ë|ìš~í•zë’rè…rßtbÊ\]¨C[˜Dd³gˆ»w}ÆŠ‚W?RCNAQAQ?TDR?WNi(V‡QuƒHpE 9H<PFh%Tp–‡Fsv2b†R}w@iTIOETHo4b[ƒa‡›v¨ˆTŽRˆ˜|¤„Th.mc-e‡b’ˆ[”ˆ[ŠY“v@nœ‡¶‘j¢’hšw¡£b‚¢iz¨ˆªu-X^JŽR„ vƒWbCgD‚(Ax(D^@R;Q;|5Ix-GbC`Cd!E[?\CW>f!IdDj'Ib!F`B]B`BgBdCi Eq%H|,Hz(Fo BjBc@r B‡4KŸDV²HR»NW¿NUÅQZÇY_ÌX\Î[_Î]dÓ`dÐ^`Æ_hÎaf×fgÚehÚkhÙjjÜnkÙjlØlmØmnÖjj×mnÙrrØppÛvtÝ}yÞ„yà‚á—‡ã‰ä§Žå®•尖沚篖粖è·ê¸Ÿê¹›ì½¤ï¾£ð¿¦ðÀ¥òÁ¦ñƬôȲòɮ׿®œ~”\-PJ:V<n"Br@†:K—NZx$F‚0J4N{*F~*D1G„(D‚0Ju Ep#M¶lwÖÐyq¿pnÀ|zÀ~zÁ~|¿xuÀzwÅyr¾wvºsr¸su¶rtºqr·noºmn´jl¯fk¯ah®^e¨Zb§Wb¡P\ŸL[˜FZ”JXBR?S“FV¤T^´ca±`XªWZ“CQ0L’>PÓsð½ŒöÈ’õÈŽöÆŒôÆöÊ™õÌ ôÉšôǘõÈ–òŕç¦zÜ’jÀhYŽ8Gg<b;s$B‚,F‹4J†2J…/H†6JŠ4L”7I”<L˜<K”6H”>P”9M¨zˆ¨v‡¨}‰¦y‡¦s¥hvŸWj—L\Ž@U@N…3F{#@d8e4€(@¤>HÈRR×ncâ‚qæ‹{èzç’yê’xë”wì”{ë–zë•|ì—{êŽwè„lÞrfÆZ^¹dºuŒ¯^u¿zzŒH]RFP?QBR<N>Q?ODv;r•U”Y†DsSIGDXU‰UˆŸvšu0`y7hHra%PXKQHi,`‡N|u2cd"XzJyŽ^–yŸ~Kpd(]k2iq6xh2fŒfšf–_“ˆH„”o¤Œ¸”hœ›tšœY‚”Lb™oyœ|¦ZN^ MŽLp¬†žm,UeD|&EnCYBS?W@‚1G~.Gd?cF_BbD^BXC\CfFh&Gh,L_A^C^BcAhDi"Do"E|,H‰4Jn"DfBbAo Bˆ6J•BU®HUÀRXÄQXÆRXÆVZÍY\Î\bÏ[`Ð^dÃZaÊahÕdhØhkÜikÚnpÚlnÜlnØkmØhhÙjnØde×mrÙstØttÜxvÞyÞ‡~à’„áš‹ã ‹ä¦’ä«”ç²šæ°•æ®™è±™è¸œé¶ éµœë»¤ì¾¤ï¾¤ð£ò©ðĬòÆ®òȰò˳åʱ°š¤m:ZQAfBeBx1M‘K\u(J}2N4Ny(F„0GŽ9L'D,GlDs%MÀ{€Ö†zÎwjÀqtÊ~yÈtÄ|wÁ~|Á~uÁzvÀxt»vu¼uvºtvºtw¹sv»rv¹or´jm°go®bj¬elª_f¨Ze¢R_¢T]›Tb”HV‘@TŸOY®XZ¾k`¿eYµSN”DR‚+I›FTÚ–wñ½ŠöÊ–öÊ•öÈ•õÉ”öΞ÷ΞöÌ›öÉšôÈ•ðđ窀؄e´UQ|$>_7a:u$Bˆ0EŽ6LŒ0F‰1I‹3H:O–;Lœ?Pš@N˜AM’7I•2G–7N¦x†¥t„¨}Šª‚’¬„ªy‡¦lw \l™K[–EUŠ7H}&@f8e9z=§<HÉVUØplá€tçŒ}ê{ë”xê•zè”zì–xì˜zî™~ï”zêŽqä€lÝmdÈ_eÒ‡–Ü¥¢šC]¨^tv.RPBPBQ?R>L9N 8H
+<VF‡Ip›d’p¢šb W’›lžiŒ|8`r.fn,^k.Zs:rPLf+c’a‹€Ald!Th(`ƒ^hr;bn5l^$[h2ft?up;uq@ra–”jŸ–e𣅲ž~«š­–ršžvœm'RaLˆg{ˆ`ŽMLm/a¡m{˜l_Ew"Fj!KXJUFTF6O|.JfFfHaHbE^BWD] He Hh$If$Ha"GaD^Ba>r%Fi%Hx%Gw$A‹5Hy%Df?hDmA‹9Lš?S­LYÂRTÃRZÊTYÈT[Ï[aÐ\aÍ[`ÆZ\ÆajÔad×diÛikÚnqÙpoÜmmÚlm×nmÚddÙfk×fgØoqÛrmÜvlÝxÞƒyàŠ~à‚á”ƒâ˜‰ä£‘ä¨’å®“å«–æ®æ²˜è²–é²–é´—ë¸›í» ï¾¤ïÀ¦ñÀ¤ñħòƬñÆ®óȰôʶìÌ®½¦¦t@^\>WAl$D‹BTp"Ft(Dy,Ht"B~(A‹4J(C„(DfAr#JÄ…†ÖnÊunÀuŏxÈzÆ}vÂ~xÄ~zÄ}vÀ|w¾xy¾yz¼wxºxwºux¸qu¸tv¹op´op³kp±in®cfª_h¨`i¤Zb WešSa™GW¢OV·\YÈjTÄk\µXP›BN‘2N¶^Zà£~òÄ‘öʘö̘÷˘õÌ›öΙøÎœô˘öÈ•ôÆ•ï¿Ý›r´\Wz$>\@Z9t!B;K”:H‘7F‹0FŒ0GŽ1H–;M?O CRŸDQš>L‘8K”2D˜8N¢n‚¤v†¨‹¬ˆ“°Š’®„¬|‡§m|¢`h›P^;K‚(@j=^ 7w @¤=IÈYV×njâ‚tè‘zê–}ëšyê–{ê–|ê•uë˜víœ{í˜vêŽnä€nÛkjÔ{ˆéƸԕ„=]‘@Z1Yd"RTBP>V>P7T?P@VCk)Ta!Jc%J|;`VvLlu1\n.[v8hc!Vb([k0fx@|”fŽŽTu^HTHo2b”y¢m-X^"Vf.ba$[]"\r>r€N†r<t€R†Œb’—n¢žx¤šw©šz¨Ÿ€­£«ƒDuH JvB_¶¢°|H{\Jƒ?f·›ªl(OlD`CRBRBRDz0G~,IhDfA`Cd H^FSD^Dh Cl)Jj&F`EbD^E`Dt&Hl!Fz&H},H˜:K’6FiAe=h>Œ8P–9N²P[ÃPSÆQWÍWZÍ^`ÐZ\ËZ^ÄTXÊZbÔgk×ik×gkÛdfÛikÙnlÙllÚlnÚlmØffÙhlØjmÚljÜtnÞzvß{à…{àŠ€áŽ€á‘†ã–†â ‹ä¢å©’娓檒谓殑貔級긜캞ì¸ð¿ ñÀ¢ðħóÄ«òȰóɱõʳô̵îδŭ¦DdT?j Dˆ:NjBo!?t%Bn C.Dˆ/H+By#D_?t#FÌ„„ÖuÇvq½toʃxÆxÆ‚xÂ}yÂ~xÂ|v¾{tÀzy¿|x»z|¼|z¹vvºsu¶su·su¶rr²ln°ko¯gm­ho§clªal¤`k¢_kŸR`¥X\ºbVÆkXÈjUµXQ¤DQ <PÎxeè³…ôÆ–öÎöΜöÍœöÍžôÎ÷ÏŸöÊšóÄŽòÁé±Î‚jœBP^=Z<p!D9Mœ>L–>N–8L“8K2I•9NŸDQ¤FR¨JUžCPš?N“:L’6Hž@O l~¢t‡©~Œ¬„”­Ž™®‰•¯ˆ“«|‰§nz¢`j”HV.Fl @\8y#B¤<HÄTW×nfãƒuçyé™|ì˜yêš|ê—}ë˜zì˜yíyî–zèŽrå~lÞnnæ“”öо¸nzTr;\`FdJ_J[EbJXCWF\Ei$Rv8_XGLEh'Rw5_n(Rk&Sb!N^Kf$Ss4gzDozChx;]e#PN BUP‹]‹™{¤XR\$Vd(\n6me)dd*c}L€‚PˆxA{zD|‹[v¦œv¦˜r ¥“¼ž‚šŸ{¢SQSJ´†º£½m*X‡F{žvPwh@Z@Q?TDSFv-Lw,HeDhB]E`E`GXG^GcDl(Ir(H^B`A`E]Gv.Jr*Hv Aˆ1K£?I©BKt#C`;_;†0H’4H¶S^ÄQVÉPWÎVXÏa]ÇU]ºO]ÆW_ÖadØfkØmlÙhjØdgÚflÚjkØihÛmjÚjhÙklÜjmØouÜqrÝwsÝ}xá€xà…~àŠ€â‹â‚ã•…ãœŒäžŒä£Žæ£æ§’ç¬æ¬—鯔貒굘춗췞ð¼¢ñ¾¢ð¤óèóÅ®óÆ®ôÊ´ô˶õιñϲĪ¥s;ZeE€4Ll#Fm"Fm#Fu'G…6M‡2H-Iy*J\G|.QÔŽ‡Õ|mÄxr»wxÇ€yÅ‚zƃÀ}z¿~|À}z½|w¿‚}¾zx¿}z¾yw»wx¼x|¹ww·tx¼vy¹ss±nr±lr°orªjt©jp¦hr¤ai¢\e«_Z¹gZÂhVÄjS¹[O«NR²NTÙqﺉöÈ•öΛöΘ÷Í™öΛöΛøÍôÈ”ðŠë¶á¡s¿n^ƒ*C\;k@ˆ0H›;L˜=L—?Q—:J”7M˜6JCS¨JR«LR«HRžAM˜<N–=P˜<P£FXœl€h|£r„¬‚¬‹–¬Ž˜®Œ–ª‚¬x‚©tz XaŒ<Ls ?a7z#@¢7FÆVX×ohâƒrçŽvéš|íœ~êšuì˜|ì—xìšyí{ì•vèŒpänßqs蜒îʬXr¼o}±xŽJ :S@P8\Bf Hh"KfJh$Nn(Sx3Xf#QPDr/Vx6\j#L^Hf$Qh(Tf&S`!JXDVCVI`!PRNXRV~ŽgŠm2jPH`(`p:kn6lj0jk7j‡ZŽK~xAuK›pœŸ«œs¢œ ŽbŒšwšˆVŒm-n–VrÌš”¡r”€Et@V°Œ¥c JXHTFVJWL|4Qv.Ok!Hh#Lc Jb$L^$KZ L^!Jd%Km(Lt.M_"Ib"M`$Nc!J€5Sy/Ls"J‰6R©FW­HU…0MbF]B~-J’9P¸ZfÈW\ÈT[ÊSZÅW^¶N\ÃW]ÔfmØafØceÚjnØfjØgmÚimÛmlÛnmÛhiÙhjÛifÛlhÛroÞvqÝxtÞ~váƒ{áŠ|߈|á€á‘‚┆⑃㜇垇ä¤ä Šå¤‹æ¯Žè®é®é´—쳕췘ð½ ðÀ¤òĦòƪñǰôɶóɵóλõѺîг¿ž p-Nl!<n=c>l;p!@-Dx"D„/H},LYE†4NÖ”‡ÒufÄwrÂwrÆ~xÅ‚{Æ‚s€xÀ€x¿}v½xrÀ~zÂzs¿zx¿{x¼y{½vv»wxºuw¸vv¹sv´rx±nr°mp¯lpªitªjq¥dl¥cj¬``µdYÃjVÇiUÄdP¶TPÂ_SáŸyð‘õÌ›öΙõКøÍœøÎšøÎœøÎóÈ’í¼‡æªxÕ‰e¯XSwAf@~(Dš>Pž=Lš<NžAQœ@Qš;K<N«LV¯TW°V]¨JVDR–=N•4J›@O¢AQ‰JdSjŸl~¦yˆª‚’°Š–°’°Œ˜´Š•°‚‡¥fm@Rr >b8| ; @LÅWZØneá€rè‘xê›zïš~íšìœ{ì{ìšzî }ìšyêpè„nàphÞ€zᵪ˜Ah˜HT¤fjJ=M>S@T;ZA[CX?^Ao2Xl%Ko+R]D]BZ!LXDZKw:h}>lf(P_ IS@PBQCp5^^"NPFZ SuBoˆV…b&[VLh*`d)\p6nk/ltBtŒ^–€F‚z<t—n›¡~«œt™s¡`”“j” x¡ŠMz–Y‰“Rl¦„žr8l]J¸£­v4ZRESHQERC€5Nt(Ij%Fl&HaA^@]DUDaBb"Hj#Fx,K^HbDa@`@s#Bz*Ew$F’6N´MR¹LPš8Hh;^<|(F‘6K´YdËXYÊPV¿M[¸T]É`cÖ^aØadØbiÚhkÝhiÙhiØlkÛjfÜijÜojÜhgÙnlÚllÛllÛqlßvrÞytá}râƒxâ‰}à‡zà‹€áŒ}ã‚㖆✊㚄äŠä‰å¦Žæ£Œé¦è§‹ê®‘쵕븚ð¼ ð¿ óèòìòÆ®ôɳôʶôͺõνôÒ¼îÒ²²ˆ•c?j;^5e:t;‚-Dt!B}%@r"D\C”@RÖ“„ÑrdÂtpÄ|tÆ‚{È„uÄxÀ€x¾~xÀ}u½ztÀ{wÀxt¾zvÀ{v¾xt½zx¼xx¹wz¹txµqt²rx·qs°nt±ot¨jr§mt©flªgl®fd½h^Är\ÇlSÅcO¿[RÐw_ꯂóÅöОöΜöÏøÏš÷М÷МøÍ™ðœ鵅à›tÉrXžDPx Dz(H9N CRœ>Q™8Lž>Nœ=Oš?R§HT´PY´TY°MT¢HTœCR˜=Nœ@Q¥GU¤JYm*L~:\—\rŸp…¦}Œ­Š˜¶™¢¶˜˜¼–˜µ…‰©pu—P^u!@`9v=£;GÆVXÚsjä„qè{ìœzîœ}ìœ~ëš}ëš{îž~ï¢|î˜yétékârhÚ‡Ê~‚¢Xx{/Qf 
AL=Q>V@Q;T>]@UA^Dr3\\@e#HeHTCUBQC\K…JxˆK}d!Mk-W\KKCQFr<_v8gLDNKf1c‡T‚GvYQ^$Tl2jo3fh,fn5s|Q‚ˆYy>v–k˜¡¨šn—˜pž|¯˜n›¦†ªŠEodƒq%Ux|ª…ª\O¾”“še‚L<N<OCR=€3Fr%Dn@p"Ad@`A^@TCb@aCq&Ht.K_D_=Z>\;r#@{*Dz$E™>P¾QUÂSV§EPr<b<r"D“9M²PbÊTVºER¾O[Ìbf×fdÖ^_×\\ÚfhÚhfÛhgÛilØheÙljÛghÚhiÜikÚghÛmpÜqpÜnlßtnÞxqÞzqâ~rá‡{â‡zâ…}àŠ~ã~ã‘䚆â‡âš„㜈枎ä§ç¦‹æ§Žè¬’ì´–ë¶˜í¸›ï¼›ï¾žñÁ§òĦòŬôȰôȲôÍ·õмõÑÀõÔ¾çͶ—dwi@Z8a8n8{'@n>r%Ch=[A¢J[ד†Îq`ÂvmÇ~vÅ€vÀtÅrÁ~uÀ€uÂ~w¿|uÃ}zÂxt¾xvÁ|v¼xz¼||¼wv¼yy¾ut·tt²rvµqt´rv¯mq«luªjt¬il®in´hfÄmZÌsXÌqUÆhSÄ`SØ„f﹄óÉ™÷Ñ öКöΜ÷К÷Θ÷Í›õÊšðĎ讀ւd¸XPŽ1J{(HŠ4MœAN¡CQ™:Mž=NŸ@NžBU¢FV¬PX²QY¯RX«LS£DTž@Oœ:P¡AQ HX¥KYZ<n'KˆKfšf|¤wˆ­Ž˜·œ¢»šž½š˜´‹¬xzž\dz.G] :t>Ÿ9HÆXYÜphäƒrèyìž~ðœîš|ëœ~ìž|î {ïž|홀ëwæ€jàvtâwÍš¥ª_p‚2Tk"LO>O=R>R>T;\AUCu<bh%LUD\#J^A^GTBSHf%UŒS~…EyXBx=dj$QOBNCd)RTWOPH_(Zz>tt;f}Cy_"Zh-bw:ps6ni4ih2f‚[‘€D|“i–ŽZƒ[Šb’Ÿ‚²x§² ¾¬yžª[r[Ie*J¿¨¯:p°€„º›L>N 8M:Q>{,Bn$Fm Ar%Ff?eB_@V@aCcCl$Fo$Ea"HbH]@Z:q#>x(B|$Bª@LÃUXÄRT°EJ|!:`:q$B’6G§L_¾GTÁMZÒ[aÚcbØcdÖ^aÖ^`ÛcfØefÛfmÚliÙkkÚlkÛgcÛkkÞhfÛjmÝnlÜllÝrnÞpnÞxrÞzsàzràuáˆxâ†yà‰|áŒ|äŽä“‚ãš„ãšˆãž‡æ¤ŠæŸŠè¥Žç®Žè¬Žê±–ê²“í¶™í¸–îº ñÀ¤ñÀ¨òìôưòǵó̶õ̺ôÑ¿öÔÂô׺պ²†<TX;_:m9u=h:r @iAaB´\eÖˆzÌl^¾wpÄxpÆ€vÄ~tÄ€vÆtÂ~qÃ|u¾|rÄ€uÁ~w½|vÀ{u¾{x¾{v½xwºxz¸xzºvy´rtµxy®ru®ns®kt¬ip®os®gj·jcÈpXÍpSÌrVÈpSÇjWà”mò½‡öΜøÔ¤öÑžöΞöМöΙ÷ИõȒâ¢zÉnX =K†,K‡/M”<N DS™>Rœ?R EV¡BR¦FW®OZ¶U\´W\­LVž@Pž@Pœ@R BT¥L\£J^ L\P 8\?z9[Zqžs…²”š·˜ ½ž¡¿œ·Ž®{ fm€1I[ 6n<˜7LÉXXÚvlå„té’|ëš~î|îŸ}îž|훀íž|î {íšzésè„fâsgât`Üž¥Wy‡@`v8]K;M<UBR<VAXAZEƒFl\DUFSDZ*Q]F\EVIm(X—aŒ…FuUBl2Vi#NQ7O @RD‚Jnt4iQJUK}J~h$Yw@l~Bf$^~L}~Gxq:t`(an<qI€h™|@t”_Ž˜k›d”{©´ž¹¬«LtN@HC‚Q`–^Ž£tɬ´M=J 8H 6R :x(An&Dk@z)Dd=d>\>V>]A_Gt3Ts+J`B`Be(L^9p>t<†$@²DNÅTVÈUTµJK‚%>` 7j=‰0J£H[ÃTZÎUUÖY[ÙacØedÕadÖchÚ]`ÙdhÜegÜfiÚljÜlfÛmlÜlhÝmhÛjjÚllÜmkÝmlßvtÞxsÞ|uà}và‚và„}à‡zà‰~âŠyãŽ~┃㔃㘄䙈䟉䢋ç¦åªŒè«ê±•ê²–ì¶–î¹Ÿí¼ ò¾¤ñÀ¦ðªòưñůòʶö͸õϼöÔÃõØÃïÖ¶½‘”\<X:d:r1Kb6q!?]>b@¾kkÌ‚vÆmbÁwqÄ}tÄ‚{Ä€tÄ}tÄ}uÃ}vÂ|r¾uÀvÀ{t½xrÀ|x½wt½xu¼yyºvu½vv¹ux¸tu¶uy³qs²ps®lt¯no¯ls°ge¹eaÅlZÍoXÎvXÐv[Ðw[ä¤uòÁŽõЛøÓ¡õΜøÏ›øÐö͘öË•õÈ“ì¼~Û”r´TNŽ.D‹.H”6L›=N@Q”:N FT£FU¥DS­JU¶TYµRZ®JTBPš<Ož>NžBR¨GU¨JW¦M`ŸJ_N 5V8fGBašk‚®Žœ¹ž£¾¢£Á¢¡º”–±†ˆ£ot6P[4d7“2FÄV[Úulå‚rè’yì›ìš~욀ì›|í |ì €íž{î™yítè†jàxfÔdb·Y\Ä“ ‘JYw0MG <R?VCSAXAZDo3[l)LWATAXFYJVBXB\Im)Y–g‹…K{PCg,Vp*RV?S@L @`LƒAns.fQDwDug#Zd*[†M€z>yw@s†Q‰…W‰Lˆj2lq=r‰\‘z>v™oœs£‰Xz¬°›¸§ˆ£ŒRz_"UD Cd&U£z®¨yˆØÃÂY@MEF?P<w*Cv&Cp>~/I`>b=_=Z=bB^Cq(Gx/L\BcF_C^:n?s=•2H¸LUÁRUÇSR±DJ"<^ 8d@Ž4N¨K^ÊPTÐRT×][Úbb×^\ÖaeØahÛcgÚb`ÛdbÝgcÛjeÛmkÛigÜkhÞlhÝmiÚmlÜprÞrlÞrqàvnàuqáyqà}ràuàˆ{à†zàŽ€âŽâ}㒄☄⛆䟌堊è§ä¤Žæ¬ç³˜é²‘뵘츙ð¾¤ñªòìòůñŰñ˸ó˸ôͺ÷ÒÀöÖÄõ×»äÊ´ˆE]X>b8j9a7n<W
+8f;ÅrpЀuÃk_ÄvpɃyÈtÆ~sÇ€sÃ}sÅ}rÃzo¿zpÃt¼zt¼wu½wv¼xvºwvºxv·uw»vuºvw¸ss¶tt´sv®os®ps®jm°gl±ed²a]Ãi\Ñ}fÚˆbÞ–là t곈òÄŽöРöÒžöМøÑžøÐœöÌ–öÊ•õƉé²|Ð}^¦DL‹/J•6Jœ;L >P˜9N™@T¦GR¦KZ®OW²RW²SX¬FR BP˜:N›8L =L£JY©IW¦J\ H\œLeJ5K 4^Bx6Y˜mƒ®Œ—ºŸ¤¼ £ÀŸž¼–š¶ˆŠ¤ov8NU
+4\ 3,@Ã[XØrdâƒrè}ëš{íŸyìŸxê™{ì™xîžð ~ñ{í’vë‡käxfÕdZ·P\Ì“z2R^CLAVDT@R<[!N^!Jj&O]EU?WBZGVBT>TA[Go,W›m”~BrQCf'Pz8^^EN 9M <QF_K„Dq|;qt?qn.dZOq2dz@yw<w‚R‡Š\ŒŒ^Š~Ji0fzDyt:te’£y¨Ÿ‚µœušª‘ª’d…bCh)VRFr&Z³Ž°¾‘¡ÖÄÂ` GI?G>P9€0Et!>w!>ƒ2GaAb@\BXA^D^ Gr)Ep"E\A_?_A_:l<p<š:H´CJ¾INÀRS³AHŒ)?`>i=–:P­FRÊTWÐRT×\YØ``ÖXXØ^`ÚfeÚcbØ__Ý`^ßgfÛdfÛlkÜlfÜmjÝlhÜlfÝlhÞtsÞuqàyvÝxtÞunàztàuß~tâ„xà†yá‰{àŠâ㔅㗆♇䜊䢎å£å¤åª’讔箘괕캠캜ᄂð¿©ðîòƱñÈ´òȵô˸ôνöÒ¿õÔÂõÖ¾óطÕ™[8\9f8^ 3a7R
+<t>ÎzqÒˆÁmgÄxmʃrÈ‚wÅ~rÈ|pÄ}pÆrÀ|qÀ{sÃ~s¾|x¼yw¼zw¾xwºyx¼us¶st·svºxu¶st´ss³ru«np«jn«hk°hg²_a¸hfÌxhÜŠqäžuê«xì´òÁŽõÉ—÷МöÒ øÒšøÐ›øÎ–øÌ–ôÆŒñ¿†ážlÄgP›:H—5Lž9N¢BPž@R8O¦BQªHU­KW²QX·TW´NV§DNž=O”8K›;M¡?N§JX¨KY¢LašJbL`L 6M<[Dq-T–ay®‹–¼Ÿ£¾£§À££Àž˜·Œ¦rtt(BP 4\9.@ÇWTÜtfâ„nçvꞀí¢zìŸzì{ìœ|í zðŸðš€í•têŠiæ{fÕc[ªKd¢Ub¨s‡TAM@YFUAR=ZCf(N_D^AYAYDUBUGYESCUK|<j ¢w;hQA]C|=^\CM:R?[LVJd&Vz>i‚Ftv:nYJr5mf-\Hƒ}FxŠ\Œ‚R‚~J}r9pvC{s6q…T…¦‡®¨´£~­¨Œ©¢„¨^KRCz8meR­†¦È¨¶Ìµ¸T:I ;B ;N <.DnBw&BŽAT\?c>[@[>`@b@q&Ai!B[A]@c#FfBp@~(D¤AN¾NN½JL¶>G²DH–0Ag7o;—<N¬HTËTXÑVYÚ]\×`]×XZ×[^ÚbdÚecÚedÝgdÜhdÜfdÛjkÛibÛigÝldÛlhÜnkÝsrÞvqàsmßphÞxràwqÞzqáuà€xÞ„wàˆ|áˆ{áŒ⒃䘊䘇䜉äŸåž‰ä©’æ©æ°–毕貕꺠컟ðªïÄ®òƱñDzòÍ·ò˹ò̺ôÐÁöÔÃôÖÄöØÀäʵ„<UY 8`9W
+4_4K4?Ñ€tÖ†s¿lhÇ|rÊ„rƃtÆrÉ€vÅ}sÄtÂzvÄ~uÂ}w¾}u¾xs¿xv¼vrºwtºro¹rr¹rs¸st±tx´pt²ru®lo¬fj¬hk²b^ºb`ÍshÞŽtê¢zí³„ð¸†ñ½ŠóÅ÷Ì™÷ÒžøÓŸøÒœ÷ÑœùΘõÈ•ôÆë³~׊a³ML›:Kœ=L¢>M¦BPž=P¡AN­GU¬IR²MT´SX´OTªJQœCT–=N˜>Nž?O§EO¨LW¥JWžF]™K`NbH
+6R@XBl(RŽYtª‡“º› Â£¤Â¤£½œ˜·ŽŸipo+HN :]9“2DÄWWÚteà‚së‘xì|ð¡€ì |ìŸ{îŸ}î¡zð ‚îŸvì–uë‹kå|hØha¸e~¬_p«hjPHWLZHRDUE`&N`"IXE_EXBX@VCZH[BRKXRW„¤¢x8cNAQ>}8\j(NN 9bKaMUA`(P\K[Eb%SVIu;mf&\{H}‚E~ƒR„~J|xE|n4kwB{r:uˆVŠ£|¤¥Š²w¡£‚¡¨ŽŸx>qK?_J^P¦~™Æ³½¶•ŸJ?K=KDTA‰8Jv&Fu$C’CU_EgA\D^HaAcDt(FfEY<Z<_>i;r=1H°FL»HK­?HºHN¾PS›0@f:l>š>O¨CRÊPVÕ[[ØaaØ_[Ù]^×\`ÛcdÜhbÚgdÞhdÞidÛfbÜf`ÜhaÜjdÜkeÜmgÛolÜliÝpnÞtnÝplÞsoßyrà~sà‚tà€x߃xá„zà†{á€â€â–‡â–‚âŒâšŽã¡å«“媓媒殕籖궛뻠ᆭðÁ«ñíòǰðŲñ̺òʸðÌ»óϽõÒÂõÔÆøÖÁò×·¼…ŠZ
+4\ 4S.\
+3O6ˆ.EÐ{lׄrÁqnÅ~vÈ‚sÈ‚tÆ„uÆ„uÄ|rÅ~pÈ€tÂ~rÃ{tÂ|r¾|x¾|r»utºyv¸ts¶ts¶tt¸tr²tr°np²nj¬lm®gi°ffµd^ÆofÜŽqé§òµ‡ô¼‡õ½‡öÁ’øÈ“ùМøÓ¢ùÒŸøÒžøÑ›ùÍ–öÈŒñ½‚ä¢oÊjR§@Jž8M >M¡?LŸ<M AM©ER­LV°LU¶RVºUV²LU¦DN™>P™;M>P¢DT¨GR¨M[¡HVšCZ›NbœPdI2L
+5V@j,WŽVo«‚¶”žÀ ¥Ä£¢¼œœ°Š‡ kpt-HL5[
+4’+@ÃXUØr_å„qê“uìž|ížzêžzëž}îœ{í¡xï¤}ñ €í–yêˆlåyhÛg^Ø”“¯epz?]NEa GR?Q@a#Od%QZDX:`BU>T>[F\DUBRIa%Zšsžœu˜v2XL>O=l+Mm(OR<i)Qf#KUDb.XWLO?ZO\Ph.[r4jj8j‚N†R‚‹^’|L~s?um4rg2nˆZ‘¢~¥žy©^‘¬Ž²Ÿt–_†YGNFZNŸwŽ¿°»†VhREI
+;I<]>Ž:Hr Bz'C†5I`<j D^A`<a@e>p*Hf(K[B\<b:n?u:–:H­>F²@J¼GPÈZZÅST6Fg<o;™BU¬HVÊRTØ]^Ø`_×\\Ú]\Ú``ÜhdÜfdÜdbàhdÝheÛhdÛb^ÜfaÛhbÝibÚieÝjgÜmjÜtpßqkÝroÞtlÞxrß{qà}tà~wà‚zà‚wâ‹}àŽˆâá“à—„ã—…ãœŒå¡‹ä¢ä¨–殗宗泖鵜깢Eð¾ªï«òÅ®ðIJðɸñ̹ð̼ñͼòÐÀóÓÂ÷ÖÃöØ»àÀ®u @X:P2X 4N8”?OÏzgÑ|lÄysÅsŃuÅrÄpƃxÄqÄ€pÄ~pÅ~pÀyoÀxt¹|x¼xt»xv¸tr³no¶pq·roµqs¯ru¯np®lqªhj¬ef®ef¹baÔ€q殃ò¾öÅ•ôÄ“ö¾‹÷ÂŽöÈøÒŸöÕ¤÷Ò øÒžúÑ›úÍ”öÅŠì±sÚ‹a¼VN¡<N¢<J >K£;IŸ:K§DN®JW°MV´RV¹UX¸UW«HS?N—@Sœ:MœAR¥JS¨IR¦JXžH\šI_žSgšQeI1K3O
+7bJŠRmª‚³¼ž¤Á¢¡¾Ÿœ±ˆŸmqv0JQ2^7Ž)>ÁTTÚrbå„lé‘vëœ{ì¢xíž|ì {êžxí ï£|ðŸzî—tìŠhæzcÝf]ØŽˆ¤^jn9ZN?bJTC]Gh)O^DWAV@Z?XBT<_IM 9L @NIyJ‡žp–œjŽu.ZNCQBYDa$LT>r4\cJYJZJ[$PTCXKWHZLt3ji0bw=v„Q„c’~H{l6ik4n`%f‡Sƒª³—nš•j›¦²’m”ž|¤“_’j)hd TªŒ¬««v;`’s¡J=H <b@Œ7It$F|,H~*DaAg>c=d?b>bBg?bG^$J_De>t"@"<™5GÀFNÄNQÊWZÌWVÇSR 8He 8w"BŸ?N´JUÌOPØ]\Ø`dÚ`cÛaaÛ`]ÜgdÝdeÜc`ÝhdÝedÝfeßd_ÜeeÛebÞfcÚfdÞlfÛsqÛrjÝrmÜtnÝrjÝwoÞ{rß|tÝ}vß‚xáƒwá‡}âŠ{ãàŽ‚à“†â˜‡âœˆåžŒä Žä¤Žæ­•å­˜æ±—ê·žê¸ î»¦îÀ­ðÄ®òűðɲðÈ·ñȶîͼð;òÑÁôÐÁöÕÄ÷Ö¾îÕµ¥fpP 6N5U3P5§LXÒwdÐvnÈ{qÄpÆ‚uÇ€tÄtÁxÄtÁ~tÄpÄ|p¿|vÀ|qºtq½zr¼wr¸tn¶tr¸qn´rp±or­ln­kp®moªln°kl°`bÃjbáž}ñ¾’÷ÌœøÏ ÷ÌšöÄ“öÄ‹õÉ”øÐöÔ ùÔžùÒŸ÷Ϙ÷Êò½€çŸjÏsX¬JI¡<I¢>J¡=L >M¡?O®DN°GPºSV¹TW¼TX·TZ¥ESš=P˜:N>N ER©NX¦MX L[šJ`šHZ™Mb•H^G0I 2L6^D„Jl¦{г‘¼ ¢Å£¤ÂŸœ¸Œ¤rvx1KP0\ 6†&?¾VWÛr\å†pì”vìž{ï {ëž}ì {ìž~î¤xï¦zòšwî’qì‰dçyaÛh\Î}u’HdR>PAdH\Id"N]BZAV:W<X?W@ZBXAJ =J D`*`œz§›f‡—f„q0XL>NAR=` H\Dr-S^AbJS=ZJVCa"QWHTHl,\b)Wj4ju@o˜pšˆZˆ^ R^![a&e…X‹¤ˆ®žz©¦„©¨­B|€Ll¢~¥I~™k¤¶¤Â¡w« |¥À±»\GK?d>„,Dk?x(Cr"Dj Bg@j"Di@`?c>h@b@^DdGhAu?’0E±GTÊRSÌXZÍY\ÎZWÆTR¢:Hg9~,G›<L³LTÐRTÖ`]Ø`_Ø`bÛhfÙdaØgiÞe_Üfdàe`Þd`ÛcbÜhfÛhfÜf`Ûf_Üh`ÛnhÜomÜpjÜqmÞphÝtnÞslàymÞ{tà}uÝ€vàƒ{à‹}áˆ{áŠàŽ€áŽ€ä™…âž‰ãœˆâ¢Žä¥‘æ­‘æ¬—å±–êµœé¶ í¼¥î«ðªðűðÉ´ðȵñʶðͼòͽñÎÀóοõÓÃø×Ãôض̜”T
+2D-M2R 3´Y\ÎreÐ{jÄzpÆŠ|ÈsÆ‚uÄ~vÂ~tÁ~tÁztÃtÄ{u¾{q¿yu½vq¾wo¹xtºxr¹sp¶qn´rt´qr®kn®jm­mu«ik¬ei´aeÊp`ê°ˆòÉœøÒ¥øÔ¦ùОøÈ•öÈ”÷Ì™úÒœøÕ¦ùÔ¢ùÓœøÐ–öÆŠí²tߊ`Â\O«BIªAJ¦;F :H 8H¥<L¬EP´KS·PU¼QSºRW­JTŸCS›<O˜9O AO¦JV¥KZ¨O]žJ^›Lb™Mb˜PcŽDZG 4I3N7[B‚@b¢t‡´”¾¡£Å¤¢ÁŸš¶¥tx9MT 2[ 3Œ"<ÀVXÜoYä†të’uî ~î¡{ìŸzì|î¢~î¡{ï¤zð xî˜qìŠhå|eÚh^ÊrlF_VF\Je"Lf$N`HR :YBS;[<X>XAYBO@MEd&XŽ_’Z‚‚Fg…IlbINAM?QD[Fj)Pp,TU9b HO 8O=VEh*T`"PP>k/^d&V]&\`$[‹d’g”g(df.htE‡‹c– z¦Ÿ{¦§‰²®’³ z¦ˆDq¦r—x5h¦ˆ²®•±lˆ’qz†_jQ@V>n"@}(Da?r!Bm!As'DeBf!Bl!Bd@h"Dd<c=`?f@z&F(F¡:HÄTRËSXÏZZÍZ\ÎWVÅTV›-Dk;†6LŸ@O¶LVÑTV×WR×][Ú]`ÜdcÚhdÛgaÝefÝicÞfbÞf`Üd^Þd\ÜecÜgbÜhdÜidÝhdÞljÜlhÞnjÜrkÝrpÞxtÞvoÞzsà€xà‚{á…zàƒxÞ‹}àˆߎ€â„㘇ãœä—‰ä¡ä¦äªå­˜æ¯šç´œç¶¡ë¹¥î¾ªí¾¬ðƳïIJðǵðȶð̼ñ̽ðÎÀñÏÃöÑÀ÷ÔÄöÙºãİr)EE9H4X 6¼_[ÍndÎwkÃxsÌŠ{Ä}pÆ‚y€sÃrÂzpÃ|pÃ{sÁ}u¼xtÁzr¾wp¼zs¼vp¹xs´ql²on¶sn´pn­mo¬kn­gh«de¯bd¶^_Ñ}iôÍ øÖ¨øÔ¦ùΚùÊ”öÌ™÷ΛøÓŸ÷Ô¤ùÔžùÑšøÌõ¿‚è nÖvU¼NI­DL°?F¤=H¡:H 7F§AN«DO´NV¶TWºTU²MT¦BP @R›>Nœ>R¡FQ¥NZ¥L\¡L\šMa˜Pf˜K_“J\‡:RH.J4M
+:\CDc t„´’›Á¢¤Æ¨¤Â¡œµ‘¥xy‡BTU 4\
+4†#;¾QRÜp_älë”vî›yíž{íŸ|îœ~îŸ|î¡î¡xð ~ì˜nëŠjå}iÙjfÃbbŒCdbOh%Rj&RZEXGS>X>X@Z<[>Y@VDRH^L’d‹|Fj^LƒNz~Bdg.XTEPCSF\!Kv4]k&LR8`GR<Q:[De(P`#RK
+=a)Rl.`a [j:p‡cœŒZŽŠ[’~L†TŽ‘f–¡x¢¢|¡¤z‚Knœw ¤z¦¨²²Ž´Ç®¶¡}[BG9K6f&Kk"@y(Bs @]@u"Bj>n$Bb?f Bf?a?l$Ad@b@bAoA€(B-F­DKÎWSÒZXÒ[ZÌXZÌTTÀLO&>g:‹<P >M¸KTÓXVÖXVØ\]Ú`_Úb[ÚebÜgdÞj`ßf^àgdÞh^ÜgcÜf_ÚheÚjgÝjdÝjhÜhcÝnlÞphßpjÝrlÜpjÜvpÜxnÞvmà~rà~uàwß…à„|á‰~âŒâŒ€â‘„㚌ä‹ã¡Œã¤å¦‘䮘沚浘踣뺦뽩íÀ®ïƱîÄ´ïÆ´ðÇ·ïʺðÉ»ï̼ñÏÀôοõÒÂöÕ¿ïвšYhA1H5a:ÀlgÎl]ËzjÄytÈŠ|ÄrÂ|rÂ~rÀ~vÀ~sÀ~t¿|tÀ{r¸xu»xvºxt»yrºzv¹us·wr²rs±rr²qq¯mm¬jm¬fg®fe²`a»b`ÖŒrñ¾ŒõΞ÷Õ¦ùÒžöÍ’öÈ“÷É‘øÏž÷Ô¢÷Ô¥øÑøÎ—öÇ‹ï²záŽ_ÌiT¹NP´IM°HO­BN¦@J¨?Jª@K°GP»RR½XT´MR®IU¢CR=N<O CS§KZ¦O]¤N\žL_˜IZ–DZ”E[ˆ<Qˆ=XF0J6H5[A|Ddžvˆ°˜¼Ÿ¢Â¥¥ÀŸž¶¦yy‰JY]8\ 3…$?¸PTÜqdæ„pê“vìŸxìœ{ìœzëžxêžsèwëxîwè‘vä‡rà~rÖplºZm”Lsl'Xg%Ne"PTAS?T>Y@W<X=Z>WGZLi*Zn2[j.PQHYJ”b†~<bn2XVBTARB`!Lw7[h#JT@XAU@P@YDf&Rf(WL A_(QAq_OTIf/`n4f‰]Ž”kž•s©–jœrŸ¦‡¨—i|6p‚Kršm›v¢²‘´º•«¦€¨†YŒ^&Z`'IBXX<x,BjBdBz&DfAn$Dk"Dk$BeCd<o"B`=c<e<}!@‡)@–4G¹GMÍXSÑ\ZÏZXÎXZÍ[[ÂPN…!<h8;N¢@QºKSÓZXØ_`Ú`\ÚbbÚ^\ÚdgÜgfÞfaßlfàkfßebÜdcßf`ÜfcÛe`ÜffÜhcÜieÝidÜjfÞjbÛliÚpmÞupÜumÝtlß|tß~và‚zßy߆~áˆ~àŒ€â‘„á”„ä•ˆä›Šâžä å¦”䭘江粚綞꺥뺨ëÀ¬îIJî²îÅ´ðƶðʹñ˽ð̺ñÍÀòϽôÑÃöÓ¾ô×µÇG0D5g9ÃigÐn^ÊwkÅ|vÈ„wƃv¾|vÀ~tÄ~p¾~t¾{p¼{s¾xm¼yv¾znºwp½zs¹xt·xr¶tq´st²st±pp¬ovªjl®lo¯fe´c`¾jhÛ’wñÀ÷М÷Ò øÍ“öÆŒ÷ÆŠö˘øÐŸúÕ¦øÔ¢ùÒøÍ’óÁê¢nØy[ÄaT¼RR·KL³EJ®GOªAIªCL«BM°HP»PQ¼RU¶QU­IR¢DRœ>Ož@P BR¥JX¥LZ¡L]žJ^–JZ•CV‘@Tˆ9R„5RH3F0L4W?w@b tˆ¯š¼¡Â¤¢¾  ¸”§z|Uac:^2„ =·NPÛodæ‚në’ví›|뜀痂æ˜~ä˜|ß”}â—‚åšä’{à…uÝ|rÒki¸fy§kk(UXFYJTCVDW@X>W@`HZFWHm3\€Fp]EUFTGbM…Ai~<d~>bVCWHTDc*Pd"Kf(QXCU>VFTC]Jj1ZXFZQi+V}Bpe%VTFUJZXm:njœœ®špž”d“Ÿ~¦–l—ŒU~}>hs:_zQp’s•’b°ŽŸ¾˜«£x¡ªv “Rz>nŽGfo&Hf Cj Bk F}0L‚9Pm>e@hAi>d;`<f<†(AŒ)@Ÿ5DÀIKÌZXÏZ[ÐYYÐW\ÏZX¾NR?j<•<N¨GTÃRRÖWR×[WÙb^Ùb]Ú_\ÚefÚjiàidßojájbÝhdÞe`ÞjcÛbYÜ`]Üb]ÜfbÝg`ÜjbÛlfÞndÜpgÛmhÞqjÞulßvrÞypÞ}uàxàƒxÞˆ~à†yÞ†zâŠ~áŽâ•„à”‡á–Žâ¢Œäª’ä«“å³˜æ²˜ç³Ÿêº¤ê½«êÀ­í³î²îųðƸðÉ·ð̺ò˺òÏ¿ò;óÏÃöÒÀöÔ¸Û¸©`<D6w:Æh^Îs`ÇzpÂ|sÆ„vĉ{¿|t½ztÁ|pÀ|t¼yo¿yr»zpºzt¸vt½xn¼wr¹wp¸zs¶ur´ts³ut¯lqªoq¬kp®gg°fe¹c_Æmcà–zòÃöÐ÷Óœ÷Ê–õÄöÄ÷Ê”ùÑ›ùÔ¦øÔ ùИöƈð¶yâŒ^ÎhUÂZTÄXTºLN²GOª=G¬AI§CMªIT¬FL¶PV·QR±PT¨DOž>Nœ;K>NŸHV¨MY¡I[¢Pc™EZ”JZ’BZŒ<T‰>Y…6RF/D2J:U>v9^šj}²—Àž¡Ã¤¤À¡º•¬~~U]]7[ 0‚#<·JNÚl]çƒlí’wîœ{ï~êœ|æ›~曀ãšå †èœ€è“qè†nâygÕlmÆr„®bl&Rd&MRBVBX?W@X>T<`B]DXBe*Ma#GUA]HZKYH‚Bk}?dv2W^HRD_#Lc&OWC`$J^EYHZEZHZGd*S^R^P|Bk}@qYJTHF BQNi5nf˜£„¬“f•™r§˜qš˜pœ†Fl†JuM
+7O :i0Kp1XX:h#A†?QŒG[†Rp o“¦lž\Œ¢sŸ˜_‰”Y{†>Vj@n @c>c;k>a;e>o>†(B™4I¨@NÇOQÐ\ZÏWYÒ[\Ò]bÒXV¶HM%FtA›?P«HTÆTZ×[V×]^Úc`Úd`Ûa`ÛabÜfeÞicÞljàjeÝhbÝidßgcÛdaÛc_Üb\Úc]ßf_ÞjeÛheÞohÝneÜmfÞnjßwrÞzsàymÞ~yà~và}xß„y߇{߆~⌀⒂┇ãˆà˜ˆá¡ã¨Žä©”存氚泠鸦軨꼫ìÀ¯íòî±ðÄ´ðƶñÊ·ð˼ñνðÍ¿ñÐÀõÑ¿öÒºè̯‡=WF:*DÇh]ÌtgÄ|sÀ{sÄ€rÄŠ|Ä~sÀ~v¾|r»{x½~t¿}tºws½xo¸wr½xs½xr»yr»zr·xt²sr¶ts®mn¬jnªgh­ge´hdºgdËnbå¡|õÄ“öϘøÑš÷ÈŽöÉöÄöÊ“øÒžøÕ¦úÒ¢ùÎ’ôˆé¥jÙyYÇ]TÄTPÅXV¹KN²JP¯BL¬CM§?K±DN³JNµPS²NR¬DL >Oš<K9HžBQ¢KY¤N\ L^šFY˜DX˜F[‘<SŠ>WD[‰AXJ 4H2G6R>n1U–dx°Ž•»šžÃ¥¥Ä¢Ÿ¿œ™®„„ŒS`c8\9‚"@¹NQÛk]æ…kê’yìš{ï›yìžyìž{럀ì|í¡|îœzë•tèŒkäzfÕwuÆm~˜Fjd"ObFXG[DW@] FX>ZA^BY>U@YBVD] Jc#LXEXFw7\r5^j)PbFYGg*S[HUC`$Gc"H[BY>S@Q@d&R^ RXH†Ott8fL<e,eLE\ Zo4s‡Z‡–lœ}Jvœ‚ª…P‡bŠœlœY~r0cN =`=|FmP>W<x1DI 8N;w/Ir/Jl%Kžcf–[o‘JaOsy/Qk Bd=f!FhA^=f=}'C.Ež6E´CLÎUTÏXZÐZ^Ð`bÎ_cÑ\^­;ItB~(Ež@R¯ESÎX\Øa`Ù]\Ûb\ØdfÚcbÚgfÜjfÞigÞkdßggßgbÞd`ÞjfÜfcÚbZÛc^Ûc]Üe`ÚgbÛgfÝkeÜohÛplÝohßsnÞtnÝzrÜzvÞzwà‚yà„vß‚xß„yáˆ{à€à“‚㑅ᙊࠊ⢌ã¤äª˜ç²›æ´¢è¶ èº¨é»«ì¿¯í³î¶ðĶðÆ´ñȹðɼï̾ñÍ¿ò̾óпõлñÒ®´pzF5‘8JÉi\ÍvbÃ|r¾|rÃqÆ‚s„xÀ|pÀ~rÂ{u½|u»zt¹xnºwrºzo¸xr¼xo»yt¸xt¶wt¶vrµur³rqªloªnm®lm²ki¼hbÌn`䤂ôÁ‹÷͘øÒöÈöÁ†öÄŽöÊ—øÖ£úÖ¤ùÑ™øÈŽñ¹|ä•bÔlUÆVSÀPRÀVX¸KP°HN¯HM£=HªCM±GL¶HJ¶KN­FP¥BL8I˜8Fœ9J¤FS¦LZ¤N^žO_–>T”CX”FY=RŽAYŽA[B]I5H2I
+8R?f%L’_uªˆ“¹šžÂ¤£Ã¤Ÿ»œ”«…†Ubf<_4‚%BºLPØl`ãnë’wì˜zîž|íìžvì |ì¡zí¢}íŸxë–qç‹oâzbÓoqÀsƒ—EhbH[A[@d"LZ=V@W=X>\HZCUBYH^"Le(PZFTFc Nq2Yj.Uc!KYD^"L\FXGXGg+Nh-TX@S;N=T>g%SOD\NƒPvr9kMJX$Pv@k,dt@zƒLhš‡[”dŽl …Q‹˜n›žtš–f“\E];„TvR7g>b;H?]@w,HfCYDp1Ja"Ht$GŒ?Mœ]fo&Hh>b;`<_>o@†*A–5F¢<J¼IMÍVXÌXYÎ\`Ð^aÐ`bÎ^[ <In?Ž4M¥FU´MUÔ``Ùb]Ú`[ÚbcÛcbÜggÜggÛlhßhdÞgbÞhdßlhÞjdÝifÜcaÛdbÜdaÜd`ÜfdÛkeÛieÛlhÛigÜlißmjÝrkÜupÜtqÝxsß|tÞ„{Þ„{Ý„z݇{Þ‹}à~á‘„â“áš†à™ˆâžˆä¦’å¬“ç²•ä³ç¶Ÿè¸¦èºªê¾®ìÀ³ïÆ·ïÅ´îÆ·ïȶðɺð̽ñÌ»ñÌ¿ò̼õоóҲРšM 6š>LÇbWÍxgÄ|uÁ€vÄ‚w‚sÁƒ{¼zwÂ|t¿zvÂ~tÀzsºxo»xrºzrºwq¹ytºwu¹xt·xu´vr²rr°oo­no­kl±lm¸lgÀeXÌobà˜sò¹„öÈ’øÍ÷ÇŠöÀ…øÃ‰÷Ì—ùÔ¢ûÔ ùÏ•õĈî²x܆bÐ\NÅWVÀTQ¼RS¸OU¶HN¬DN¢>J¦:G¬EP®EM°IP¤?L—6Jš8L˜<J¢CP©HU¨OZ¢M`›J^–BV˜I_”DW@XAXF^“H`P9L9L ;T>f$K[sª„‘µ’˜À ¤À¢œ¶š”¬†ƒ”Ydf<^:…'F¶LQØk`ãhê’tëšsëžzë yìŸyìž|ì xíŸzî yì™tê‹sàxeÖy|Äv†¦KkgHbEZAdHU=WBS@V@^HUAVFd$Pq/Vg)PVCXKXGj*Wd(Lf(Ng&M^ LR?UDTD]Hu<aUAQ=RB]Fa"NMBl2gt:jk5^s>yTPs@q|Bu?kL|}JvŒg”‰`”„XŠ}Iz‡Zœt¦€¢„Hy[>‡Y|T9d=P<Q>p&Bp<e%H[>u)Dh@m ?a?|-@r&D^@^;[9b:z$AŒ2Cœ6F¯BNÂJOÍVVÐ[ZÐ\^ÎbdÏ_]ÄTV”/DjB“:L¨LZ¼RXÖ_^Ø`aØ`^ÛggÜhfÞhfÝheÝfdÞleßlfàlhàngßmdÞhfÛfcÛdZÛfdÚc^ÛgdÝhbÜjbÝh_ÜmbÜndÝpfÞpmÜsnÝvrÞxoß}uÞ~wÞ€xà‚yÞ‡Þ‹àŽ}áâ‘‚à“€ßš‡âžŠä¦Žä¨‘å­’äµè´¢è·¥é»ªì¼­ê¾­î´îòíŵðÆ´ïȺïÊ¿òͼòÍ¿ðÌÀôξóжßÀ¬o E BLÉfZÌxiÆxÀ€vÂ…|¿‚yÀ€t¾}wÀ|pÀ|t½|vÂ~vºzr½{t½{v»xt»zv¼zx¸xqµwu²tr³sq®nm«mp°pp¶rn»leÄl]Êl`Ú†dê¨xñ¾ƒöÀ‚ö¿„õ¿€öÃŒøÎšúÖ¤úÔžöΕóˆë¨mÚ|XÊWSÂTRÂQT½NR´JN³LV§DM @M¤>J¬DL®JP <H•2E‘.Bš9M¡IW©KX¦L^ GXžJ\•BV”BZ—F\”E[•C\@Y‘Ea“H_L
+4L
+5K 8Q >b L‹Ul§Œµ”˜¾£ ½¡¹˜•¬‡†–_jm$Cd<„#B¶HNØj^âiéŽuê˜xìŸyëžxìœzìž{êž|ìŸzíœyì—rè‰gàtfÛ€}Ðz„´`ufEo&PV>`FQ>R>P ;VG]HUIu7b…@ek'L^CTBTDZIh*Sb#Md$Kb#G[F^JZFVJSGd)O`PO<UA^I^$TPF~Huj+``&V}Myn2jQFyT„NyM‡zH‰‹lœ–u¢ˆb“u<nd&]{Fq˜j }§}8bKp\ CdHL <T?r$@j @`;`;o$Dr(Dg>^>d>i#C_?]<^8j:€$=‘3G¤8H»HNÀJRÉW]ÍVVÍ^_Î^`Î]\¹NS#CnD™:N©IVÄUWÖ`]Øc`ÚbaÞjgÜkhÞifÜjiÝgißkeÝjgákfßkgÞhbÞiiÚecÜfbÜgfÙedÛfbÝkeÞg\Þf`ÝibÜlbÞlgÞtoÞtlÞupÛuoàwqà€tßtà‚uÞƒzà‰~ß‚à~â€ß‚à–†â›†â¥ä¦ä­”氚涢浢漪꽬齭ìÁ®îÂ³îÆ´ïŲðȸîɼñ̼ïʾð̾ò̽òϺìÌ®žVjªGOÍj]ÊzfÀ|t¾xmÀ„x¾z¼|x¼~t¿}w¿~t¼}y¾zuÀ€s¾~w¼|u¼|t½{u»zvºyt¶zv³vu³vt²rr®np´rl¶nm¿peÉo`Ëm\ÐsZ܇`è¤nê­rí®vî¶xõÃøÏúÖ¢ùÒš÷Ê‹ôÀ‚è i×rUÆTR¾PR¿LO»LO´FO®DK¦<I¢<J¥AN«HPªAJ˜6HŠ&@1FŸBR§LYªLY£HUŸJX™BTŽ@S“BV–BW”BX”D[‘Ha‘JaŽCXR<R>N:M 9[F„Jf¥}ˆ´”𽡤À¢ž¼š—®……šdlx0Jj>ˆ)DµFN×i_ã|dëuìš}ízìœzí~é|êŸ{ì }îž|ì”qê‰káxhØvvÚ‡„±WmkFˆ>d\BZAL>NBN@_Im&R~=ey:]c!CX>TAPDTGb(Tk(Qf*S^FYFVHh&R_$LZ#PRJZ$It8hRBbLXG`(\e.\m2[b%Wd$Zs:b‚R‚LLII‡pœœ–ÈŠb’—|±‡d’ˆ^ˆL~i.gd+dŒdž’f”˜n “_šzFp˜}¤N <[Dn AeD\Dh Bj Ap"B`>b@`>c>_>Z8a:p >‹1B–5D¨;E¿LPÅPTÊVXÌ[^Ð\ZÏZXÊVW®DNnC~,JBTªLXÌ\^Ø^]ØffÚgfßhfÞkiÞjgÝhfÝmiÞqpßifág`ßgbÞf^ßgaßf^ÜhdÜc`ÛibÚfaÙe`Þe]Ýf\ÝjdÞldÞnhÝpjÞupÞvpÞ{ußzsÞ}rÞ|tà‚xàƒx݆zàˆ|à‹~ጀâ’ᘄ☄➊㢎䬒䫖洠泠纩黨輮ëÀ±ïóîÄ·ðĵðʸðɼò˺ð̽ðË»òÌ¿ñλðΰ̌‘¸LNÐpcÊxf¹tj¬he¸rp¼wn¼wr¼zu¾{u¾~z¾w‚tÀ~x¼zº|x¼|t½|w»{vº|v´yw´wt´us°op±sqºrm¼tpÄthÈtfÌr\Ît_Ît\ÖZÞŽbà“fê­zöÆŽ÷ÑšùØ¢úЗ÷ÉŠò»}è—`ÒiVÆRP¾MN¸KQ´EM¯DM§>Lž9L£ALªHT¦@N>M’1GŠ.H”9N¤GTªT\¨LXŸHVžGY”?SŽ;T‘AS”F[”H\”BZ”H[H^†8OR:M 7M 9J
+9WB~Ddžuƒ°’˜ºŸ¾¢ž¼›–®‰‰žnu~:Pn?†&C²FLÖi\â€hé{ë˜tîœíšzê›}éžzéž{ìž~îžxì”pçˆnßxfÖprÜŽ‚²Zrs&MŒBb^JXCO?OD^ Nw8^{;^h$K[HSBSBTARDVHj-Zd Om-X^I\I`$Nk)Tm,Uc)RWIPCk,W`$N\HQJd*V‚R‚^GUD]Mc$R‚Zm4fOMS IoBdq?oˆbŽˆh–‹e“p8d\R]TxK|g£©Œµ¡v­¤Ž±ŒfN:^@k@^=T<n?c=d=]8b<`=[8]=_:f:z >Ž,D¢>H¶FPÄORÉTVÎVWÌ^^ÎYXÌY\ÄTZ—3Gh?Œ4J¤BQ½U\Ô]\ØadÙedÚfeÛgiÝnjÞmhÞjkßleàqmâmhàldÞhaÝjfÞieÞh_ÚhdÜc^ÙfdÚd\ÞdaÝf_Üd[ÝmeÜpjÞlkÞpkÝslÞsiÜwnÞxlÝ|sß}tà}sßuÞ„{Þ„vß‹|߉z߂ߖ‚àœˆáž‰â¨ã©ä¬”è°ç´¤è¸¦é»¨è¼®ê¾±î±îÅ·ðƸðɺðÌ¿ñʼð̼ïʼò̼ñÌ¿òϸ䶫ÂWXÒw^Åxa°e`¢Z[©cf«ff®ij´nj¹pk¸snÀxs¿~t¾~wÀy¿€w¹}wºxº~uº~v¸|y´{u·zt´tr¶ztÀsnÅxoÈxnÎykÏvfÐt\Éq^ÊiRÎsZÙ„dë®xøÈúÖ¢ûןûÒ“øÆ‰ð³qâŠ\ÎaRÁPPºNPµJO°>H¦BP¡9LŸ<N§BP©@L¢=L•4GŒ,D6J?R¨LX¨PX¥HZšCV”AV>RŽ>V@T•EX“F]’DY“Nb‰@R0JN:M 6L
+5NAR@zA`žx†°š¸œœµš·™–¬ˆ†žlr€;Pn>†&>µBJÖfYâ{bè‹të–vìšzë›vêœwêžwì{ìŸxíœ~ë–xèsâ{jÓfeÚ†z´ats'K‰?`b I[CUBUC\C`E_CWEWEXEV@ZHTF\ Jd"Nk+Xj+P_ LYIh,Ue#P_Ic"Ld!La!Ij*N]APCLDk6]}Fq\FPEYNl0c‡`ŽBzi/bUKWKVKWNtHr–}¡„X„[K\Rn0lrBs˜o—¶¥¼¨˜§_$JY<d<j?V>X;r!>e!C`>]<b<]8Z9`;e8j:ƒ(A‘.C§>J¿LPÂOTÊPRÍXXÎ^`ËYYÍXVºIM}@lD˜=R¨EQËZZØ_XÜdbÚbbÛghÞhgÜgfÞjfßkhàidápkàsnálfàhhÞhdâkbßldßlaÜhbÛfcÜdaÞgcÞkbÝfaÜlhÛmfÜllÜphÜsoÞwqÞwmÞxpÝysÞ{oß|rßuÝ‚wÝ…{݇~ߊ|àƒâ“€à›„áˆâ¢‰â¦ã¨ä°œæ´ å¸¤è¸¦è½ªê¿­îÀ±îĶðȸðȺðȼðÉ»ð̽ð;ñ̾ð̾òϺïÊ·ÒspÓv^¾iX¢OR–DQŸTZOU¡Y^¢\_ªfj®gf´nl¶qt»xu¾zv½|s¾{»z»€x¸|y¸}z¶|tµyu¸vs¼wpÄ|uÉ~rÌoÐ}hÐwfÐt^ÊtdÅk\ËviÖ†kì·„øÌ–úÙ£ûÖžûÏöÆì¦gÜ|ZÉ[NÄNL¹MP²HN«DP¤<Lž7L¤@NªFR£AO•1F1F1G˜<M CT¨LZ£IX H\™FV“A[<R‘@W”CZ–H[’DXŽDYCW„9Nv'?P:N6J5L;TA‚Fd¡x‡­•¶˜š¸œ£º™—®ˆŠns€<Ps#@†&@±BJÕdXß{iè‹pê–vì›zêxìœzëžxìyíŸ{îœyë•tìŠnå|bÔd_Ðtr®_vx,N5Xl%PZCS@R=X@]B[CZDVDT?TD[JXEk,X_Jl)S[L\!P`'Pz>ag*PRCYE\@c#I]AS@OAP?wBjb J`IUJXNx@ui˜Žg—i0^o6jZHX HLFSO‚`z•mœ[P\"RJ~€Gxr4^˜wŽˆVuO 9a=j>e>R7]8h<a:^8^;]5X8Y8e9g;o>‰*?˜6D±FKÂKNÅPTÊRRÊUXÌ\^ËY[ÉXX£9LiIx&F<L¼PVÓ`\Øa[ÚgcØfaÚigÜgfÝiiÝifÞhdßmhàniáncàpjájdàjbâlbàldàkbßjdÞe\Üg`ÜhbÜg^ÝldÝhaÛoiÞojÝojÞqjßxmÜvrßvjßypß{qà~uÝ|qÞ‚wà‡zàˆ}àŒzá}ß”á˜„á˜‡âž‡â¤Žäª’æ®šå²æ¶¡è¸¦è¼¬êÀ®ìÁ±ì´ïÅ·ðȼîÈ»òʼñÊ»ðͼñ̼ñ˽óϾóÑ´Ý—ÊlX§LN”@J†4H˜HS‘;F–DI˜KRžOT¤Y]©`d«ef®jh²ll·vv³trºyv·xs¶xt¶zx·~z¸|wºzrÁvmÆ{o΄tÒ„jÔ}gÑx_ÐvYËq[Éo\Ðt]Ýmñ¾ŽøÎšúØ¢üÕ™û̉ó¼|ç˜^ÖmTÅRL½KMµEN¨>Hœ:Jš6I<J©BL¦CN—8MŒ,BŠ-F–9MŸFX¤L]¢JY CT›DT–CW:P’@W–EW›Lb˜J]‘DVF\†:R},Em>N 5L 2M
+8M :WB|<]šk|®Œ–¼ ¡Å¦¢Á¢µ– qt€:Or$@ˆ(@´?GÒe\ß|fçŠrê˜zê–xìœzêœxìœ{ëž~ì¡zï yì–wëŠmè~bÖf]Èbd¤Vn~4V„>^p(PT>N?SA]Fa"IYDYGXCTBTC]Ja&Qe&Lb&Pj(SVIWLd,Ur5Xl+PVDT@S<ZF[CTAR@XDr9bTA^ Kj+^\Tj+`Œd•’g’‘h–\"Ki)TZNJ>VLf1To<Z_#WYM†V†[††G~Œ\y}BbU 7g9l@^9U>b<^8^9^@^6T 4X 6]7i=u&C~(C‘,@¦>HºNOÂORÇSSÈSTÊZXÌ]]ÎZT¾PO‡&@c>ˆ/I¢ANÊZ\×fdÚd`Úb[ÙcdÚfdÜhfÞkfÞkfÞnjàlfánlßmjÞpnàlhàjfàjcßngßgdàhbÝd\ÛhfÜhbÜcbÚgbÛicÚjbÝkfÜrkÞrhÜulÞxpßumà{pß|qß~rÞ|tÞxÞˆx݉|ߊzâ|ß’~à•‡à•„á˜‚ã¡ˆâ§Žä°—ä¯œæ²¢ç¶¢å¸¦é¾¯ìÁ´íŵðÆ·ðÈ·ïɼð;ð˽ñÎÁòÎÀñÎÂôÏ¿ôԺ亩·ZV‘6FŒ2D€'?Ž6C„,=‹8EŽ7G“DN˜DR›FPžOUŸR\¤^_§ad§bf¯jj°nn®pq°pr³ut¸ywº~{Ä‚w΂oчtщkÎz`ÈtXÇiQÈiTËnWÔ~bå¥zôÅúÒœüØ¡ûÒ’øÆ‚í²râŠXÏbSÀQM³FJ¤=I–1G“0F–5J¢>L¨DMž>KŽ.F‡(BŽ2H•:N¤N[¤IXœDTšDV™FZ”CT>X•BWšN`œOb•EZŽEZ‰>T€6Mt%@j @L 4L7J 2L
+6Y@o,P•f}¸˜œÂ¤¤Â§¥Á¥¡¸—–¦z|‹G[n!?~ ?®>GÓaUÞveä‡nç”sè—vèœ~êšyìž|êž}ì |ð |î˜uìkçcÛk^Æ`eŸPn€4Z„;^{3[N>N@ZAj%LZ@WAT@RDVGXHj(Qj*RVDd+Tb&PUKWJg(Oh&Jr.V]EXCXAT>R8]DQ>ZFYBWHa,ii0a…X–„NŽƒN„˜pžŸ†©m;p[Lj0\l0hVCe#L`!N\Jd%U‰b•r/Z‰Jt—b‹d CX;b<f8R :Z<c>Y<Y:Z7Y8U4Z6b:l;+DŠ,B˜6G´FNÄQQÇRPÇRSÈPTÌVTÉVSÇVT¤9Dd;r$G’:N®ITÔ`ZÖa^Øb\ÙebØffÚfdÜhgÞnlàmhÞohànjátiÝkgàgeàjeàndándàlbÞlgàiaÞe^ÜdYÜhbÛd\Üf`Üh_ÜlcÛieÞpdÝpfÞrhÞrgßvpÞypÞynÞ|sÞvÞuÞ„v߆x߉váŽzà’á”‚à–‚àš†âœŠâ¤Žå¯˜ä®—ä²æ³¢æ·©é¾¬ëÁ±ëĵïÅ¶íÆ¸ïʹñ̼ðʼò˼óÌ»ñÐÁôпöÓ¼ëʶ±UZ…,D4F…0J‹:L€)B‚+B„,@‹0@Š8J9F“@M“CN–DP˜MU˜JZœR\Ÿ[`Ÿ^d¤`j¨gj¯rs¹tpÄ|t͈vÒzÓŒpÎjÃv]¼eXÂjYÈlW؈eöË•üÕ¡ý× ûÎó¿~éžbÚuVÈYR´FL¥:F˜4EŽ,C2G˜8H¤BP¤@L—6H…&@Œ0F™>N¡HV£NcBT—BV˜CW”CY“AV’BX”D\›PbšOc”F]EY‚8Pz-Fl<n;L3H2H
+6L 8Q=h#O–j|µ”˜À žÄª¤Ä¦¡»›‘¨~~”Wcn<&@­=FÑ`WÞviå†pé“wê˜xê˜xêž{ëž|ì›zïž{ïžvî—xìŒnç~aÞodÃX^¤Uu†6[ƒ<^|6ZP>O>`FdAV@V@VBUCTFZHo0V\HSHl,V^MQJXNk,Vj(Mj)OaF`DY?R@WF\B_G\FSF_'LnErFDpBlˆZŒ|E|‘h¡‘jޡޏx>ua&Vf(Sj(br2az8fv2fEl „šZBf#Br4KbB`=d9]6R 5b@]:W8Z>\=W;W9]5i @q;ˆ0C‘1D£<H¾MNÆRRÊXUÆTRÈSUÉTTÇVT¸LPx;Z=„0NžCQÀVXÖ`]Ùd`Ú_ZÜhbÛa_ÙcdÜf_ÜjgÞjhàlfàleáoiàlfâgbÞjcàofájbßf_ßjeßgbÞe^Þh`Þg]Úe\Üf[ÜfaÜg[ÜleÜofÞmgßofÞrjßrjßvlÝvjß{lÞ|tÞ€tà‚uÞ†xà‡yàŒà‘€á”€ß˜†ä—†â˜†ä¢Žä¬’㬘属洢湪辯꾮ì´íÄ¸íÆºîÈ»ðʺïÊ¿ð;óÌÀñÏÂôÐÁöÒ»ñÒ¸¸rwˆ/G’;HŒ8H’>N‹2IŒ5J†3I†2H†6J„/E‰1D†5D†2E‡4D‰:HŠ>K‹COETIX–P]\f«fjºrlÆ~pЊpÓŠlÎ|bÍ‚hч]Ð`Í|_Ûjðº†øÐ›üÖ¤üÔ˜úÈ…í°và…XÐdN¸JK£<G–1D*A‹,B’4FžANŸ>M›9H.C‡,E”:N¡IY¤J] K]˜BT–AX•FV“@T’?S–GVšLbšRg–I`“H^ŠAW}0Lt*Cg=l:O:J7G 6F1L 9g"Ošl|´’À ¢Æ§ È§¢½œ—ªˆ…œblu">€&@¨>HÏ]WÜvdãˆpæ’tè—zêšvêœ{ìŸzêš|í¢|ð¡{íš{ëŒpè‚cÙk^¨?P¦b}‹:`ŠBcx3ZK <Q>bFX;RAV@R?RB\Km.Vi'OTFUGo0U\KRGXHi(Ri(On,Sc!Lh(ObFP=U@[D\D`K\JZCZHF ARHxF{v:vˆ[‹ašššŸ©Š_–] Rn5h…K€”^‡•[ƒj%Do#>U?X<X=f?h#CcBV6X;`;X:X9X9Y8W8Z=`8o;w <3Eš4DµDJÃRUÆVUÈWVÈRWÈYXÈTTÂPMŽ*@Y:e=”;LªO\Í^ZÛbZÚd`Úd]Úb`ÚdbÞegÝegÝjfÝlfàjdàieápiàjdÞhhÞjhßniànhàjbÞjaáhdßf`Ýg\ÜhaÚdZÛaXÛfaÚd\ÝkdÜmcÜkcÞrhÞmdßqgÝwkÞwlßynßzmÞ|nßt݆zÞ†wß‹zàŽyâ’à”á”â—„ã Šã¨ãª–实岤温軫꾯êÁ²ìÅ·ìĸîȺðɸïÊ»ð˼ñË¿òÌ¿õÑÀõмôԴЖ”†,FŽ9J–>M–BN˜DS“>N’6I6HŒ:L…0F„*A€,Av&By(Ax+Dz)Ay,Cv.Fx.Fz.FŠAP”NV©``¼l^Ær^Î|fÔŒgÝœpÝ–f؃^Õ„câ¢uôÃŽúÒýÖüЕöÀxèšdØuWÀSJ¦:B.B‚;};Œ'?–2Fš8F•2G3FŒ.H3IžFV¤N\¢JX“<P•>T”BV–EW’>T“>T˜DWLb˜Nd‘D[‘F[‡?V~2Hm!>k=k@W9R7P
+4R5R :f"K—hv²’™ÂžŸÇª¤Èª¤¾˜±‡žjtv&?|$>¦<HÎ_UÜwlä‰pæ’wé—vëyë›yêž{êœ{ì¢}ð¡íš|ì’oæ„jÌ[WŒ1N¥bxŒ>`„:[t-WN?\G\AS=V>S=R@YGv6`b!G` LTCTFr4]\LSH[Ln+Tf"Km-Tf'Qf'QaG[ET=d LVA^"K^!HVDN@OFXMQFzJ†€L†’gš‘q¨•ošŸ‚¨‹SŒg‰IxƒBpSu€Ft`FO<V;Z?e=h@\9S8\=]9T7U 6X6V5Z8X:d9r @‚'=š;D©=G»GJÄRQÇPLÉTPÇSRÆUSÆUN¦5<a6Z:,F¢CRÂV[ØbXÛb\Ûg]Üg`Ýb^ÚfdÜhdÜijÞigßjeßieàhfàkeàjbàljáofâneÞniÞjdàk_âf^áicÞjbÝgcÜf^ÝdZÛhbÚibÞj`Ük`ÜhbÜqlÝrlÞphÝslÞxpßwlÞyjà{oà~sÞvÞ„vÞ†zߌ|â~à“„à“…âœ‡âŸŽã¤Žä«—ä®˜ä¯æ¶£è¾«ê¼­é¿°ìŵìôìÇ»îÆ¹îÊ»ïÌ¿òʾñÌÁôÐÁõÏÂõԹ⺭“6M9JšBM˜GTœFSšDN™BQ˜BO–:J8J8J….D-Ew%An Am=h;d6`7c8j <w,B?JŸOR´VTÒ„nä®|çµ|áªp׉dÚŽdê­zøÆúÒ˜ýÔ–úˈð¸qãŽ`ÊaP«?E”/D}"@v>| <‘2J”6I’2F‹,D0H’:OšDS¤Qc¤P_›FY“AT’=S’=S’?V’@T“@TšHZ›Lb—Ob”I^DW„7Rs$?n ?i:h;`<^;b;hAhCp(Lšft²’–¿¡¢È«ªÉ¬¤Ä£·˜”Ÿkpy+E}'A¦@KÎ`[Ûwhãˆsæ’xè™xèzéxèœzéžxì¥}ï£zïš~î’pæ„fÀWY’9T¨i~…6Xp&LeKL>\ IT@R=XBS@[Hf$Lh&KXEb$OYDUDe*P[IQD_$Lo0Tc"Je%Mb#Kc"L\De$Jb"HXBP>R@^$Ib"KVG[HUENBUNxG|ˆV™}¢›„¹‰[šv‘\‰…Fz}Axp0^ŠSxt5bT;R:^<e?]8Z<X@aFXFU<]:T7T 6Z8Z6h9{&@‹-BŸ;I´DHÁKOÇPRÆPQÈUSÉTRÈVO¶AFt6V :k?–;N¶RXÓ^YÖ`YÙd_Ûg_ÚgaÝd`ÛfbÛhfÞhhÝkcÞhbÞhdßfbßjiÞjeßnlàkhàqkáicànhànfàj_ßk`ßi]ÝgcÜgbÞg^Ûd]Ûh`Ýj]Üh\Üg]ÜkdÜngÞocÜukÜ{nÞyoÞ{n߀pà€rÞ€rà„rÞ‡yÞˆ|à|á}à”⚃â ãŸŠâ¦‘⪓䭚峤繧軦꾮ìÁ²ëöìÄ¸íÆºîȸï̾ð˾ðÌÀòÏÃõоõзîͲª]h6H–DQ›IS¡NY¢LTžKTGSšAM•?O•<L‹4G‡,D(Cu'Bo@d<^<X9X:U7]<k ?:Q»ldá¦}î½„íº€æ®rÝ”cÝ’jïµ€øÉüЕüÐøÆ‚ìªfØzS²KH•/@†(@s<} B…(A0D’5HŠ2F†*DŽ6L›AQžHZ¤Sb P^™FV“>P“>R8PŒ=R’>T•BXœJZœPh•LbH[ˆ<Ry2Il"=p <h8g:i8~,C.B~,Dy(F|4Q˜]p³ŠŒÄ¤¡È¬§Ë°ªÈ«¢º›’žin~4I(D¨BLÍ\SÜviã„qæ“xê˜xéŸvêžvæšvèž|î£vð¢zîœzípã„h´MZ™@W¶t„=^j$KbJR>dHR?TEYDf$OdFWAYCWCc#LW>SEZHh&TTBh(Ti+Qc%Mc"Nc Ic"Lb"NZD]ER=SAN <R<l-P_EXAe&QVEXCRJp<oŽ`”£Ž±˜³‘qž“lœu8p†Nˆ˜`ˆ¡n‹~:fP<T;e>a<S7W7Y4X7U7U8T6P6X8Y9d=}$@‡*@•3Dª@J¼HKÈNNÆONÊSRËPNËXT¼LL:W;]<ˆ0H®JRÎ\ZÖ_VÖ`^Ûf_Ûf_ÛgbÝe`ÛhcÛeeÝfaÜhdÞifÝhcÝgeÞllßleàrlßlhâpjànháldàjaáifàj^àjdÝgaÜe]ßh_Üd]ÜhaÛngÛg`ÝjbÜjbÚnhÞjcÞpeÛujÝthÜxlß|nà|pÞvàƒqÞ‚x߆vÞŠzß|ᓀ㜄䞌⟊⨔⫖å®ä´¤æ¹¤ç¹«è¼ªêÁ°ëÃµìÆ·íÇºìÆºïʺð̽ð̽òÎÀôξôкòбĈ‰Ž9J”BQšKT R\¢NV£OU¢MVžKU˜GW–@P>P8J‡2F~1Ht <h@`<Z?R:L 5N:Y?<RÕŒp컇ôĆïÄ‚æ¤dÛ„[à™pô¾†úÍ’ýÒ’ý̈ô¼zã•^À[M˜5C€!?u@vA…&BŒ/F5FŽ2H‚&D„(G”:QžH\¦Rb¢N^—FY‘=SŒ8R‹8QŠ<Tˆ5LAW˜H]ŸSfžSh—MbDY…8Pu(Es'Bj<f=n;Š6IŽ>NŽ:Lˆ6N†9RˆB]—^r­„ˆÂŸœË®¤Ï²¤ÊªŸº™•¤rv…@R-C¨@IÌ\PÚteã‡ræ”uìœzèxéŸwêš|èž|ì¦yï§~í™vê”pß|b¹L]¡D[·p‚’Ffj"LZBWB]DVF^Jq2Zh%IY@VBT?XDa!I[IUHYFd$N^Il,V^ Kb'Mj(Sd&Pf(Nd LVBUESBS?YBR=T:f"P[Bv6`c JVBPDMFq9f”nž“q ›²˜|±†Rˆ‹Xˆ„>q—Rh›_ŠR?W?c?W:X<V:V7S8V7U9S;V>\8^:p<Œ0DŽ.Dž<I¶FJÄQRÆTSÇPRÆWXÈVN¿OO(;a7W9u>¤@NÉVUÔ`^Ö]S×e`Üe_Úc[Úc^Ýf_ÚmhÝgeÜjhÞhcÞjgàhaßjdàjfáicâohálfàplàngàojàldápfÞmdßh]Ýg_ÝiaÞi`ÜhbÜh`ÝicÜi^ÝjbÝj`Ýi^ÚkdÜpfÜrhÞtjÞrfßxjà€nÝs߃vÝ€xÞƒyàˆzàŒ|á‘~â—ƒã˜äš‰ä¤‘㤎䫘峢赦繩缮辭ëÀ´ëõìźíɽîǹîɼðË¿ñÎÀóÌ¿ôÏ¿õÓ¶Ú®¤”=M’>O˜JTŸJS¤P]¥PX¥RX¡PXLVœFS”FV>PŒ=P8O~.Fq"Ai D^8X<N=I :Q;š<Là›nóÀ‰÷ȃóÂzÞ—[܆^çªxô†üËŒü΋ùÄ€é¨nÎoP’0>€!:q>p>z#@…*DŒ/FŠ0D„,Hƒ+IŽ6O™DV¦N_¦Vf˜JZŒ:Q‰6N‡9R‰8OŠ8OŠ8M’>U›NbŸXmžVk•J`ˆ@W‚8Q|2Jx,Gp%Dj>l!A’BR‘DV‘?PŽ?QE\‘I`dw¬„Œ¾ž›Ê¬¤Ì°¨È¬žºŸª|yŽIW„4I¥COÌ\VÚvdâ†rå’uêšxèŸ|êž{ëœwéŸyð¦|ò§}ìžuê“rÜzf¸U_ Lb®ezCct,TQ>SAXBd#J`Ca DYBW@XBZA[DXB\DWE[G]H`%Li)OZFh+Um,Xi*Te&Lf$MR@UDR@P>ZG^GL>b"Gn+Uh*Rd K\KVHXLf,^p:eŒa“ ~¤«—³—t¦|D~€G{’j•Ž_X?^@[<V8V7V9U<T9U5R5Q 1Y6^6h6(>.A“.@¨>G¾LMÅRPÅTSÉRPÈWQÂOK%;[ 7Z=v%D9JÂPQÐ]VÖ^UØ^ZÙg_Úd[ÙdbÜd`ÞhaÝlaÝleÞidÜg`ÞhaßkháhcàieÝhgàpjàleÞlhßmgànhàmfàngÞj`àh_àjaÞnfÝh^Ýf^Þe\ÜiaÜi\ÜmbÜjaÚg_ÜkcÛpkÛpjßugÜsiÞxlÞ|pÝ~oÞ‚t߀tà‚zÞˆzàŠxâ“€à•ƒâ“‚â™…äžŠã¥Žå®šæ°žæ´¤çºªçºªç¾°ê³êÄ¶ìÆµëÅ·ìÈ»îȽï˾ñ̾òÌ¿óϾöѶèDZ£LX8N•CRœOW¢T\¢PX§RW¤PV MUFR™GV=NAR‰9Pƒ2J|1Kq&Ah"@]<T;O<X?<JÞ’iðº|øÄ~ð¾xâ›fã˜jì²zõÁƒùÆ…öÀ~í­qÑzV¢:Cv9t<l>y#B‚&@‹0F„&?'?‚)BŒ3MœCT¡Na¥Sa›L\>Tˆ:N†6N„1Jˆ5NŠ8N@V—G^ Ui¡]pœNd‘K_Œ@Vƒ6Q€5Pz)Fn#?k!@s'DŒ:PBP?PCX”K`—Vj¡n~®ŠŒ½Ÿ¡Åª¦É°«Ê®¢Â¢•ª‚€’Ta†0D«DNÊXQÛtcã†qç”xëœyêž|ì¡zëžzêž|ï¥~ð§ví pè—nÚzc¦L^¦Nh¬`s†@`u*SXB[GX=XCX?ZB[DX?VAX?]Ja%L` 
HXFWEV@XAZDYGi-Tm.X^!JZIf&Od)STBVBRDVBh&Na#NP=e$Lj,Ud&P\Mb"Rm/^b$Np0\j.\m8]ŠZ‚—xš’f˜Šaf3O[A\@_AYAT<Y@T;V<V8O 4N 5X7`@d8t =†+@”4Gž4C¸HJÁMOÉ[WÆTTÈQP½KEŠ#7T 3T
+4p >š9I¼JPÏ\WÒ\YÖ]ZØ_[×b\Úf]ÚfeÝb[ßhbÜhhÚjfÞhfÝjaÞheÝlgàjcàheÞhfßngßnfàlißkfàlcàohàpiàh]àkaàmfÞhbÞg^ÞedÛd]ÝjdÜiaÝmdÜndÙkdÜibÜmcÛsfßrgÞrgßziÜ|pÝ{nÞ|qß‚tÞ†yÞŠ|ÞŠzà~à‹xß’€ã˜…✌â¨ä§•䫚沢洣湧迭龴ê¶ìÄ¹ìÆ»íǺðÇ»ð˾ò̾òǶó˼öϼðβºtxŠ3D“BP›JTŸR\¤SY¨RW¥SZ¢OUŸKU˜DS–GU”CR8L‹9M…5Jx,Bt'Bf>`:^@jE˜:LÖ‚bí²zõÀ„ó¿î¶yí±xð¶uö¼~ò¸zê¥iÔ{Tž@Cx;j<g<o<|$B‚*Aˆ,D$Aƒ(CŠ1J”<P KY¤Pb›JW’AX†4N€0J‚2L…2K†2KŒ8R”FZMd XmžXr”OgG^‡;T…:T‚8Os*Go"Bk$Ar)Fˆ;RŽ@V“DW—K^—Sgž_p¦v„®‹Ž¼¢£Ã¬¦É¯§Ê¬£Ä£–­„ƒ–Zd‡3F¨CLËZOÙrbá‰zç’téšyêzêŸzìŸvë¢|î§yñ¤zïxì–mÚn—7O®\qªYl„@`d FR>_GT?YDYDX@ZD[@ZCX@ZCb%JbIWGZGYEUCTDc%Pd%Le'QVJVHp4\|>fVBZHVFL@g&Q|@j`LTDf%Js5f\LXLs>gu;raQw@svAp€Oq<l’fŽž„©v@gZ?\<[>U>T>\@T;V8U=N=R 7[:`5n9†,@ˆ,@—3B®BJ¶FOÅQPÈWRÇOK¶DHv9S
+7W<v#@Ÿ6F·FLÊVVÒYTÒZZÕ^XØa\×^YÚf_Úf_Ýd[ßjhÝgdÞlbßh`ÜhaÛebÜmfÞgfßhbÞgdßlgàqhàohàkdÞkfÞolàpkßmfálcàlgßneßiaàg_ÛfbÜlfÛibÜngÜjbÛohÝj`ÜleÝrfÝrjÜrjÜtfÞvjÞxpß|nßrÞ‚wá‰xàŠ~àà‰{àŽã—ƒäŠâ ˆâ£ä¨–䱠峥浦绫農ê²ëÄ·ìÄ·íɹîʾîʾð̾ðȹò̾õιóдա–’7G<H”BPœKT¢QV§V[¤TXŸPWŸNX™EM™HT–ET?RŽ?R†7J~1H}2Kv,Et"?v$C{'C“6LÐweê²€ôˆôÊõ†òº{î®jê¤jä’_ÎsY¤JM‚)Dl<g>k>u"A€)B*A"?~$B‰3K—?QJY¢L]žL`“BXŒ>R‚6L2L…/H„6L‰6O’@Y˜Lb Uj \qšPg’E`ŒH\†=U€4Nx.Jm$Cp"Dt0Lu.Kx-JŠ>P–L\œXhœ\m dv§t€¯†‹À¡¡Æ¨¤Ç«¥Å§ž¾ ˜­„€—\dˆ6G¢FPÉZWØsgà‡tår蚀êxéžzêŸzí¤~î§ð¦víœzë”sß‚i–6R°bv¥Un†;_ZBQB`HRAXGW@\FXAaJbJVB]Df$J_FS@VDTFUF`'Su5bj/Zj1]TD^!Ox>d|Ad[FSAZHKAf(Y~Ei|>g^L]LzBmZH`&\a0X}Jyf'`v>o—l˜”ld&Pp1]p@X‚Jud?Z@U?T>W;\<X:R9P7O 4V8b@c<z)HŒ0F4I¯CH·FK¶GIÀOJ´FFŠ&7]6N7`6‰-B¨>HµHNÅONÍXUÐZXÔ^Z×^XÙ`\Ùa[Ûe`Úh^Ûd^áj`Üj`ÞibÝh_ÝjcßjcÞhbÞieÞjdÞkgßldâniâkbâjbÞlgÞngáohßoeàlbàldàkdàmdÞhaÛjjÞk_ÚifÝndÚh^Üh`ÜfZÚl`ÜpfÝphÜreÜujÞwjÝvhÝ~qÞrÞ~uÞˆzàŠxà‹|à}à’‚â”‚á’„àœ†âžã¦–㭚䱣䲧座漮迲ëÂ´ëÆ¸íÆºïÇ»îʼï̼ïÊ»ðËÀõμôж侬žCRŠ:K“CRšJUžKQ£PX£PT¡PX¢LVšHV•FQ—MX”DT”FSŠ;P„8O…8J2J‚3J†6IŠ3N£GUÏ|lè´‚öÇ”øÌ–÷È’ò¸zæ iÙ|TÂbQ¢BJ‚*Du"@n;n@v&E‚.F„,B)B|"B†/F‘8OšFX Qb¢O_˜MZŽ?U„2M2M€.J…2L†4MŒ>VšNbŸTh£[nœRh–MeŽBZŒHZ‡=R~4Mt*Hr&Et,Gq*Jm Ak">„?W’J[Znœ^oœ^oœj{«„¼ ¿¥¥Å©¨Ä§ ½Ÿ”¬„€˜^iŒ<L£GPÇXRØrdà…oå–xèšyèšzêž|ëŸ{ì zî¦zî§{îyì“o߃hž:T£Wl YpˆBfR>N?YCTB`"N]Cd&NT?f&N_IWC^Db%Hg'NWEQ=SCTFb'P}8duAnk6`SJe&UvAf~Djr4\N@R@VG] I|Cnk,Sj,V_#Te+MwAu[ Jj+\`!Rw:nl0i‰T~¥‡©€Mzv4jTAwAhn'OU>Q;S<Y=\?S8P 4Q:M 3U 4^3n:ˆ1A–1?£=E²AH¯BGœ6@„6b6R:Z:x$B 9GµBL»JPÆQQÉSUÎ\WÓXRÔ]VØ`ZØaXØc^ÜdZÙfaÙf]Üg`ÚicÝf\ÞhdÞljànaÝleÞießifàjfâldàngßnfánbßtlàngàrjßpkàmhámfßofàofÝndÜlhÝliÜj`ßmcÞj`Üi_Ýk_Üi^ÜmeÞndÜqgÞwiÝuiÜvmÞylÞ|tÞoà‡{Þ‡zߊzá‹~â’â‘â’‡à›ŒâŠâ¢—㪘㮠Ⲣ涧湬農êÀ³ê¶ìŶîȾîʾïËÀî˽ï;óÏÀôϸêʯ¬^i‰7F’AO–HQšMYžMUNVŸNS¡T[›JU™IQ™JV’GZ–HV>P‹<Qˆ;TŒAR@P”=P–8K­KTÓ€pé°„÷È•ùÌ‘õÂ…è¤pÑyS±PK‘4D(@|">r<t@{&B|*C‚.E€&B'A‡,H’=RœGZ Q`ŸNa˜J]EW‡5L|-Iy*H~,G„5PŠ<Q“BZœPb¢Zl£ZkœNf–QdŽG]ŽFZ€6Ly1Jl @t%Fz0Kv1Ml$Bb<€4LŒG]—Vf–Vi‘Si”^t¢y…µ–š¿¤¡Äª¤Åª ¼ š¬‚˜Zf>L¦GRÅVP×rdá„mã“wèšzëž|êŸ|í }ì¡€ï¤ï£|ëžxî’kàˆn >X˜If˜Tny2TN@N>XCTD]"KX@b!JZBk'O`FT@XG]Fq2\b&OSAXFVDl.Yv0^yLuv7cTHb$Tu@j‚Lt~Aj`LRA]JV@€Fli)TQ>WJ]%Oh‘b%Vd&Y^Rq:it9u|Ax¡¡ŽaŽq5d\Ih.Vj.YR<N8WA[<T9R9R8Z=U:\5j4)?Ž1<”1?‰,=|<h6V@W9`:$@œ0A­@IµBG½KNÆSTËWXÎTTÐYYÒ\VÐecÖ`\Ö_\Öa\Ûh]Ùe^Ûd\Üg_Ûh`Üb^Þf`àleßkbÞheÝidàgcÞpiámdâlbàngápcàqdßtjàtfßrjáqißrjàphándÞmbßleÜljÛjdÞjbÞiaÜj^ÝlbÝi`Ýj]ÝpfÝpdÝugÝvhÛxlÞxlÝ~qÜ€uÞtÞ…x߈|áˆ|á’┃◈⚈⛌â£ã¦–䫚㲤䵧幭缰辯êĹëŸíÇ»ìË¿îËÂðʽïË¿òÎÂñνïί‡„†3E:G“HR•JT›LWšLTœPVŸNU™NW™HTšOZ”K]—MX“ETŽCVEV•FX˜HV™CQ—>Q«HUÒxf樄ñ¿Œð¿ˆèª{Ö^¨HJŽ4Gˆ4K~.F|(Fw!A€,Dƒ.E‡2H„.D‚*D….H”:QžK^¤Tc¤WjšN\‘CU†:L€2Hx(Fu(Gz&B‡4M‘BW—F\¢Vh YlŸVgšQg“MaŽH]„>Vy.Gl"Bq&Bz,Hx-Ht,Gh!BZ6o'C~9PˆDZ‰F[ŠKe“\t u„°“–À¤¢Ç«¦Èª¡¿¢™°ˆ„Ÿdn>I¢ENÅXQØp`ß‚nã’wêœvìœì |î£|í£xï¦zî§vëì”lä‰m±P`Œ7XD`n(OQ@TAU>YE] I[H_GT>m+Vf"JZBZHX@v<gTBVCYFVDj*Sn)TvLoq3\KAd&Vz:hŒZŽ„Lu€@n[JWFQGh,Q„KtK>THc&ZsHa`•b#QY Td(_uBx‰W––dŽ«—´‘_Œ\E^Fd+SS;P8U9Y>T=ZAX8[8^8f3i8i8e8^>S7W9[>j!D‚,Fœ;OªBPµDN¼HLÀNSÉTUÍUTÎ]^ÎXTÐXXÒZVÒ^ZÖ_YÖa\Ö^ZÚcaÚgaÙfeÜhaÛebÜjfàg^ÞjaÞjcÜfbÛfdÝkhßmfßlhÞldÞngàpdÞpjÞtràphásfàtlàsmÞnhßlhÝnfÝphßodÚjeÜhbÝldÞpfÞnbÜnfÛlbÞodÛphÜrgÞtfÜxlÝynÝzrÞ~sàuÞ„w߇{á†}á€â“ƒá–†âžŠâ‰â¡“⤔䬘䮜䳦䵥漯龰éÀ´ëÄ·ìȾìļîËÀíÊÀî̾ñÎÀò̹òήܮ¢9GŒ;HŽ@O’FO•JU˜LVšOXŸSZžR\›LW™R\›PZ–MZ’FZ“JY™M^ P\¤P`¤NX¢GU¨GTÈc\ÛuâpÞšnÌyZ§JJŒ8N‰8Nˆ:K†0Iƒ,D†0Fˆ/DŒ2Jˆ/F„*E…/J‘8P™FZŸRa¢[jMY“AQ‰9L‚2J~5Pz(Ct'F|-FŽ;O™M`Qf¥^m Zož\n˜Pg•NcŒFZ€7Mr(Dp&Fr,Fw*Ft(Fj!CgAW:f >p*H€<T‡D[‰HdVpžo°•¼¤ŸÄ¨£Æ¨ Á ˜±Š‡ kr–FS¦FRÄVRÙnaß‚mäŽvê™xìœzí ~ð¤zî¢~ñ¦ð¨~ïŸzî•sæŠn¼\fv'K}6\]BUDSBR>U>X?YE\J^Jk'Pg K\I\H\F|GsVEVDZHWId!Nd#PzNxk*XMG`$S‚E€Œ_ŠY†ŒT€j'TV"QTL^LŠUz`(VLHSEb$W‹a‚t7fn2f_!Ut>pb”^™šr‘¤z ‚Ft`L`&NR<P:V9S:T8\>]?\8^5[2U 
5T6^<d:t(B†(@“/Bž6C¬9D¯<JºGNÂPTÊTVÎVZÎ^[ÐZWÑ\\Ñ_^Ò`_Ò]YÓ^ZÔ\V×`XÔb]Ød[ÖdbØ`ZÚjcÙgbÜf\àj`Þh`Þf]ÞfaÝjbÞiaÞifÞjeàidßlcàsißmgánfàqjßrlárißsoßojßnfÞnbàneÞmbÝodÜhbÜjcÝj_àp`ÞnbÜm`ÞndÛogÝrgÜtkÝvjÝzqß{nÜsà„wÞ†|߆~àˆxáŒ€â‘€à”…âž†â Žá¡‘â§—äªšâ¬œã²¢ä´¦æº¨è¼¬èÀ²éôíŶíɾí˾îÊ¿íÌÀðÎÀñξñз嬜HX†7HŠ<LŽ@N’IV•KX–MRRXŸR^žS\ V^›Q\˜K\–P]˜R^ Ra¨W]¬V\ªRZªLU®LV¼VWËl`Êp^ÀeZ¨JS’:LŠ4K‡2F‹.E4H’4G”:K‘4FŽ1B,Dƒ,G‰8N•DYL^žQbžT`“ET‰<J€,C{(Bx(Ft!@v(F†4PšK] RežYj¤]n¤^pžZn•MeNd†?X|0Hr)Dr(Cn"@r#@q$BhB_;Y@e>j @u1L€;T‚A\ˆJg›gx¬Š¸œÃ¦ Â¦Ÿ½ š³‰„¢lqšRW£KTÁXS×kZá€læ’xê™zê€í ~î¡|î¡€ñ¢ñ§ì€ëtç‰rÆddr!Ev5ZT@XFRCV@\)M\ATAZDb"Lh*Tb!L` Pd"O`K}Mub LVBYFZKd*Wn,Y~Nvc MOCZL|@v‡Tƒ_ŠZ†j0]PGKBt<rƒHiŠ[…L@G <j+`k.Z‹X„g*Xn.nd(X“i™Ž_‡yDt}KqZ‚zAm\EOBSDWJV IYHW:S3U4X4a3m:v"=…$9‘);›4@§4@«;E®;G»HQÀLRÆTTÊWXÏWSÎTUÑVUÐWVÓZYÒ^WÓ\UÓ^ZÒ\\Ö^VÔ_XÖ[T×c]Øe^Ø`ZÜc[ÚfcÝh^Üe_Üf`Þe]ÜgbßlgßkdàlfájeálcàmfàmfÝqlßrlàtpàtjátlÞnjßnhÞtmÞndÝsißnbÞrfÜldÞkbÜpfàlaÝodÞl`ÞncÚqhÜngÝshÝyhÞ{nÞxlÝ|qßuàƒu߃wà„xà‚â‹~áà˜ˆãŽâ¢ã§’䫘䮛ä°ä³£å¶¨åº«è¿²é¿²ì¶ìÄ·î˽î̾íÊ¿î̽ðÌÀòθì̯´muƒ4E†9JŠBPŒDR“KY•KQšNZ¡U^¡TZ W_œXfšP_šRažTa¥[f«^a¯ZZ²VW±SX´RX²OT´TV²TZ¦NW›CR’:O’=O“<N”:LšBRš<K—:I–6GŒ-A†*C‰.I“<R™H\œPb˜R_DVˆ=Qƒ0F.D},Cu$Bs#D|0K’ATŸQ` Vl Vh£`r¡^qœXo•UiŽIc„9T{0Is(Ep"Bm"An$Ck @c<aC^ Ad"Ae?m(Hz6T~=[‚Ab•by«†»Ÿ Æª¡Æ¬¥Ã¤š²‘‡¦stžW_¤LY¿WV×jZà€kåuê™zí}졀ñ¤€ñ¤~ì}é’tã‰p»[_t"Iu1W[H^HVFV>[F\FR=YAb Kk*Te%Qb%Qb"N_Mn:Zq5fTDQBVHc M{BdzCkXFPGSGv<h„Pˆ|Ht”j’zBrPCF Cb%WˆNx•jŒn-YC <MFr2ey@\ƒL}^$Nh,fS‚ŒX‡’c˜j4j†S|~Di]"KP?P<V"BT8P4R4W6`7k=t;…'@’/B›8H¢6E©CL¶HL¸FIÂMNÆPPÈTXÉVYÌVWÎWUÏXVÐZYÐZXÒZZÓ][Ôb\Ô`YÐ\WÖ^UÖYSÖ^ZØ^\Ød\Úd\ÚhaÜd^ÞfZÜc\Üb_ÝfbÛohÞofàj`ài`âldànfßibÞkhÜliÞqjßrlÞqjàpfÞrnÝnfàsjàpiÝqgßpdÞvjÞndßneÜocßl^ÞleÝldßrlÛohÝqdÞthÜviÝviÞvlßznàqÞƒsÞtÞ‡y߉~à€à‘ƒá’†á›‰àœŠá£â¥‘㪚㰜㰢䴤縩湮述ë·êÅ·îÊ»îʼìʼî˼ï̾ðκïÎ±É”Žƒ4K‚3H‡@OŒBS‘JT’KV˜OZžS]ŸV^ \bžWcœV`¡Xa§[f«^b°\]µVV²TX¶TW¼TW´NS¬LUªJR¢FSœAQ™>Qš@R¡HV£FR¦HTŸ>O–:M5I‹2G‹6J7P—BVœPe™J^H[ˆ8L„6N‚6Mƒ2H{*Fw,Fv&Fˆ;T˜J^¢Xl¢Zm¥bw£fxœYj–Tf‘Nb‡CZ|/Kv+Fo$Ck @l"Do"?gAb <bA\@f%Dh*Li%Cn,Mt3T{:Z’]t¯ˆ‘Á¦¤Ð´¬Ô¸®Î´¬Á¤–®} \e¥JSÀYX×l^â‚kæ’vìšyí ~ì î£~ð¨ò¦í¡|ç•tÐ|a”<PlIp.Ta.PY#MSAWDZ@ZDV@_"MaHh'R] J` Ne$Nb$T\ I}FxZGQARF]NyDjt4aQBQFQJv:k‹Y–g'X“j’’f’\MNERL„Hz•g‡„HmVF><XF‚Bpaˆh(\SJo8jˆV‰’a„T€p7cg+Ka"KM;R>U?T:W=]9h9l3t58•*: 5B©:F²BH¾OLÄNNÄOKÄPOÆNNÈTSÊXVÎYWÐZXÒ^\ÒZXÔ^\Ô][Õ^UÖ\VÕ\WÓ_W×]WÖ[YÖ`XÚgbÚb\Úd`ÞhhÜfZÞd^ÜdaÝf]Þf_Üjeßibàgbàh`àkeànfßjhÞnfßleßmhárißpjáskápiàpiàsjÞtlàpkßqlßwqÞrfÞpfßodÞreÞriÜncßphÛtlÝvkßvhÜxhÞvjÞ|rÝzpß}sÞ€rà‚uÞ…zߊ{âŒ⎀ᑄ╆♊á¢ã ’⣔⫝̸ᮛⰣ乨巪绮èÁ´ê¶íȸìÊ»ëÉ»ìǹî̽ð̸ñͱװ¡Œ3H€0D†=L‹CS’IT•MWšQZU]žY`Xb [d£]e§^hª_d¬]a¸`^»XV·TU¸VVºQT´NR¬IT¬KT¨HT¥FP¤DR¦GU¯NR°NX¯IR£BO—:LŽ2HŒ3J8O˜@TšF\•JZˆ7N„6L~2G}+B‚0C‚.C}2L|.H2K‘@TžN`¦\l£`l¨ap£`s˜Tf–TeŒFZ‚6Ox+El Bj @n>f>e:d:b8f6^=b BdBbAj(Jj)Nn-S”Yp´Ž“ж°ÜµßóÝÀ®Ö¹¦»•Ž£_f§T[ÀZXØn\á„lçríœ{îž|îŸ~ð¤‚ñ§ô§€ðŸâŒpªS^z,QdDh%N^C_JV@\ I[DUBVE_Hc%Pf%N\Jb Md$Mf(TXIoAao3cP@VH]K‹aŒ|@mTLUNTLzApŒ^“hY‚R}šwž}GxZ"PPEqGoŠVz„Oyr/^H?J>v0`zC_”h•VNPEr3e}Ju‘bl.dR>b$MP?T;ZAV@\>c8k8q0€!6”0?¥9A²?IºCG½NOÅMJÊPKÊRPÇNKÉQPÊSRÎWUÐ[VÐZUÑZYÓ\XÕ^XÖ_WÒZTÔa\×\SÖ^TÙaX×^XÖ^ZÚd\Ø`YÚf_Úc[Üe\ÜdZÜh`ÞfZÞg`Üi`Þiaßg]àf\ámdàj`àlcßlfÞmdàqkáphàpiàniàrqßrkàulÞqlÞskàqhßulßrhàrlßrdàncÞriÝpfÞwlÝrfÝthàvhàxgßxjßwkÝzpÞznÞpÞ…vÞ…zߌ{àŒ~㑀ᓃᖆàá ”â¤à¦âªšâ­žà® äµ¦æº¯ç½¯è¿±èÁ³ëÄ·ëÆºêɺëȽêǼì̼ñ͵âîšKZ|.G…;K‡BVŽDOšQYœR\žRYžXbVb [f¤`fªaf¬af´`^·YZºUU¹VU¸VV¸UX·QT´QV°JT«JR©DT¬FRµQV¼TV¸QU®HR£@Nš:N“6N˜=R•DV˜I`–I\ŒAZ†:L~,Dz*A|,D~.G~)>~6N4NŽ;NœPd¥\h§aq§s¤^nžZo–PeŒEY†?Uy1Hn)Gh$Ah @i>h"@f>f;f<v:Z>d"@bA_@h"Eg%Jo.W”av½•—Û¾¯áóâijß°޾¤Î©“­po§RZÁZVØjZà‚næpìšxìŸ{ïœ~ð¢€î¢€ð¤ð¤~ìž|â“y¼lpOlx'Pc KVCbFf&NZAZDQBYB`#PbHfKb"Ld"Nj(Qi-UZL\&IzGvL@TGb'T˜uz8bVEWOVH~EtŽbo'_w8hŸ}¢•j”b#LOBRFy@h‡T€_"J`#SI @j'Ou/^ˆXs€FxOBq0dn1cŠ[‰yJum6po4\T=P:V 
7S6_6l6s2‚!7—0;¤7>°<@¹EG¾GGÀMHÃPNÊQJÊTLÈTNËRNÉSMÐWSÒXSÒ]UÐZVÒXPÓ^YÕZRÒXVÖ^XÕ^ZÕ`YÚcZØ`Y×^YÚ^XØaZÛaTÝd[ÜaXÝhaÜh`Þh`Þf]ÝdaÝh`ÞfZÞjgàhcáießkdàjaÞhgáogßqjàleápnárkàrsãtiàtkàpjàtfÞxoápjàvpßsjßreÞxqÞvlßtnÞtiÝtkÞznÞxjàylà|qß~kÞzpß}mà„và„táŠxâ|à‘‚à—…à“…á™‹à â¤“ᡎᩜ⬜ᮟ䴦漬溬龲èÀ´ëõêĶêÈ»ëÈ»ëʽìͺñ͹êʱ¯ox{.J„8H‰>JŒGUŒGPŒHT˜T]˜Tb›T`ŸZb¨bi­bf°ae´``¸ZYº[Y¼VQ¼TT¹UV¸RT·OU¸JP³LT²NV¸VYÁWXÀVV¶RY­LT¦AR@Q—=T™DZ˜F\—H^’K_„>Vƒ6J~.C}-F‚.I}0F}/H0I‹=SœM^£Wd«bn«du¨bpž\p˜VjŽJ`ŠBU4Lr)Fh#Cd>h">k%Cf:h;i<r >Œ2HY<`=`@`>d&GcHk*P˜bz̤œÛ½±ß³ãijàòݼ§Ó°’»ƒw¨PWÀ\\Õm`á€järìœvìœxìxï¡|îž{î£~ñ£|îŸ~é‘sÈnn³do‚3VcJYE\D`DXD_KPA_"Jb-Sf&PfMd Pi"Nn)Um+Tf"RTFzJpb PVLg-W˜r—s2^[JVJ\NˆQ|e•eR}4q„Uv ¦s4dPBSJh*WŠ\‚g,ac&Pe"TZA€:dn.T†NucTi)[p,`„T€G€†V„_‹ZDL 6N 2X 1c3u4ƒ#;’.<¥5=¬9?·DI¹HJÀGGÃNKÂJJÊPNÊTOÌSPÍVQËRPÐVOÑZUÔ`YÒ\UÐ\XÐ]VÔYRÑ[VÖ]V×a^Ø`XÚ_XÚb\Ø`[Ú^RØ_YÜ_VÜd\ÛaZÞd[Ýf^Ýf_Üb[Ûg_Þmfáj^Þhdàh`àkdàg^àlbàlhßplànjàniásnàoeàqiâulßriâtkàthàqkávjàwmÞthâujßxmßsjàslÞtfÞtgÞwlÞzlâyná~sà|sà{pßzoáƒzâ‡{á‹yâà‘€ß‘à’ᚆ⠊ឈ࢒䨖⭜à¯ä²¡ä¶¨äº­ç¾°çÀ´êĶêÄ·êȺìȺìʼìÊ»ñ˹îαʓ’}/J{4Lˆ>LHR’R[–MY—P\”NZ›Vc¡\b«^c¯ad²]`¶\^º][À[WÂXV¼UV¼UVºOQ¸OU½RS¼QS¿WYÈ[VÊ_XÂXWºOQ¬GR ?Pœ@U›DXœNb”F]ˆBZˆ@Y‚<Xˆ;Qƒ8Rƒ4J‚;P€-G†2H‹:P˜H\§Vh­cq¬hs¬fv¤cp›Uf•RiˆD]€;Rt*Fh @c;`6f9i<j)Fl<}+Fˆ0C™8IZ>[=b?`@d"EaEj'P˜\rΦœÚ¼¯ÜÀ°àñßÁ®Þ»¦Õ²˜Áˆ~¥R\¾ZXÖnbà‚kåŽuìœvìžwê xí¢|î {ï£|ñ¥€íšxß“xºYbžNbv-QbF^DWAX>ZAc JUH^H`#LdLk%Rh"Nr*Vz6\z5]r2\[Jd$Ms0`\OsDnŒ\‚q,XcJ[Nj"W„Fq`TB~?ut0dšz˜–d‘[KRIk/b_„‹X†XJe&Pf&Tr0T}<gf*Oh+XVGn/`~@k€I}s9q”fŠ^‰^!KT 4\ 4o3€7&9œ+;¨6>¯9B¸FHºDEÁGFÆPQÈPNÌOLÊWTÍUQÎUPÌWTÐXQÐXOÒ_WÒ]VÐZWÐ\XÔ^TÓ`[×`UØbXÖ]ZÙ`XÙb\ØbYÜ\TÛ^TÚh_Üd[ÝjdÝf`ÞeXÝc]Ý`XÜf`ÞjhÞjbÝgeÞmkÞkfßgdàicàngánjâpdârgâtjárgàsjàkiàrfásháqjßrhârfázpàwnâxiátgÞxlávhÞrfßuhÞxoÝzjà{lâ~lá}oà€rà~rà„wã…và‹}áŒ~à‹|ÝŠá’‚á–‡àžŽàšŠá£’ã©—ã¬á®ã²žæµ¨å¹§ç¼±è¾±êÁ´ëÂ·éÆ·ìÆ¸íʽíʺðʺïͲڲ¦‹:N4I„<KŠ?IBN”FO’NZ”O[šR\ž[b¨\`®^a´[\¸^^ÄaXÃa^Ã\VÀTRÀTS¿SPÁYWÇXTËUTÐ`WÐ`XÌ_XÃVS¸KR£GTž?Oš@V™DT–E\Š<X‚<V6Pƒ9OŠ<U„:P†:P†5K„2GŒ<Q˜H]£Xj¬bl¯mx¬kx«htž\m—Uh‹EZ€<Tt)Ei<f!A^8a>e>g=p&By(CŒ5HŸ?J¢<KU<\<^>^:c D^Fc&O‰OmË¢šÚ»²Þ¾²ÞÀ³Ý¿²Ú¸¦Ò®—À‡y§NXÀ\XØn_ß„nåtìšwëž{ì î |좀î£ñ¤}íŸvÞ‘w¤JWŠA^j"Gf$NdHR@P<_Bh%LP?g*V[Hh)Rn(Rg#Pp-Vs*Tx5Zx6b\JbMp0fj.^o’†HpbLg!T` QiR†Lub‹P@w8fv3fu<h“jŠ€FzXMf'U˜p—‹XzXDRCTDl(Q<^s0\f(SWIWFaL~Kym,b}?g—f…X|j$Ki<‚$<–->œ4B¢1?©7@®?F½GHÁHIÆMJÊQNÊSRÊPPÎTTÎXSÏXSÐWUÑ\SÓZRÐZXÑZXÐ^[Ò]TÖ]TÓ]TÕ^X×_TØ\UØ\XØaZ×`ZÛ^XÚ^VÛcXÜa\Üe[àe[Þi`Þf_à^WÞdZÝc[Þd\Þhhàh_àhdÞlfàjcÞnhàrgárfâpfßphßrjßrjÞkfàrlâsjàtoßsmârgáwlàxlàuhàtlàvhßyoßymàtgßxjÜ{pàwkßyoá€rá€tá€pá‚sâ…yáˆ|áŽ€àŽ„àŽá~ß“†á˜ŠàšŒàžŒâ¤•⪕⯟䱠崦帨帬翰꿰êÁ´éÄ¶íÆ¼íÈÀìÇ»ïʼðηæÄ®¥Vl„>Z‡?PˆDS‘HNŽ@JBM”JV™W`œW_¦Y`«X[´^_Âe_Ìg\É`VÆ[VÅ\WÆZTÆXTÄZWÎb[Ñh[ÖkXÑdYË]UÀTR±IR¡>NžBQ—>O’:P‡7Q{4Q4R‚6SŒBY>XŠ@Tˆ:Q†:P…4M”DUžK^©Zk²gp¯n|¬ktªguœ]kŽH^:Qv/Hg >a<_6`:`8c;j?u+F†4Jš@R¬FRªANR9^>`=\=` C[>`H†HfÄ™ŽÚ¼®Þ¾±Ü¾°Ü¾®Øº¦Ñ¬·zq¡S^Â`\Øl\á„mæŒnë–xì„ì¢zì¢|ð¤‚잀á–{¢JWFdh&Nj%LY>Q>P:d"Gi*PH8g(RV@r0Yr2[k"Mq*Vm&Nt-Xx4^f%ObS|@t|;h ˆ«r4Y`Fq.`VBd R‚Gm^ˆTHi(Tt4`p+d|Fk˜nœs2l\!N x|EhN=YFSJZDo*Xy:`e-Se"PTD_L}Lso.a`F‚Lzz7NR€z8’/D—1A›2>¨:D¨8@²@DºDF¾HKÃJJÆLNÈQRÏSRËWTÌVQÎXQÑZPÐXQÒZTÑbZÑa[ÐZWÒ]TÕ\TÔ\VÔ]WÖ\T×`\×b^Ùb\Öd]Û`TÛ^XÜ^Wß`YÞe^ÞibÞe_Üe]ß`XÞc\Ýd\ßb^ÞfdâhfàhdÞnháihàhbàojândârlÞnfÞpfÞnnâmcàrkárhàqhßrjàrhàvjàxiáypàwlßvhßxlÞxqáyißxlÞ{oÞztà{pà„rà„tà‚xâ€tà„tàˆ~ጂà†ßŒ„ß€à”ˆá”Šâ˜Œàœâ¦”᪘ᬘ峢䳤䷤䶨溰辰èÀ´ê´ìŹìÇÀîļðÊ»ð˵êÈ·Ân‚žR€’IhˆEZŽDRŽBPHT“HRšPWœRZœPY°bf½f^Çj]Ñh\Ïc\Ì`VÈ\XÎ]TÊ[TÈ`\Ðj^×l[ØkZÑfZÊXP¸RS¨BK˜<J˜<L6Lƒ/H1Lx2P|2P‰>ZŽ@]AXŽBZ†<V‡6QŒ<R—DV¤P_¬cn²fp®mvªjx¥dr–Vg†D[v+Fh#@f"D_<Z2_7`:k>v,N~8P–BV¨GT²DN·HQ \ No newline at end of file
diff --git a/contrib/ffmpeg/tests/libav.regression.ref b/contrib/ffmpeg/tests/libav.regression.ref
new file mode 100644
index 000000000..a5ea5f15d
--- /dev/null
+++ b/contrib/ffmpeg/tests/libav.regression.ref
@@ -0,0 +1,106 @@
+ffmpeg regression test
+8a0536ccfe36f4fff408b3327d33e1dd *./data/b-libav.avi
+340344 ./data/b-libav.avi
+./data/b-libav.avi CRC=0x400c29e9
+85caa242a36abeab85a27c82ba73cc28 *./data/b-libav.asf
+339767 ./data/b-libav.asf
+./data/b-libav.asf CRC=0x74113749
+1ad618e9fd318eb3483270cf9b9b3f43 *./data/b-libav.rm
+355405 ./data/b-libav.rm
+bdb7484c68db722f66ba1630cf79844c *./data/b-libav.mpg
+378880 ./data/b-libav.mpg
+./data/b-libav.mpg CRC=0x2b71a386
+0bf8fb3b4d7bb70ed89356e94cd8ffa2 *./data/b-libav.ts
+471316 ./data/b-libav.ts
+./data/b-libav.ts CRC=0xcc4948e1
+16cf1c37c123f8d3c4a31d192d9b0cc6 *./data/b-libav.swf
+41836 ./data/b-libav.swf
+./data/b-libav.swf CRC=0xcdec36a1
+87a8c8be5db2513356b28f29dff5769e *./data/b-libav.ffm
+380928 ./data/b-libav.ffm
+./data/b-libav.ffm CRC=0x01522b4a
+5e8aa303a877cfac6569ed11cf20e92b *./data/b-libav.flv
+335810 ./data/b-libav.flv
+./data/b-libav.flv CRC=0xe14e8847
+16518706f425cb537362bfc1c58b8de5 *./data/b-libav.mov
+366923 ./data/b-libav.mov
+./data/b-libav.mov CRC=0x45079dca
+26c41db318d9aacfd6b9e734c0ea4d94 *./data/b-libav.dv
+3600000 ./data/b-libav.dv
+./data/b-libav.dv CRC=0xa6b8b635
+2b3f921fb7a01bb126cab5ee21ae3f8d *./data/b-libav.gxf
+815700 ./data/b-libav.gxf
+./data/b-libav.gxf CRC=0x238a01b0
+9a9da315747599f7718cc9a9a09c21ff *./data/b-libav.pbm
+ 317075 ./data/b-libav.pbm
+./data/b-libav.pbm CRC=0xb92906cb
+6ea0e1faf08f6fcdb44db4a104361b57 *./data/b-libav.pgm
+2534775 ./data/b-libav.pgm
+./data/b-libav.pgm CRC=0xf4aa7c47
+0c5fe86621b7377705837f304d4ba1e9 *./data/b-libav.ppm
+7603575 ./data/b-libav.ppm
+./data/b-libav.ppm CRC=0xb2bb8e92
+88a98269295fbfce7816558ad84e1259 *./data/b-libav.gif
+2906382 ./data/b-libav.gif
+b977a4fedff90a79baf70c8e02986820 *./data/b-libav.y4m
+3801810 ./data/b-libav.y4m
+./data/b-libav%02d.pgm CRC=0x84c09106
+./data/b-libav%02d.ppm CRC=0x25c06ecf
+./data/b-libav%02d.jpg CRC=0x62328baa
+b0a8c8063d81921db5d7c8f50a1cc454 *./data/b-libav.wav
+ 89132 ./data/b-libav.wav
+./data/b-libav.wav CRC=0x2a09519c
+e2a6d6fae17394dfe87cb5bb8ae11837 *./data/b-libav.al
+ 44544 ./data/b-libav.al
+./data/b-libav.al CRC=0xefdf94c3
+4574d7e2c09e1e13663e61bd2889f12d *./data/b-libav.ul
+ 44544 ./data/b-libav.ul
+./data/b-libav.ul CRC=0x6064b2f8
+7a21ff174e3cca1702e0826c4ca0eccf *./data/b-libav.au
+ 89112 ./data/b-libav.au
+./data/b-libav.au CRC=0x2a09519c
+272b91d8fc31ed43b08246d182719751 *./data/b-libav.mmf
+ 22609 ./data/b-libav.mmf
+./data/b-libav.mmf CRC=0x03633476
+c8cf5bac13fb7862bcbce76977328f92 *./data/b-libav.aif
+89160 ./data/b-libav.aif
+./data/b-libav.aif CRC=0x2a09519c
+8d117c49d6b210abe783d1b0b897cec7 *./data/b-libav.voc
+ 32768 ./data/b-libav.voc
+./data/b-libav.voc CRC=0x49972c8c
+ce356ce2708cb6033ab5d762da93cfd4 *./data/b-libav-yuv420p.yuv
+ 304128 ./data/b-libav-yuv420p.yuv
+ce356ce2708cb6033ab5d762da93cfd4 *./data/b-libav-yuv422p.yuv
+ 304128 ./data/b-libav-yuv422p.yuv
+ce356ce2708cb6033ab5d762da93cfd4 *./data/b-libav-yuv444p.yuv
+ 304128 ./data/b-libav-yuv444p.yuv
+ce356ce2708cb6033ab5d762da93cfd4 *./data/b-libav-yuv422.yuv
+ 304128 ./data/b-libav-yuv422.yuv
+545f61c353a8b4419808785cb4f0069d *./data/b-libav-yuv410p.yuv
+ 304128 ./data/b-libav-yuv410p.yuv
+d6c03f930018ff859bd43f52b92e9321 *./data/b-libav-yuv411p.yuv
+ 304128 ./data/b-libav-yuv411p.yuv
+14117c4d7956775a7bbceabfc38da808 *./data/b-libav-yuvj420p.yuv
+ 304128 ./data/b-libav-yuvj420p.yuv
+14117c4d7956775a7bbceabfc38da808 *./data/b-libav-yuvj422p.yuv
+ 304128 ./data/b-libav-yuvj422p.yuv
+14117c4d7956775a7bbceabfc38da808 *./data/b-libav-yuvj444p.yuv
+ 304128 ./data/b-libav-yuvj444p.yuv
+deb2f7ebe297df2c1fe264d19b34d2fb *./data/b-libav-rgb24.yuv
+ 304128 ./data/b-libav-rgb24.yuv
+deb2f7ebe297df2c1fe264d19b34d2fb *./data/b-libav-bgr24.yuv
+ 304128 ./data/b-libav-bgr24.yuv
+deb2f7ebe297df2c1fe264d19b34d2fb *./data/b-libav-rgba32.yuv
+ 304128 ./data/b-libav-rgba32.yuv
+5d395f62bff8ac475f743268c772ca3a *./data/b-libav-rgb565.yuv
+ 304128 ./data/b-libav-rgb565.yuv
+2ffd6871fcbfe9570454e8703ac8ea01 *./data/b-libav-rgb555.yuv
+ 304128 ./data/b-libav-rgb555.yuv
+0b62dcf9b57b294dbaa5d9e99b1ee192 *./data/b-libav-gray.yuv
+ 304128 ./data/b-libav-gray.yuv
+e197450dae2feba9e757b551e1e9145c *./data/b-libav-monow.yuv
+ 304128 ./data/b-libav-monow.yuv
+e197450dae2feba9e757b551e1e9145c *./data/b-libav-monob.yuv
+ 304128 ./data/b-libav-monob.yuv
+7a319375916cae4e691ecb74295e5d2a *./data/b-libav-pal8.yuv
+ 304128 ./data/b-libav-pal8.yuv
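The checksums above are what the libavtest branch of the regression script (added below) writes to data/libav.regression and diffs against this file. A minimal sketch of reproducing the comparison by hand, assuming an in-tree build with ffmpeg_g and tiny_psnr already compiled, a vsynth1 directory of %02d.pgm frames and an asynth1.sw audio sample in tests/ (the build system normally drives this):

    cd tests
    ./regression.sh libavtest libav.regression.ref vsynth1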
diff --git a/contrib/ffmpeg/tests/regression.sh b/contrib/ffmpeg/tests/regression.sh
new file mode 100755
index 000000000..70f4eba12
--- /dev/null
+++ b/contrib/ffmpeg/tests/regression.sh
@@ -0,0 +1,785 @@
+#!/bin/sh
+#
+# automatic regression test for ffmpeg
+#
+#
+#set -x
+# Even in the 21st century some diffs do not support -u.
+diff -u "$0" "$0" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+ diff_cmd="diff -u"
+else
+ diff_cmd="diff"
+fi
+
+diff -w "$0" "$0" > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+ diff_cmd="$diff_cmd -w"
+fi
+
+set -e
+
+datadir="./data"
+
+logfile="$datadir/ffmpeg.regression"
+outfile="$datadir/a-"
+
+# tests to do
+if [ "$1" = "mpeg4" ] ; then
+ do_mpeg4=y
+elif [ "$1" = "mpeg" ] ; then
+ do_mpeg=y
+ do_mpeg2=y
+elif [ "$1" = "ac3" ] ; then
+ do_ac3=y
+elif [ "$1" = "huffyuv" ] ; then
+ do_huffyuv=y
+elif [ "$1" = "mpeg2thread" ] ; then
+ do_mpeg2thread=y
+elif [ "$1" = "snow" ] ; then
+ do_snow=y
+elif [ "$1" = "snowll" ] ; then
+ do_snowll=y
+elif [ "$1" = "libavtest" ] ; then
+ do_libav=y
+ logfile="$datadir/libav.regression"
+ outfile="$datadir/b-"
+else
+ do_mpeg=y
+ do_mpeg2=y
+ do_mpeg2thread=y
+ do_msmpeg4v2=y
+ do_msmpeg4=y
+ do_wmv1=y
+ do_wmv2=y
+ do_h261=y
+ do_h263=y
+ do_h263p=y
+ do_mpeg4=y
+ do_mp4psp=y
+ do_huffyuv=y
+ do_mjpeg=y
+ do_ljpeg=y
+ do_jpegls=y
+ do_rv10=y
+ do_rv20=y
+ do_mp2=y
+ do_ac3=y
+ do_g726=y
+ do_adpcm_ima_wav=y
+ do_adpcm_ms=y
+ do_flac=y
+ do_rc=y
+ do_mpeg4adv=y
+ do_mpeg4thread=y
+ do_mpeg4nr=y
+ do_mpeg1b=y
+ do_asv1=y
+ do_asv2=y
+ do_flv=y
+ do_ffv1=y
+ do_error=y
+ do_svq1=y
+ do_snow=y
+ do_snowll=y
+ do_adpcm_yam=y
+ do_dv=y
+ do_dv50=y
+fi
+
+
+# various files
+ffmpeg="../ffmpeg_g"
+tiny_psnr="./tiny_psnr"
+reffile="$2"
+benchfile="$datadir/ffmpeg.bench"
+raw_src="$3/%02d.pgm"
+raw_dst="$datadir/out.yuv"
+raw_ref="$datadir/ref.yuv"
+pcm_src="asynth1.sw"
+pcm_dst="$datadir/out.wav"
+pcm_ref="$datadir/ref.wav"
+if [ X"`echo | md5sum 2> /dev/null`" != X ]; then
+ do_md5sum() { md5sum -b $1; }
+elif [ -x /sbin/md5 ]; then
+ do_md5sum() { /sbin/md5 -r $1 | sed 's# \**\./# *./#'; }
+else
+ do_md5sum() { echo No md5sum program found; }
+fi
+
+# create the data directory if it does not exist
+mkdir -p $datadir
+
+FFMPEG_OPTS="-y -flags +bitexact -dct fastint -idct simple"
+
+do_ffmpeg()
+{
+ f="$1"
+ shift
+ echo $ffmpeg $FFMPEG_OPTS $*
+ $ffmpeg $FFMPEG_OPTS -benchmark $* > $datadir/bench.tmp 2> /tmp/ffmpeg$$
+ egrep -v "^(Stream|Press|Input|Output|frame| Stream| Duration|video:)" /tmp/ffmpeg$$ || true
+ rm -f /tmp/ffmpeg$$
+ do_md5sum $f >> $logfile
+ if [ $f = $raw_dst ] ; then
+ $tiny_psnr $f $raw_ref >> $logfile
+ elif [ $f = $pcm_dst ] ; then
+ $tiny_psnr $f $pcm_ref 2 >> $logfile
+ else
+ wc -c $f >> $logfile
+ fi
+ expr "`cat $datadir/bench.tmp`" : '.*utime=\(.*s\)' > $datadir/bench2.tmp
+ echo `cat $datadir/bench2.tmp` $f >> $benchfile
+}
+
+do_ffmpeg_crc()
+{
+ f="$1"
+ shift
+ echo $ffmpeg $FFMPEG_OPTS $* -f crc $datadir/ffmpeg.crc
+ $ffmpeg $FFMPEG_OPTS $* -f crc $datadir/ffmpeg.crc > /tmp/ffmpeg$$ 2>&1
+ egrep -v "^(Stream|Press|Input|Output|frame| Stream| Duration|video:|ffmpeg version| configuration| built)" /tmp/ffmpeg$$ || true
+ rm -f /tmp/ffmpeg$$
+ echo "$f `cat $datadir/ffmpeg.crc`" >> $logfile
+}
+
+do_ffmpeg_nocheck()
+{
+ f="$1"
+ shift
+ echo $ffmpeg $FFMPEG_OPTS $*
+ $ffmpeg $FFMPEG_OPTS -benchmark $* > $datadir/bench.tmp 2> /tmp/ffmpeg$$
+ egrep -v "^(Stream|Press|Input|Output|frame| Stream| Duration|video:)" /tmp/ffmpeg$$ || true
+ rm -f /tmp/ffmpeg$$
+ expr "`cat $datadir/bench.tmp`" : '.*utime=\(.*s\)' > $datadir/bench2.tmp
+ echo `cat $datadir/bench2.tmp` $f >> $benchfile
+}
+
+do_video_decoding()
+{
+ do_ffmpeg $raw_dst -y -i $file -f rawvideo $@ $raw_dst
+}
+
+do_video_encoding()
+{
+ do_ffmpeg $file -y $1 -f pgmyuv -i $raw_src $2 $file
+}
+
+do_audio_encoding()
+{
+ file=${outfile}$1
+ do_ffmpeg $file -y -ab 128 -ac 2 -f s16le -i $pcm_src $3 $file
+}
+
+do_audio_decoding()
+{
+ do_ffmpeg $pcm_dst -y -i $file -f wav $pcm_dst
+}
+
+do_libav()
+{
+ file=${outfile}libav.$1
+ do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src -f s16le -i $pcm_src $2 $file
+ do_ffmpeg_crc $file -i $file $3
+
+}
+
+do_streamed_images()
+{
+ file=${outfile}libav.$1
+ do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src -f image2pipe $file
+ do_ffmpeg_crc $file -f image2pipe -i $file
+}
+
+do_image_formats()
+{
+ file=${outfile}libav%02d.$1
+ $ffmpeg -t 0.5 -y -qscale 10 -f pgmyuv -i $raw_src $2 $3 $file
+ do_ffmpeg_crc $file $3 -i $file
+
+}
+
+do_audio_only()
+{
+ file=${outfile}libav.$1
+ do_ffmpeg $file -t 1 -y -qscale 10 -f s16le -i $pcm_src $file
+ do_ffmpeg_crc $file -i $file
+}
+
+echo "ffmpeg regression test" > $logfile
+echo "ffmpeg benchmarks" > $benchfile
+
+###################################
+# generate reference for quality check
+do_ffmpeg_nocheck $raw_ref -y -f pgmyuv -i $raw_src -an -f rawvideo $raw_ref
+do_ffmpeg_nocheck $pcm_ref -y -ab 128 -ac 2 -ar 44100 -f s16le -i $pcm_src -f wav $pcm_ref
+
+###################################
+if [ -n "$do_mpeg" ] ; then
+# mpeg1 encoding
+file=${outfile}mpeg1.mpg
+do_video_encoding "-qscale 10" "-f mpeg1video"
+
+# mpeg1 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mpeg2" ] ; then
+# mpeg2 encoding
+file=${outfile}mpeg2.mpg
+do_video_encoding "-qscale 10" "-vcodec mpeg2video -f mpeg1video"
+
+# mpeg2 decoding
+do_video_decoding
+
+# mpeg2 encoding using intra vlc
+file=${outfile}mpeg2ivlc.mpg
+do_video_encoding "-qscale 10" "-vcodec mpeg2video -f mpeg1video -flags2 +ivlc"
+
+# mpeg2 decoding
+do_video_decoding
+
+# mpeg2 encoding
+file=${outfile}mpeg2.mpg
+do_video_encoding "-qscale 10" "-vcodec mpeg2video -idct int -dct int -f mpeg1video"
+
+# mpeg2 decoding
+do_ffmpeg $raw_dst -y -idct int -i $file -f rawvideo $raw_dst
+
+# mpeg2 encoding interlaced
+file=${outfile}mpeg2i.mpg
+do_video_encoding "-qscale 10" "-vcodec mpeg2video -f mpeg1video -flags +ildct+ilme"
+
+# mpeg2 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mpeg2thread" ] ; then
+# mpeg2 encoding interlaced
+file=${outfile}mpeg2thread.mpg
+do_video_encoding "-qscale 10" "-vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 2"
+
+# mpeg2 decoding
+do_video_decoding
+
+# mpeg2 encoding interlaced using intra vlc
+file=${outfile}mpeg2threadivlc.mpg
+do_video_encoding "-qscale 10" "-vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -flags2 +ivlc -threads 2"
+
+# mpeg2 decoding
+do_video_decoding
+
+# mpeg2 encoding interlaced
+file=${outfile}mpeg2reuse.mpg
+do_ffmpeg $file -y -sameq -me_threshold 256 -mb_threshold 1024 -i ${outfile}mpeg2thread.mpg -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 4 $file
+
+# mpeg2 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_msmpeg4v2" ] ; then
+# msmpeg4 encoding
+file=${outfile}msmpeg4v2.avi
+do_video_encoding "-qscale 10" "-an -vcodec msmpeg4v2"
+
+# msmpeg4v2 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_msmpeg4" ] ; then
+# msmpeg4 encoding
+file=${outfile}msmpeg4.avi
+do_video_encoding "-qscale 10" "-an -vcodec msmpeg4"
+
+# msmpeg4 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_wmv1" ] ; then
+# wmv1 encoding
+file=${outfile}wmv1.avi
+do_video_encoding "-qscale 10" "-an -vcodec wmv1"
+
+# wmv1 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_wmv2" ] ; then
+# wmv2 encoding
+file=${outfile}wmv2.avi
+do_video_encoding "-qscale 10" "-an -vcodec wmv2"
+
+# wmv2 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_h261" ] ; then
+# h261 encoding
+file=${outfile}h261.avi
+do_video_encoding "-qscale 11" "-s 352x288 -an -vcodec h261"
+
+# h261 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_h263" ] ; then
+# h263 encoding
+file=${outfile}h263.avi
+do_video_encoding "-qscale 10" "-s 352x288 -an -vcodec h263"
+
+# h263 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_h263p" ] ; then
+# h263p encoding
+file=${outfile}h263p.avi
+do_video_encoding "-qscale 2 -flags +umv+aiv+aic" "-s 352x288 -an -vcodec h263p -ps 300"
+
+# h263p decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mpeg4" ] ; then
+# mpeg4
+file=${outfile}odivx.mp4
+do_video_encoding "-flags +mv4 -mbd bits -qscale 10" "-an -vcodec mpeg4"
+
+# mpeg4 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_huffyuv" ] ; then
+# huffyuv
+file=${outfile}huffyuv.avi
+do_video_encoding "" "-an -vcodec huffyuv -pix_fmt yuv422p"
+
+# huffyuv decoding
+do_video_decoding -strict -2 -pix_fmt yuv420p
+fi
+
+###################################
+if [ -n "$do_rc" ] ; then
+# mpeg4 rate control
+file=${outfile}mpeg4-rc.avi
+do_video_encoding "-b 400k -bf 2" "-an -vcodec mpeg4"
+
+# mpeg4 rate control decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mpeg4adv" ] ; then
+# mpeg4
+file=${outfile}mpeg4-adv.avi
+do_video_encoding "-qscale 9 -flags +mv4+part+aic+trell -mbd bits -ps 200" "-an -vcodec mpeg4"
+
+# mpeg4 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mpeg4thread" ] ; then
+# mpeg4
+file=${outfile}mpeg4-thread.avi
+do_video_encoding "-b 500k -flags +mv4+part+aic+trell -mbd bits -ps 200 -bf 2" "-an -vcodec mpeg4 -threads 2"
+
+# mpeg4 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mpeg4adv" ] ; then
+# mpeg4
+file=${outfile}mpeg4-Q.avi
+do_video_encoding "-qscale 7 -flags +mv4+qpel -mbd 2 -bf 2 -cmp 1 -subcmp 2" "-an -vcodec mpeg4"
+
+# mpeg4 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mp4psp" ] ; then
+# mp4 PSP style
+file=${outfile}mpeg4-PSP.mp4
+do_ffmpeg $file -y -b 768k -s 320x240 -f psp -ar 24000 -ab 32 -i $raw_src $file
+fi
+
+###################################
+if [ -n "$do_error" ] ; then
+# damaged mpeg4
+file=${outfile}error-mpeg4-adv.avi
+do_video_encoding "-qscale 7 -flags +mv4+part+aic -mbd rd -ps 250 -error 10" "-an -vcodec mpeg4"
+
+# damaged mpeg4 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mpeg4nr" ] ; then
+# noise reduction
+file=${outfile}mpeg4-nr.avi
+do_video_encoding "-qscale 8 -flags +mv4 -mbd rd -nr 200" "-an -vcodec mpeg4"
+
+# mpeg4 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mpeg1b" ] ; then
+# mpeg1
+file=${outfile}mpeg1b.mpg
+do_video_encoding "-qscale 8 -bf 3 -ps 200" "-an -vcodec mpeg1video -f mpeg1video"
+
+# mpeg1 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_mjpeg" ] ; then
+# mjpeg
+file=${outfile}mjpeg.avi
+do_video_encoding "-qscale 10" "-an -vcodec mjpeg -pix_fmt yuvj420p"
+
+# mjpeg decoding
+do_video_decoding -pix_fmt yuv420p
+fi
+
+###################################
+if [ -n "$do_ljpeg" ] ; then
+# ljpeg
+file=${outfile}ljpeg.avi
+do_video_encoding "" "-an -vcodec ljpeg -strict -1"
+
+# ljpeg decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_jpegls" ] ; then
+# jpeg ls
+file=${outfile}jpegls.avi
+do_video_encoding "" "-an -vcodec jpegls -vtag MJPG"
+
+# jpeg ls decoding
+do_video_decoding -pix_fmt yuv420p
+fi
+
+###################################
+if [ -n "$do_rv10" ] ; then
+# rv10 encoding
+file=${outfile}rv10.rm
+do_video_encoding "-qscale 10" "-an"
+
+# rv10 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_rv20" ] ; then
+# rv20 encoding
+file=${outfile}rv20.rm
+do_video_encoding "-qscale 10" "-vcodec rv20 -an"
+
+# rv20 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_asv1" ] ; then
+# asv1 encoding
+file=${outfile}asv1.avi
+do_video_encoding "-qscale 10" "-an -vcodec asv1"
+
+# asv1 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_asv2" ] ; then
+# asv2 encoding
+file=${outfile}asv2.avi
+do_video_encoding "-qscale 10" "-an -vcodec asv2"
+
+# asv2 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_flv" ] ; then
+# flv encoding
+file=${outfile}flv.flv
+do_video_encoding "-qscale 10" "-an -vcodec flv"
+
+# flv decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_ffv1" ] ; then
+# ffv1 encoding
+file=${outfile}ffv1.avi
+do_video_encoding "-strict -2" "-an -vcodec ffv1"
+
+# ffv1 decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_snow" ] ; then
+# snow encoding
+file=${outfile}snow.avi
+do_video_encoding "-strict -2" "-an -vcodec snow -qscale 2 -flags +qpel -me iter -dia_size 2 -cmp 12 -subcmp 12 -s 128x64"
+
+# snow decoding
+do_video_decoding -s 352x288
+fi
+
+###################################
+if [ -n "$do_snowll" ] ; then
+# snow encoding
+file=${outfile}snow53.avi
+do_video_encoding "-strict -2" "-an -vcodec snow -qscale .001 -pred 1 -flags +mv4+qpel"
+
+# snow decoding
+do_video_decoding
+fi
+
+###################################
+if [ -n "$do_dv" ] ; then
+# dv encoding
+file=${outfile}dv.dv
+do_video_encoding "-dct int" "-s pal -an"
+
+# dv decoding
+do_video_decoding -s cif
+fi
+
+###################################
+if [ -n "$do_dv50" ] ; then
+# dv50 encoding
+file=${outfile}dv.dv
+do_video_encoding "-dct int" "-s pal -pix_fmt yuv422p -an"
+
+# dv50 decoding
+do_video_decoding -s cif -pix_fmt yuv420p
+fi
+
+
+###################################
+if [ -n "$do_svq1" ] ; then
+# svq1 encoding
+file=${outfile}svq1.mov
+do_video_encoding "" "-an -vcodec svq1 -qscale 3 -pix_fmt yuv410p"
+
+# svq1 decoding
+do_video_decoding -pix_fmt yuv420p
+fi
+
+###################################
+if [ -n "$do_mp2" ] ; then
+# mp2 encoding
+do_audio_encoding mp2.mp2 "-ar 44100"
+
+# mp2 decoding
+do_audio_decoding
+$tiny_psnr $pcm_dst $pcm_ref 2 1924 >> $logfile
+fi
+
+###################################
+if [ -n "$do_ac3" ] ; then
+# ac3 encoding
+do_audio_encoding ac3.rm "" -vn
+
+# ac3 decoding
+#do_audio_decoding
+fi
+
+###################################
+if [ -n "$do_g726" ] ; then
+# g726 encoding
+do_audio_encoding g726.wav "-ar 44100" "-ab 32 -ac 1 -ar 8000 -acodec g726"
+
+# g726 decoding
+do_audio_decoding
+fi
+
+###################################
+if [ -n "$do_adpcm_ima_wav" ] ; then
+# encoding
+do_audio_encoding adpcm_ima.wav "-ar 44100" "-acodec adpcm_ima_wav"
+
+# decoding
+do_audio_decoding
+fi
+
+###################################
+if [ -n "$do_adpcm_ms" ] ; then
+# encoding
+do_audio_encoding adpcm_ms.wav "-ar 44100" "-acodec adpcm_ms"
+
+# decoding
+do_audio_decoding
+fi
+
+###################################
+if [ -n "$do_adpcm_yam" ] ; then
+# encoding
+do_audio_encoding adpcm_yam.wav "-ar 44100" "-acodec adpcm_yamaha"
+
+# decoding
+do_audio_decoding
+fi
+
+###################################
+if [ -n "$do_flac" ] ; then
+# encoding
+do_audio_encoding flac.flac "-ar 44100" "-acodec flac -compression_level 2"
+
+# decoding
+do_audio_decoding
+fi
+
+###################################
+# libav testing
+###################################
+
+if [ -n "$do_libav" ] ; then
+
+# avi
+do_libav avi
+
+# asf
+do_libav asf "-acodec mp2" "-r 25"
+
+# rm
+file=${outfile}libav.rm
+do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src -f s16le -i $pcm_src $file
+# broken
+#do_ffmpeg_crc $file -i $file
+
+# mpegps
+do_libav mpg
+
+# mpegts
+do_libav ts
+
+# swf (decode audio only)
+do_libav swf "-acodec mp2"
+
+# ffm
+do_libav ffm
+
+# flv
+do_libav flv -an
+
+# mov
+do_libav mov "-acodec pcm_alaw"
+
+# nut
+#do_libav nut "-acodec mp2"
+
+# dv
+do_libav dv "-ar 48000 -r 25 -s pal -ac 2"
+
+# gxf
+do_libav gxf "-ar 48000 -r 25 -s pal -ac 1"
+
+####################
+# streamed images
+# mjpeg
+#file=${outfile}libav.mjpeg
+#do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src $file
+#do_ffmpeg_crc $file -i $file
+
+# pbmpipe
+do_streamed_images pbm
+
+# pgmpipe
+do_streamed_images pgm
+
+# ppmpipe
+do_streamed_images ppm
+
+# gif
+file=${outfile}libav.gif
+do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src -pix_fmt rgb24 $file
+#do_ffmpeg_crc $file -i $file
+
+# yuv4mpeg
+file=${outfile}libav.y4m
+do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src $file
+#do_ffmpeg_crc $file -i $file
+
+####################
+# image formats
+# pgm (we do not do md5 on image files yet)
+do_image_formats pgm
+
+# ppm (we do not do md5 on image files yet)
+do_image_formats ppm
+
+# jpeg (we do not do md5 on image files yet)
+do_image_formats jpg "-flags +bitexact -dct fastint -idct simple -pix_fmt yuvj420p" "-f image2"
+
+####################
+# audio only
+
+# wav
+do_audio_only wav
+
+# alaw
+do_audio_only al
+
+# mulaw
+do_audio_only ul
+
+# au
+do_audio_only au
+
+# mmf
+do_audio_only mmf
+
+# aiff
+do_audio_only aif
+
+# voc
+do_audio_only voc
+
+####################
+# pix_fmt conversions
+conversions="yuv420p yuv422p yuv444p yuv422 yuv410p yuv411p yuvj420p \
+ yuvj422p yuvj444p rgb24 bgr24 rgba32 rgb565 rgb555 gray monow \
+ monob pal8"
+for pix_fmt in $conversions ; do
+ file=${outfile}libav-${pix_fmt}.yuv
+ do_ffmpeg_nocheck $file -r 1 -t 1 -y -f pgmyuv -i $raw_src \
+ -f rawvideo -s 352x288 -pix_fmt $pix_fmt $raw_dst
+ do_ffmpeg $file -f rawvideo -s 352x288 -pix_fmt $pix_fmt -i $raw_dst \
+ -f rawvideo -s 352x288 -pix_fmt yuv444p $file
+done
+
+fi
+
+
+
+if $diff_cmd "$logfile" "$reffile" ; then
+ echo
+ echo Regression test succeeded.
+ exit 0
+else
+ echo
+ echo Regression test: Error.
+ exit 1
+fi
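A sketch of a manual full run, following the argument handling at the top of the script: $1 selects the test group (an empty argument selects everything), $2 is the reference log to diff against, and $3 is the directory holding the %02d.pgm input frames. The reference file name below is an assumption, and asynth1.sw must already be present in tests/:

    cd tests
    ./videogen vsynth1/                               # synthetic input frames
    ./regression.sh "" ffmpeg.regression.ref vsynth1  # run every codec test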
diff --git a/contrib/ffmpeg/tests/rotozoom.c b/contrib/ffmpeg/tests/rotozoom.c
new file mode 100644
index 000000000..5549d6ec2
--- /dev/null
+++ b/contrib/ffmpeg/tests/rotozoom.c
@@ -0,0 +1,289 @@
+/*
+ * Generates a synthetic YUV video sequence suitable for codec testing.
+ * GPLv2
+ * rotozoom.c -> s.bechet@av7.net
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <inttypes.h>
+
+#define FIXP (1<<16)
+#define MY_PI 205887 //(M_PI*FIXP)
+
+static int64_t int_pow(int64_t a, int p){
+ int64_t v= FIXP;
+
+ for(; p; p--){
+ v*= a;
+ v/= FIXP;
+ }
+
+ return v;
+}
+
+static int64_t int_sin(int64_t a){
+ if(a<0) a= MY_PI-a; // 0..inf
+ a %= 2*MY_PI; // 0..2PI
+
+ if(a>=MY_PI*3/2) a -= 2*MY_PI; // -PI/2 .. 3PI/2
+ if(a>=MY_PI/2 ) a = MY_PI - a; // -PI/2 .. PI/2
+
+ return a - int_pow(a, 3)/6 + int_pow(a, 5)/120 - int_pow(a, 7)/5040;
+}
+
+#define SCALEBITS 8
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1L<<SCALEBITS) + 0.5))
+typedef unsigned char UINT8;
+
+static void rgb24_to_yuv420p(UINT8 *lum, UINT8 *cb, UINT8 *cr,
+ UINT8 *src, int width, int height)
+{
+ int wrap, wrap3, x, y;
+ int r, g, b, r1, g1, b1;
+ UINT8 *p;
+
+ wrap = width;
+ wrap3 = width * 3;
+ p = src;
+ for(y=0;y<height;y+=2) {
+ for(x=0;x<width;x+=2) {
+ r = p[0];
+ g = p[1];
+ b = p[2];
+ r1 = r;
+ g1 = g;
+ b1 = b;
+ lum[0] = (FIX(0.29900) * r + FIX(0.58700) * g +
+ FIX(0.11400) * b + ONE_HALF) >> SCALEBITS;
+ r = p[3];
+ g = p[4];
+ b = p[5];
+ r1 += r;
+ g1 += g;
+ b1 += b;
+ lum[1] = (FIX(0.29900) * r + FIX(0.58700) * g +
+ FIX(0.11400) * b + ONE_HALF) >> SCALEBITS;
+ p += wrap3;
+ lum += wrap;
+
+ r = p[0];
+ g = p[1];
+ b = p[2];
+ r1 += r;
+ g1 += g;
+ b1 += b;
+ lum[0] = (FIX(0.29900) * r + FIX(0.58700) * g +
+ FIX(0.11400) * b + ONE_HALF) >> SCALEBITS;
+ r = p[3];
+ g = p[4];
+ b = p[5];
+ r1 += r;
+ g1 += g;
+ b1 += b;
+ lum[1] = (FIX(0.29900) * r + FIX(0.58700) * g +
+ FIX(0.11400) * b + ONE_HALF) >> SCALEBITS;
+
+ cb[0] = ((- FIX(0.16874) * r1 - FIX(0.33126) * g1 +
+ FIX(0.50000) * b1 + 4 * ONE_HALF - 1) >> (SCALEBITS + 2)) + 128;
+ cr[0] = ((FIX(0.50000) * r1 - FIX(0.41869) * g1 -
+ FIX(0.08131) * b1 + 4 * ONE_HALF - 1) >> (SCALEBITS + 2)) + 128;
+
+ cb++;
+ cr++;
+ p += -wrap3 + 2 * 3;
+ lum += -wrap + 2;
+ }
+ p += wrap3;
+ lum += wrap;
+ }
+}
+
+/* cif format */
+#define DEFAULT_WIDTH 352
+#define DEFAULT_HEIGHT 288
+#define DEFAULT_NB_PICT 50
+
+void pgmyuv_save(const char *filename, int w, int h,
+ unsigned char *rgb_tab)
+{
+ FILE *f;
+ int i, h2, w2;
+ unsigned char *cb, *cr;
+ unsigned char *lum_tab, *cb_tab, *cr_tab;
+
+ lum_tab = malloc(w * h);
+ cb_tab = malloc((w * h) / 4);
+ cr_tab = malloc((w * h) / 4);
+
+ rgb24_to_yuv420p(lum_tab, cb_tab, cr_tab, rgb_tab, w, h);
+
+ f = fopen(filename,"wb");
+ fprintf(f, "P5\n%d %d\n%d\n", w, (h * 3) / 2, 255);
+ fwrite(lum_tab, 1, w * h, f);
+ h2 = h / 2;
+ w2 = w / 2;
+ cb = cb_tab;
+ cr = cr_tab;
+ for(i=0;i<h2;i++) {
+ fwrite(cb, 1, w2, f);
+ fwrite(cr, 1, w2, f);
+ cb += w2;
+ cr += w2;
+ }
+ fclose(f);
+
+ free(lum_tab);
+ free(cb_tab);
+ free(cr_tab);
+}
+
+unsigned char *rgb_tab;
+int width, height, wrap;
+
+void put_pixel(int x, int y, int r, int g, int b)
+{
+ unsigned char *p;
+
+ if (x < 0 || x >= width ||
+ y < 0 || y >= height)
+ return;
+
+ p = rgb_tab + y * wrap + x * 3;
+ p[0] = r;
+ p[1] = g;
+ p[2] = b;
+}
+
+unsigned char tab_r[256*256];
+unsigned char tab_g[256*256];
+unsigned char tab_b[256*256];
+
+int teta = 0;
+int h_cos [360];
+int h_sin [360];
+
+static int ipol(uint8_t *src, int x, int y){
+ int int_x= x>>16;
+ int int_y= y>>16;
+ int frac_x= x&0xFFFF;
+ int frac_y= y&0xFFFF;
+ int s00= src[ ( int_x &255) + 256*( int_y &255) ];
+ int s01= src[ ((int_x+1)&255) + 256*( int_y &255) ];
+ int s10= src[ ( int_x &255) + 256*((int_y+1)&255) ];
+ int s11= src[ ((int_x+1)&255) + 256*((int_y+1)&255) ];
+ int s0= (((1<<16) - frac_x)*s00 + frac_x*s01)>>8;
+ int s1= (((1<<16) - frac_x)*s10 + frac_x*s11)>>8;
+
+ return (((1<<16) - frac_y)*s0 + frac_y*s1)>>24;
+}
+
+void gen_image(int num, int w, int h)
+{
+ const int c = h_cos [teta];
+ const int s = h_sin [teta];
+
+ const int xi = -(w/2) * c;
+ const int yi = (w/2) * s;
+
+ const int xj = -(h/2) * s;
+ const int yj = -(h/2) * c;
+ int i,j;
+
+ int x,y;
+ int xprime = xj;
+ int yprime = yj;
+
+
+ for (j=0;j<h;j++) {
+
+ x = xprime + xi + FIXP*w/2;
+ xprime += s;
+
+ y = yprime + yi + FIXP*h/2;
+ yprime += c;
+
+ for ( i=0 ; i<w ; i++ ) {
+ x += c;
+ y -= s;
+#if 1
+ put_pixel(i, j, ipol(tab_r, x, y), ipol(tab_g, x, y), ipol(tab_b, x, y));
+#else
+ {
+ unsigned dep;
+ dep = ((x>>16)&255) + (((y>>16)&255)<<8);
+ put_pixel(i, j, tab_r[dep], tab_g[dep], tab_b[dep]);
+ }
+#endif
+ }
+ }
+ teta = (teta+1) % 360;
+}
+
+#define W 256
+#define H 256
+
+void init_demo(const char *filename) {
+ int i,j;
+ int h;
+ int radian;
+ char line[3 * W];
+
+ FILE *fichier;
+
+ fichier = fopen(filename,"rb");
+ if (!fichier) {
+ perror(filename);
+ exit(1);
+ }
+
+ fread(line, 1, 15, fichier);
+ for (i=0;i<H;i++) {
+ fread(line,1,3*W,fichier);
+ for (j=0;j<W;j++) {
+ tab_r[W*i+j] = line[3*j ];
+ tab_g[W*i+j] = line[3*j + 1];
+ tab_b[W*i+j] = line[3*j + 2];
+ }
+ }
+ fclose(fichier);
+
+ /* tables sin/cos */
+ for (i=0;i<360;i++) {
+ radian = 2*i*MY_PI/360;
+ h = 2*FIXP + int_sin (radian);
+ h_cos[i] = ( h * int_sin (radian + MY_PI/2) )/2/FIXP;
+ h_sin[i] = ( h * int_sin (radian ) )/2/FIXP;
+ }
+}
+
+int main(int argc, char **argv)
+{
+ int w, h, i;
+ char buf[1024];
+
+ if (argc != 3) {
+ printf("usage: %s directory/ image.pnm\n"
+ "generate a test video stream\n", argv[0]);
+ exit(1);
+ }
+
+ w = DEFAULT_WIDTH;
+ h = DEFAULT_HEIGHT;
+
+ rgb_tab = malloc(w * h * 3);
+ wrap = w * 3;
+ width = w;
+ height = h;
+
+ init_demo(argv[2]);
+
+ for(i=0;i<DEFAULT_NB_PICT;i++) {
+ snprintf(buf, sizeof(buf), "%s%02d.pgm", argv[1], i);
+ gen_image(i, w, h);
+ pgmyuv_save(buf, w, h, rgb_tab);
+ }
+
+ free(rgb_tab);
+ return 0;
+}
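A usage sketch: argv[1] is a filename prefix for the generated %02d.pgm frames and argv[2] a 256x256 binary PPM used as the rotating texture; the directory and image names below are placeholders:

    mkdir -p vsynth2
    ./rotozoom vsynth2/ some_256x256_image.pnm

The rotozoom.regression.ref file that follows presumably records the expected codec-test output when regression.sh is run over frames generated this way.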
diff --git a/contrib/ffmpeg/tests/rotozoom.regression.ref b/contrib/ffmpeg/tests/rotozoom.regression.ref
new file mode 100644
index 000000000..91dbe27d9
--- /dev/null
+++ b/contrib/ffmpeg/tests/rotozoom.regression.ref
@@ -0,0 +1,182 @@
+ffmpeg regression test
+73ca6f1deab02d1d67a0e8495c026a9e *./data/a-mpeg1.mpg
+192783 ./data/a-mpeg1.mpg
+56147e94b12f08df7213e610e177823d *./data/out.yuv
+stddev: 4.95 PSNR:34.21 bytes:7602176
+2d55ce623a7be4e8136f80266e487678 *./data/a-mpeg2.mpg
+198667 ./data/a-mpeg2.mpg
+b7cae8a1f751b821cddcbe4d5dbc518c *./data/out.yuv
+stddev: 4.96 PSNR:34.19 bytes:7602176
+6dc412b7972918b0c3be5b448c76cdbb *./data/a-mpeg2ivlc.mpg
+197869 ./data/a-mpeg2ivlc.mpg
+b7cae8a1f751b821cddcbe4d5dbc518c *./data/out.yuv
+stddev: 4.96 PSNR:34.19 bytes:7602176
+f979bcca866e6e4cad5dc6cb06e56cfb *./data/a-mpeg2.mpg
+198041 ./data/a-mpeg2.mpg
+f6d9bf24ff8676a7f6076c05cd2c81a3 *./data/out.yuv
+stddev: 4.97 PSNR:34.18 bytes:7602176
+f90197a8b6e62ae25f82625337f27240 *./data/a-mpeg2i.mpg
+204579 ./data/a-mpeg2i.mpg
+ea5057b60146c06d40449cdfc686bf13 *./data/out.yuv
+stddev: 4.98 PSNR:34.17 bytes:7602176
+dc359097986fc04df7541f720fa5da14 *./data/a-mpeg2thread.mpg
+183314 ./data/a-mpeg2thread.mpg
+354eb621a217e50252dbed937f9eab18 *./data/out.yuv
+stddev: 4.76 PSNR:34.57 bytes:7602176
+fb02afb5a0c79c7e99a32e60e19a86bc *./data/a-mpeg2threadivlc.mpg
+182460 ./data/a-mpeg2threadivlc.mpg
+354eb621a217e50252dbed937f9eab18 *./data/out.yuv
+stddev: 4.76 PSNR:34.57 bytes:7602176
+b567ce695c24fb1e08ff1c137a805e1a *./data/a-mpeg2reuse.mpg
+394759 ./data/a-mpeg2reuse.mpg
+c3a86762e97a8869ed3c969ee2e084b8 *./data/out.yuv
+stddev: 4.76 PSNR:34.56 bytes:7602176
+c09815e40a9d260628e1ebad8b2b3774 *./data/a-msmpeg4v2.avi
+129918 ./data/a-msmpeg4v2.avi
+8920194f8bf8f9cdd6c65b3df9e1a292 *./data/out.yuv
+stddev: 5.33 PSNR:33.58 bytes:7602176
+3069f95f2ffca1f20c8ea36e2625fabc *./data/a-msmpeg4.avi
+127680 ./data/a-msmpeg4.avi
+0e1c6e25c71c6a8fa8e506e3d97ca4c9 *./data/out.yuv
+stddev: 5.33 PSNR:33.58 bytes:7602176
+6896c9bdf765953d272c25e34795b934 *./data/a-wmv1.avi
+129548 ./data/a-wmv1.avi
+81eee429b665254d19a06607463c0b5e *./data/out.yuv
+stddev: 5.33 PSNR:33.59 bytes:7602176
+34b40c67036c8e09740f7acfe3d43df6 *./data/a-wmv2.avi
+129864 ./data/a-wmv2.avi
+81eee429b665254d19a06607463c0b5e *./data/out.yuv
+stddev: 5.33 PSNR:33.59 bytes:7602176
+dfd005d4c9030a0dc889c828a6408b9c *./data/a-h261.avi
+191086 ./data/a-h261.avi
+db7ceff174823b98834faa2320ca89ac *./data/out.yuv
+stddev: 6.38 PSNR:32.02 bytes:7602176
+9a368687ab34c48079f11a202839a6bc *./data/a-h263.avi
+160106 ./data/a-h263.avi
+61213b91b359697ebcefb9e0a53ac54a *./data/out.yuv
+stddev: 5.43 PSNR:33.41 bytes:7602176
+c7644d40e9f40bbd98e5a978f9f94bb4 *./data/a-h263p.avi
+868018 ./data/a-h263p.avi
+4b0ee791f280029dc03c528f76f195d4 *./data/out.yuv
+stddev: 1.91 PSNR:42.49 bytes:7602176
+f15f07988bfdb2851f88ebe3036b72d3 *./data/a-odivx.mp4
+119797 ./data/a-odivx.mp4
+90a3577850239083a9042bef33c50e85 *./data/out.yuv
+stddev: 5.34 PSNR:33.56 bytes:7602176
+a1323da0c8b437cd6961f8c90451880b *./data/a-huffyuv.avi
+6455232 ./data/a-huffyuv.avi
+dde5895817ad9d219f79a52d0bdfb001 *./data/out.yuv
+stddev: 0.00 PSNR:99.99 bytes:7602176
+a418db8cc1410a50a154e8418d78e3d7 *./data/a-mpeg4-rc.avi
+227918 ./data/a-mpeg4-rc.avi
+56b327651c526538576fbc104f696dde *./data/out.yuv
+stddev: 4.24 PSNR:35.55 bytes:7602176
+9001cf571eb7f26fa5592bdec6538583 *./data/a-mpeg4-adv.avi
+173590 ./data/a-mpeg4-adv.avi
+699edf05648fdc42196b7bebef9be269 *./data/out.yuv
+stddev: 4.84 PSNR:34.41 bytes:7602176
+5d94ca237bd1ba1a6cdab6796485c6dc *./data/a-mpeg4-thread.avi
+254248 ./data/a-mpeg4-thread.avi
+1cb815783ab9dc787add51cdbc97e156 *./data/out.yuv
+stddev: 4.03 PSNR:36.01 bytes:7602176
+59e682efe457070d21170fdb117c1961 *./data/a-mpeg4-Q.avi
+165840 ./data/a-mpeg4-Q.avi
+f5838ffcf1dd6c538e27dbe906e5eadd *./data/out.yuv
+stddev: 4.00 PSNR:36.08 bytes:7602176
+5b5b88ce610114e346a5df4f46995bb9 *./data/a-mpeg4-PSP.mp4
+325006 ./data/a-mpeg4-PSP.mp4
+90e65096aa9ebafa3fe3f44a5a47cdc4 *./data/a-error-mpeg4-adv.avi
+176588 ./data/a-error-mpeg4-adv.avi
+113defd3f8daf878e0b3fc03fafb4c09 *./data/out.yuv
+stddev: 9.02 PSNR:29.01 bytes:7602176
+c41187c99588fb7229ad330b2f80d28b *./data/a-mpeg4-nr.avi
+155044 ./data/a-mpeg4-nr.avi
+f7fc191308679f709405e62271f5c65f *./data/out.yuv
+stddev: 4.73 PSNR:34.62 bytes:7602176
+1244f602a39b255ca676ca1e47617afd *./data/a-mpeg1b.mpg
+230033 ./data/a-mpeg1b.mpg
+df02ed235141aa229b47b91b28f71da5 *./data/out.yuv
+stddev: 4.13 PSNR:35.79 bytes:7602176
+b179402bba391073b5f5f9324a834061 *./data/a-mjpeg.avi
+703564 ./data/a-mjpeg.avi
+b1aa72cfb6f9cc3f525b27abc86a8f51 *./data/out.yuv
+stddev: 4.38 PSNR:35.28 bytes:7602176
+bc0d8c868c1a05db0ff03f41768f6c5e *./data/a-ljpeg.avi
+4766558 ./data/a-ljpeg.avi
+dde5895817ad9d219f79a52d0bdfb001 *./data/out.yuv
+stddev: 0.00 PSNR:99.99 bytes:7602176
+8a2f3984a27a7513545cf98bc05fd066 *./data/a-jpegls.avi
+8334508 ./data/a-jpegls.avi
+a63d4e3ea1f0b0c0d44821da9e09b8f4 *./data/out.yuv
+stddev: 0.67 PSNR:51.57 bytes:7602176
+989a42671603dc1a7e6b156dccf0e820 *./data/a-rv10.rm
+154330 ./data/a-rv10.rm
+61213b91b359697ebcefb9e0a53ac54a *./data/out.yuv
+stddev: 5.43 PSNR:33.41 bytes:7602176
+1b1cbff8e78602de498b4314cb991e72 *./data/a-rv20.rm
+132754 ./data/a-rv20.rm
+c66afdcc0daac2f1b4167b9811968877 *./data/out.yuv
+stddev: 5.42 PSNR:33.44 bytes:7602176
+4eb34d2de25f67a2706456e999338fe9 *./data/a-asv1.avi
+832512 ./data/a-asv1.avi
+c96ff7fd17c52f99ddb7922a4cb9168f *./data/out.yuv
+stddev: 10.47 PSNR:27.72 bytes:7602176
+9649a4b68fb1107bad13e8a7574cc72d *./data/a-asv2.avi
+789072 ./data/a-asv2.avi
+74a78015b64b2cf8cb9da2e44f508a69 *./data/out.yuv
+stddev: 10.28 PSNR:27.88 bytes:7602176
+bd76377d9e167caff10ebaf381f01a82 *./data/a-flv.flv
+131337 ./data/a-flv.flv
+8999c8264fb0941561f64c4a736e9d88 *./data/out.yuv
+stddev: 5.33 PSNR:33.58 bytes:7602176
+d72b0960e162d4998b9acbabb07e99ab *./data/a-ffv1.avi
+3525804 ./data/a-ffv1.avi
+dde5895817ad9d219f79a52d0bdfb001 *./data/out.yuv
+stddev: 0.00 PSNR:99.99 bytes:7602176
+4b306a67e21771eba4c61b1cf0f56141 *./data/a-snow.avi
+68526 ./data/a-snow.avi
+d2914543504345fad6e5593f66f072bc *./data/out.yuv
+stddev: 10.93 PSNR:27.34 bytes:7602176
+892221ef4c1debf694a481a5e23e1136 *./data/a-snow53.avi
+2722066 ./data/a-snow53.avi
+dde5895817ad9d219f79a52d0bdfb001 *./data/out.yuv
+stddev: 0.00 PSNR:99.99 bytes:7602176
+af9f474238c9c68cb32e389659ee25ab *./data/a-dv.dv
+7200000 ./data/a-dv.dv
+bb69dda7a84a5b166434e28e1243d3d1 *./data/out.yuv
+stddev: 2.99 PSNR:38.59 bytes:7602176
+91dc1c1dc4a8cca72f27d85db6d7636f *./data/a-dv.dv
+14400000 ./data/a-dv.dv
+74b01209bb5e096d570dd4df112bb82d *./data/out.yuv
+stddev: 2.98 PSNR:38.61 bytes:7602176
+7ba9292d663819a9a1d1cdecc6f1b079 *./data/a-svq1.mov
+768407 ./data/a-svq1.mov
+6bc10518bc387c3bdf117997713ee69a *./data/out.yuv
+stddev: 3.44 PSNR:37.39 bytes:7602176
+21f8ff9f1daacd9133683bb4ea0f50a4 *./data/a-mp2.mp2
+95712 ./data/a-mp2.mp2
+83f8df5d5f84480566af548bb037fceb *./data/out.wav
+stddev:9330.70 PSNR:16.92 bytes:1054720
+stddev:4396.13 PSNR:23.46 bytes:1052672
+dd68110cb7e5388392f89d5160d3a825 *./data/a-ac3.rm
+98203 ./data/a-ac3.rm
+9e6e66847a568ef4f1f229b0939d2aae *./data/a-g726.wav
+24268 ./data/a-g726.wav
+a719ab6d47d8d601520edb13bf6136b4 *./data/out.wav
+stddev:8459.88 PSNR:17.77 bytes:96256
+ea2efb8ba20695a35ab0d71a7ee86f22 *./data/a-adpcm_ima.wav
+266288 ./data/a-adpcm_ima.wav
+60178d48204f5662d91776e36eddc82e *./data/out.wav
+stddev:11441.89 PSNR:15.15 bytes:1054720
+d2eee867856d2bdb6d08e936d4ceec0c *./data/a-adpcm_ms.wav
+267308 ./data/a-adpcm_ms.wav
+91a84bb4f319a3a0bf0c0441b3d3a529 *./data/out.wav
+stddev:1050.18 PSNR:35.89 bytes:1054720
+48ae9fcb043a44e316998b85043b61bc *./data/a-adpcm_yam.wav
+264236 ./data/a-adpcm_yam.wav
+e92cec8c07913ffb91ad2b11f79cdc00 *./data/out.wav
+stddev:18312.68 PSNR:11.06 bytes:1056768
+c3382f03ce2efb5d475240d288a33898 *./data/a-flac.flac
+353368 ./data/a-flac.flac
+c4228df189aad9567a037727d0e763e4 *./data/out.wav
+stddev: 33.31 PSNR:65.87 bytes:1040384
diff --git a/contrib/ffmpeg/tests/server-regression.sh b/contrib/ffmpeg/tests/server-regression.sh
new file mode 100755
index 000000000..fb28fbae8
--- /dev/null
+++ b/contrib/ffmpeg/tests/server-regression.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+# Even in the 21st century some diffs do not support -u.
+diff -u $0 $0 > /dev/null 2>&1
+if [ $? -eq 0 ]; then
+ diff_cmd="diff -u"
+else
+ diff_cmd="diff"
+fi
+
+# Make sure that the data directory exists
+mkdir -p data
+
+cp "$2" data/test.conf
+#perl -e 'chomp($wd = `pwd`); print map { s!data/!!; "<Stream $_>\nFile $wd/data/$_\n</Stream>\n\n" } @ARGV' data/a* >> data/test.conf
+#perl -e 'chomp($wd = `pwd`); print map { s!data/!!; "<Stream $_.asf>\nFile $wd/data/$_\n</Stream>\n\n" } @ARGV' data/a* >> data/test.conf
+
+FILES=`sed -n 's/^[^#]*<Stream \(.*\)>.*/\1/p' data/test.conf | grep -v html`
+
+rm -f /tmp/feed.ffm
+../ffserver -d -f data/test.conf 2> /dev/null &
+FFSERVER_PID=$!
+echo "Waiting for feeds to startup..."
+sleep 2
+(
+ cd data || exit $?
+ rm -f ff-*;
+ WGET_OPTIONS="--user-agent=NSPlayer -q --proxy=off -e verbose=off -e server_response=off"
+ for file in $FILES; do
+ if [ `expr $file : "a-*"` != 0 ]; then
+ wget $WGET_OPTIONS --output-document=- http://localhost:9999/$file > ff-$file &
+ else
+ wget $WGET_OPTIONS --output-document=- http://localhost:9999/$file?date=19700101T000000Z | dd bs=1 count=100000 > ff-$file 2>/dev/null &
+ fi
+ MDFILES="$MDFILES ff-$file"
+ done
+ wait
+ # the status page is always different
+ md5sum $MDFILES > ffserver.regression
+)
+kill $FFSERVER_PID
+wait > /dev/null 2>&1
+if $diff_cmd data/ffserver.regression "$1" ; then
+ echo
+ echo Server regression test succeeded.
+ exit 0
+else
+ echo
+ echo Server regression test: Error.
+ exit 1
+fi
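A sketch of a manual run, reading the script: $1 is the reference checksum list and $2 the ffserver configuration that gets copied to data/test.conf; a built ../ffserver and wget are required, and the reference file name below is an assumption:

    cd tests
    ./server-regression.sh ffserver.regression.ref test.conf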
diff --git a/contrib/ffmpeg/tests/test.conf b/contrib/ffmpeg/tests/test.conf
new file mode 100644
index 000000000..cc2038a60
--- /dev/null
+++ b/contrib/ffmpeg/tests/test.conf
@@ -0,0 +1,306 @@
+#
+# This is a test configuration file. You can invoke it with
+# ../ffserver -f test.conf
+# from the tests directory, once the vsynth1 subdirectory has been
+# populated. Then point your browser at http://whatever:9999/teststat.html
+# to look at the streams.
+#
+
+#
+# Port on which the server is listening. You must select a different
+# port from your standard http web server if it is running on the same
+# computer.
+
+Port 9999
+RTSPPort 9990
+
+# Address on which the server is bound. Only useful if you have
+# several network interfaces.
+
+BindAddress 0.0.0.0
+
+# Number of simultaneous requests that can be handled. Since FFServer
+# is very fast, this limit is determined mainly by your Internet
+# connection speed.
+
+MaxClients 1000
+
+MaxBandwidth 100000
+
+# Access Log file (uses standard Apache log file format)
+# '-' is the standard output
+
+CustomLog -
+
+##################################################################
+# Definition of the live feeds. Each live feed contains one video
+# and/or audio sequence coming from an ffmpeg encoder or another
+# ffserver. This sequence may be encoded simultaneously with several
+# codecs at several resolutions.
+
+<Feed feed1.ffm>
+
+# You must use 'ffmpeg' to send a live feed to ffserver. In this
+# example, you can type:
+#
+# ffmpeg http://localhost:8090/feed1.ffm
+
+# ffserver can also do time shifting. It means that it can stream any
+# previously recorded live stream. The request should contain:
+# "http://xxxx?date=[YYYY-MM-DDT][[HH:]MM:]SS[.m...]".You must specify
+# a path where the feed is stored on disk. You also specify the
+# maximum size of the feed (100M bytes here). Default:
+# File=/tmp/feed_name.ffm FileMaxSize=5M
+
+File /tmp/feed.ffm
+FileMaxSize 100M
+
+# Fire up ffmpeg pointing at this stream
+
+Launch ../ffmpeg -loop_input -flags +bitexact -dct fastint -idct simple -y -f pgmyuv -i vsynth1/%02d.pgm
+acl allow localhost
+</Feed>
+
+##################################################################
+# Now you can define each stream which will be generated from the
+# original audio and video stream. Each format has a filename (here
+# 'test128.mpg'). FFServer will send this stream when answering a
+# request containing this filename.
+
+<Stream test_h.avi>
+Feed feed1.ffm
+Format avi
+#
+BitExact
+DctFastint
+IdctSimple
+VideoFrameRate 10
+VideoSize 352x288
+VideoBitRate 100
+VideoGopSize 30
+NoAudio
+
+PreRoll 10
+StartSendOnKey
+MaxTime 100
+
+</Stream>
+
+<Stream test_l.avi>
+Feed feed1.ffm
+Format avi
+#
+BitExact
+DctFastint
+IdctSimple
+VideoFrameRate 2
+VideoSize 320x240
+VideoBitRate 40
+VideoGopSize 20
+NoAudio
+
+PreRoll 20
+StartSendOnKey
+MaxTime 100
+
+</Stream>
+
+#<Stream test_h.mpg>
+#Feed feed1.ffm
+#
+#VideoFrameRate 10
+#VideoSize 352x288
+#VideoBitRate 100
+#VideoGopSize 30
+#NoAudio
+
+#PreRoll 10
+#StartSendOnKey
+#MaxTime 100
+#
+#</Stream>
+#
+#<Stream test_l.mpg>
+#Feed feed1.ffm
+##
+#VideoFrameRate 2
+#VideoSize 320x240
+#VideoBitRate 40
+#VideoGopSize 20
+#NoAudio
+#
+#PreRoll 20
+#StartSendOnKey
+#MaxTime 100
+#
+#</Stream>
+#
+<Stream test.swf>
+Feed feed1.ffm
+#
+BitExact
+DctFastint
+IdctSimple
+Qscale 10
+VideoFrameRate 10
+VideoSize 352x288
+VideoBitRate 100
+VideoGopSize 30
+NoAudio
+
+PreRoll 10
+StartSendOnKey
+MaxTime 100
+
+</Stream>
+
+<Stream test_h.asf>
+Feed feed1.ffm
+Format asf
+#
+BitExact
+DctFastint
+IdctSimple
+Qscale 10
+VideoFrameRate 10
+VideoSize 320x240
+VideoBitRate 100
+VideoGopSize 30
+NoAudio
+
+PreRoll 10
+StartSendOnKey
+MaxTime 100
+
+Title "Test data stream"
+
+</Stream>
+
+<Stream test_l.asf>
+Feed feed1.ffm
+Format asf
+#
+BitExact
+DctFastint
+IdctSimple
+Qscale 10
+VideoFrameRate 2
+VideoSize 320x240
+VideoBitRate 40
+VideoGopSize 20
+NoAudio
+
+PreRoll 20
+StartSendOnKey
+MaxTime 100
+
+Title "Test data stream"
+
+</Stream>
+
+<Stream test_h.rm>
+
+Feed feed1.ffm
+Format rm
+
+BitExact
+DctFastint
+IdctSimple
+Qscale 10
+VideoBitRate 100
+VideoFrameRate 10
+VideoGopSize 30
+VideoSize 320x240
+NoAudio
+
+PreRoll 10
+StartSendOnKey
+MaxTime 100
+
+</Stream>
+
+<Stream test_l.rm>
+
+Feed feed1.ffm
+Format rm
+
+BitExact
+DctFastint
+IdctSimple
+Qscale 10
+VideoBitRate 40
+VideoFrameRate 2
+VideoGopSize 20
+VideoSize 320x240
+NoAudio
+
+PreRoll 20
+StartSendOnKey
+MaxTime 100
+
+</Stream>
+
+
+<Stream test.jpg>
+
+Feed feed1.ffm
+Format jpeg
+Strict -1
+
+BitExact
+DctFastint
+IdctSimple
+VideoFrameRate 1
+VideoSize 352x288
+NoAudio
+
+PreRoll 2
+
+</Stream>
+
+<Stream test_small.jpg>
+
+Feed feed1.ffm
+Format jpeg
+Strict -1
+
+BitExact
+DctFastint
+IdctSimple
+VideoFrameRate 1
+VideoSize 160x128
+NoAudio
+
+PreRoll 2
+
+</Stream>
+
+<Stream test.mjpg>
+
+Feed feed1.ffm
+Format mpjpeg
+Strict -1
+
+BitExact
+DctFastint
+IdctSimple
+VideoFrameRate 1
+VideoSize 320x240
+NoAudio
+StartSendOnKey
+
+PreRoll 1
+MaxTime 100
+
+</Stream>
+
+
+##################################################################
+# Special stream : server status
+
+<Stream teststat.html>
+
+Format status
+
+</Stream>
+
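Once ffserver is running with this configuration, individual streams can be probed the same way server-regression.sh does; for example (the host, port and stream names come from the directives above):

    wget -q --user-agent=NSPlayer -O - http://localhost:9999/teststat.html
    wget -q --user-agent=NSPlayer -O ff-test_h.avi http://localhost:9999/test_h.avi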
diff --git a/contrib/ffmpeg/tests/tiny_psnr.c b/contrib/ffmpeg/tests/tiny_psnr.c
new file mode 100644
index 000000000..d8bce2b7b
--- /dev/null
+++ b/contrib/ffmpeg/tests/tiny_psnr.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#define F 100
+#define SIZE 2048
+
+uint64_t exp16_table[21]={
+ 65537,
+ 65538,
+ 65540,
+ 65544,
+ 65552,
+ 65568,
+ 65600,
+ 65664,
+ 65793,
+ 66050,
+ 66568,
+ 67616,
+ 69763,
+ 74262,
+ 84150,
+ 108051,
+ 178145,
+ 484249,
+ 3578144,
+ 195360063,
+ 582360139072LL,
+};
+#if 1
+// 16.16 fixpoint exp()
+static unsigned int exp16(unsigned int a){
+ int i;
+ int out= 1<<16;
+
+ for(i=19;i>=0;i--){
+ if(a&(1<<i))
+ out= (out*exp16_table[i] + (1<<15))>>16;
+ }
+
+ return out;
+}
+// 16.16 fixpoint log()
+static int64_t log16(uint64_t a){
+ int i;
+ int out=0;
+
+ if(a < 1<<16)
+ return -log16((1LL<<32) / a);
+ a<<=16;
+
+ for(i=20;i>=0;i--){
+ int64_t b= exp16_table[i];
+ if(a<(b<<16)) continue;
+ out |= 1<<i;
+ a = ((a/b)<<16) + (((a%b)<<16) + b/2)/b;
+ }
+ return out;
+}
+
+#endif
+static uint64_t int_sqrt(uint64_t a)
+{
+ uint64_t ret=0;
+ int s;
+ uint64_t ret_sq=0;
+
+ for(s=31; s>=0; s--){
+ uint64_t b= ret_sq + (1ULL<<(s*2)) + (ret<<s)*2;
+ if(b<=a){
+ ret_sq=b;
+ ret+= 1ULL<<s;
+ }
+ }
+ return ret;
+}
+
+int main(int argc,char* argv[]){
+ int i, j;
+ uint64_t sse=0;
+ uint64_t dev;
+ FILE *f[2];
+ uint8_t buf[2][SIZE];
+ uint64_t psnr;
+ int len= argc<4 ? 1 : atoi(argv[3]);
+ int64_t max= (1<<(8*len))-1;
+ int shift= argc<5 ? 0 : atoi(argv[4]);
+ int skip_bytes = argc<6 ? 0 : atoi(argv[5]);
+
+ if(argc<3){
+ printf("tiny_psnr <file1> <file2> [<elem size> [<shift> [<skip bytes>]]]\n");
+ printf("for wav files use the following:\n");
+ printf("./tiny_psnr file1.wav file2.wav 2 0 44 to skip the header.\n");
+ return -1;
+ }
+
+ f[0]= fopen(argv[1], "rb");
+ f[1]= fopen(argv[2], "rb");
+ fseek(f[shift<0], shift < 0 ? -shift : shift, SEEK_SET);
+
+ fseek(f[0],skip_bytes,SEEK_CUR);
+ fseek(f[1],skip_bytes,SEEK_CUR);
+
+ for(i=0;;){
+ if( fread(buf[0], SIZE, 1, f[0]) != 1) break;
+ if( fread(buf[1], SIZE, 1, f[1]) != 1) break;
+
+ for(j=0; j<SIZE; i++,j++){
+ int64_t a= buf[0][j];
+ int64_t b= buf[1][j];
+ if(len==2){
+ a= (int16_t)(a | (buf[0][++j]<<8));
+ b= (int16_t)(b | (buf[1][ j]<<8));
+ }
+ sse += (a-b) * (a-b);
+ }
+ }
+
+ if(!i) i=1;
+ dev= int_sqrt( ((sse/i)*F*F) + (((sse%i)*F*F) + i/2)/i );
+ if(sse)
+ psnr= ((2*log16(max<<16) + log16(i) - log16(sse))*284619LL*F + (1<<31)) / (1LL<<32);
+ else
+ psnr= 100*F-1; //floating point free infinity :)
+
+ printf("stddev:%3d.%02d PSNR:%2d.%02d bytes:%d\n",
+ (int)(dev/F), (int)(dev%F),
+ (int)(psnr/F), (int)(psnr%F),
+ i*len);
+ return 0;
+}
+
+
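For reference, the fixed-point arithmetic above corresponds to the usual definitions (this is a reading of the code, not a statement from its author): over N compared samples of element size len, with max = 2^(8*len) - 1,

    \mathrm{stddev} = \sqrt{\mathrm{SSE}/N}, \qquad
    \mathrm{PSNR} = 10\,\log_{10}\!\left(\mathrm{max}^2\,N/\mathrm{SSE}\right)

and PSNR is reported as 99.99 when SSE is zero.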
diff --git a/contrib/ffmpeg/tests/videogen.c b/contrib/ffmpeg/tests/videogen.c
new file mode 100644
index 000000000..a6b8ce167
--- /dev/null
+++ b/contrib/ffmpeg/tests/videogen.c
@@ -0,0 +1,278 @@
+/*
+ * Generates a synthetic YUV video sequence suitable for codec testing.
+ * NOTE: no floats are used to guarantee a bit-exact output.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+
+#define SCALEBITS 8
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1L<<SCALEBITS) + 0.5))
+typedef unsigned char uint8_t;
+
+static void rgb24_to_yuv420p(uint8_t *lum, uint8_t *cb, uint8_t *cr,
+ uint8_t *src, int width, int height)
+{
+ int wrap, wrap3, x, y;
+ int r, g, b, r1, g1, b1;
+ uint8_t *p;
+
+ wrap = width;
+ wrap3 = width * 3;
+ p = src;
+ for(y=0;y<height;y+=2) {
+ for(x=0;x<width;x+=2) {
+ r = p[0];
+ g = p[1];
+ b = p[2];
+ r1 = r;
+ g1 = g;
+ b1 = b;
+ lum[0] = (FIX(0.29900) * r + FIX(0.58700) * g +
+ FIX(0.11400) * b + ONE_HALF) >> SCALEBITS;
+ r = p[3];
+ g = p[4];
+ b = p[5];
+ r1 += r;
+ g1 += g;
+ b1 += b;
+ lum[1] = (FIX(0.29900) * r + FIX(0.58700) * g +
+ FIX(0.11400) * b + ONE_HALF) >> SCALEBITS;
+ p += wrap3;
+ lum += wrap;
+
+ r = p[0];
+ g = p[1];
+ b = p[2];
+ r1 += r;
+ g1 += g;
+ b1 += b;
+ lum[0] = (FIX(0.29900) * r + FIX(0.58700) * g +
+ FIX(0.11400) * b + ONE_HALF) >> SCALEBITS;
+ r = p[3];
+ g = p[4];
+ b = p[5];
+ r1 += r;
+ g1 += g;
+ b1 += b;
+ lum[1] = (FIX(0.29900) * r + FIX(0.58700) * g +
+ FIX(0.11400) * b + ONE_HALF) >> SCALEBITS;
+
+ cb[0] = ((- FIX(0.16874) * r1 - FIX(0.33126) * g1 +
+ FIX(0.50000) * b1 + 4 * ONE_HALF - 1) >> (SCALEBITS + 2)) + 128;
+ cr[0] = ((FIX(0.50000) * r1 - FIX(0.41869) * g1 -
+ FIX(0.08131) * b1 + 4 * ONE_HALF - 1) >> (SCALEBITS + 2)) + 128;
+
+ cb++;
+ cr++;
+ p += -wrap3 + 2 * 3;
+ lum += -wrap + 2;
+ }
+ p += wrap3;
+ lum += wrap;
+ }
+}
+
+/* cif format */
+#define DEFAULT_WIDTH 352
+#define DEFAULT_HEIGHT 288
+#define DEFAULT_NB_PICT 50 /* 2 seconds */
+
+void pgmyuv_save(const char *filename, int w, int h,
+ unsigned char *rgb_tab)
+{
+ FILE *f;
+ int i, h2, w2;
+ unsigned char *cb, *cr;
+ unsigned char *lum_tab, *cb_tab, *cr_tab;
+
+ lum_tab = malloc(w * h);
+ cb_tab = malloc((w * h) / 4);
+ cr_tab = malloc((w * h) / 4);
+
+ rgb24_to_yuv420p(lum_tab, cb_tab, cr_tab, rgb_tab, w, h);
+
+ f = fopen(filename,"wb");
+ fprintf(f, "P5\n%d %d\n%d\n", w, (h * 3) / 2, 255);
+ fwrite(lum_tab, 1, w * h, f);
+ h2 = h / 2;
+ w2 = w / 2;
+ cb = cb_tab;
+ cr = cr_tab;
+ for(i=0;i<h2;i++) {
+ fwrite(cb, 1, w2, f);
+ fwrite(cr, 1, w2, f);
+ cb += w2;
+ cr += w2;
+ }
+ fclose(f);
+
+ free(lum_tab);
+ free(cb_tab);
+ free(cr_tab);
+}
+
+unsigned char *rgb_tab;
+int width, height, wrap;
+
+void put_pixel(int x, int y, int r, int g, int b)
+{
+ unsigned char *p;
+
+ if (x < 0 || x >= width ||
+ y < 0 || y >= height)
+ return;
+
+ p = rgb_tab + y * wrap + x * 3;
+ p[0] = r;
+ p[1] = g;
+ p[2] = b;
+}
+
+static unsigned int myrnd(unsigned int *seed_ptr, int n)
+{
+ unsigned int seed, val;
+
+ seed = *seed_ptr;
+ seed = (seed * 314159) + 1;
+ if (n == 256) {
+ val = seed >> 24;
+ } else {
+ val = seed % n;
+ }
+ *seed_ptr = seed;
+ return val;
+}
+
+#define NOISE_X 10
+#define NOISE_Y 30
+#define NOISE_W 26
+
+#define FRAC_BITS 8
+#define FRAC_ONE (1 << FRAC_BITS)
+
+/* cosine approximate with 1-x^2 */
+int int_cos(int a)
+{
+ int v, neg;
+ a = a & (FRAC_ONE - 1);
+ if (a >= (FRAC_ONE / 2))
+ a = FRAC_ONE - a;
+ neg = 0;
+ if (a > (FRAC_ONE / 4)) {
+ neg = -1;
+ a = (FRAC_ONE / 2) - a;
+ }
+ v = FRAC_ONE - ((a * a) >> 4);
+ v = (v ^ neg) - neg;
+ return v;
+}
+
+#define NB_OBJS 10
+
+typedef struct VObj {
+ int x, y, w, h;
+ int r, g, b;
+} VObj;
+
+VObj objs[NB_OBJS];
+
+unsigned int seed = 1;
+
+void gen_image(int num, int w, int h)
+{
+ int r, g, b, x, y, i, dx, dy, x1, y1;
+ unsigned int seed1;
+
+ if (num == 0) {
+ for(i=0;i<NB_OBJS;i++) {
+ objs[i].x = myrnd(&seed, w);
+ objs[i].y = myrnd(&seed, h);
+ objs[i].w = myrnd(&seed, w / 4) + 10;
+ objs[i].h = myrnd(&seed, h / 4) + 10;
+ objs[i].r = myrnd(&seed, 256);
+ objs[i].g = myrnd(&seed, 256);
+ objs[i].b = myrnd(&seed, 256);
+ }
+ }
+
+ /* first a moving background with gradients */
+ /* test motion estimation */
+ dx = int_cos(num * FRAC_ONE / 50) * 35;
+ dy = int_cos(num * FRAC_ONE / 50 + FRAC_ONE / 10) * 30;
+ for(y=0;y<h;y++) {
+ for(x=0;x<w;x++) {
+ x1 = (x << FRAC_BITS) + dx;
+ y1 = (y << FRAC_BITS) + dx;
+ r = ((y1 * 7) >> FRAC_BITS) & 0xff;
+ g = (((x1 + y1) * 9) >> FRAC_BITS) & 0xff;
+ b = ((x1 * 5) >> FRAC_BITS) & 0xff;
+ put_pixel(x, y, r, g, b);
+ }
+ }
+
+ /* then some noise with very high intensity to test saturation */
+ seed1 = num;
+ for(y=0;y<NOISE_W;y++) {
+ for(x=0;x<NOISE_W;x++) {
+ r = myrnd(&seed1, 256);
+ g = myrnd(&seed1, 256);
+ b = myrnd(&seed1, 256);
+ put_pixel(x + NOISE_X, y + NOISE_Y, r, g, b);
+ }
+ }
+
+ /* then moving objects */
+ for(i=0;i<NB_OBJS;i++) {
+ VObj *p = &objs[i];
+ seed1 = i;
+ for(y=0;y<p->h;y++) {
+ for(x=0;x<p->w;x++) {
+ r = p->r;
+ g = p->g;
+ b = p->b;
+ /* add a per object noise */
+ r += myrnd(&seed1, 50);
+ g += myrnd(&seed1, 50);
+ b += myrnd(&seed1, 50);
+ put_pixel(x + p->x, y + p->y, r, g, b);
+ }
+ }
+ p->x += myrnd(&seed, 21) - 10;
+ p->y += myrnd(&seed, 21) - 10;
+ }
+}
+
+int main(int argc, char **argv)
+{
+ int w, h, i;
+ char buf[1024];
+
+ if (argc != 2) {
+ printf("usage: %s file\n"
+ "generate a test video stream\n", argv[0]);
+ exit(1);
+ }
+
+#if 0
+ for(i=0;i<256;i++)
+ printf("cos(%d)=%d\n", i, int_cos(i));
+#endif
+
+ w = DEFAULT_WIDTH;
+ h = DEFAULT_HEIGHT;
+
+ rgb_tab = malloc(w * h * 3);
+ wrap = w * 3;
+ width = w;
+ height = h;
+
+ for(i=0;i<DEFAULT_NB_PICT;i++) {
+ snprintf(buf, sizeof(buf), "%s%02d.pgm", argv[1], i);
+ gen_image(i, w, h);
+ pgmyuv_save(buf, w, h, rgb_tab);
+ }
+
+ free(rgb_tab);
+ return 0;
+}
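A usage sketch: argv[1] is a filename prefix, so a trailing slash puts the frames into a directory; vsynth1 matches the subdirectory mentioned in test.conf above:

    mkdir -p vsynth1
    ./videogen vsynth1/    # writes vsynth1/00.pgm ... vsynth1/49.pgm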
diff --git a/contrib/ffmpeg/unwrap-diff b/contrib/ffmpeg/unwrap-diff
new file mode 100755
index 000000000..ccea99b7b
--- /dev/null
+++ b/contrib/ffmpeg/unwrap-diff
@@ -0,0 +1,2 @@
+#!/bin/sh
+tr '\n' '\001' | sed 's/\x01\x01/\x01 \x01/g' | sed 's/\x01\([^-+ @]\)/ \1/g' | tr '\001' '\n'
diff --git a/contrib/ffmpeg/version.sh b/contrib/ffmpeg/version.sh
new file mode 100755
index 000000000..c04822f46
--- /dev/null
+++ b/contrib/ffmpeg/version.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+svn_revision=`cd "$1" && LC_ALL=C svn info 2> /dev/null | grep Revision | cut -d' ' -f2`
+test $svn_revision || svn_revision=`cd "$1" && grep revision .svn/entries | \
+ cut -d '"' -f2 2> /dev/null`
+test $svn_revision || svn_revision=UNKNOWN
+
+NEW_REVISION="#define FFMPEG_VERSION \"SVN-r$svn_revision\""
+OLD_REVISION=`cat version.h 2> /dev/null`
+
+# Update version.h only on revision changes to avoid spurious rebuilds
+if test "$NEW_REVISION" != "$OLD_REVISION"; then
+ echo "$NEW_REVISION" > version.h
+fi
diff --git a/contrib/ffmpeg/vhook/Makefile b/contrib/ffmpeg/vhook/Makefile
new file mode 100644
index 000000000..06b48935e
--- /dev/null
+++ b/contrib/ffmpeg/vhook/Makefile
@@ -0,0 +1,51 @@
+include ../config.mak
+
+VPATH=$(SRC_PATH_BARE)/vhook
+
+CFLAGS=-I$(BUILD_ROOT) -I$(SRC_PATH) -I$(SRC_PATH)/libavutil -I$(SRC_PATH)/libavcodec \
+ -I$(SRC_PATH)/libavformat -I$(SRC_PATH)/libswscale $(VHOOKCFLAGS) -DHAVE_AV_CONFIG_H
+LDFLAGS+= -g
+
+HOOKS=null$(SLIBSUF) fish$(SLIBSUF) ppm$(SLIBSUF) watermark$(SLIBSUF)
+ALLHOOKS=$(HOOKS) imlib2$(SLIBSUF) drawtext$(SLIBSUF)
+
+ifeq ($(HAVE_IMLIB2),yes)
+ HOOKS += imlib2$(SLIBSUF)
+ LIBS_imlib2$(SLIBSUF) = -lImlib2
+endif
+
+ifeq ($(HAVE_FREETYPE2),yes)
+ HOOKS += drawtext$(SLIBSUF)
+ CFLAGS += `freetype-config --cflags`
+ LIBS_drawtext$(SLIBSUF) = `freetype-config --libs`
+endif
+
+SRCS := $(HOOKS:$(SLIBSUF)=.c)
+
+all: $(HOOKS)
+
+depend dep: $(SRCS)
+ $(CC) -MM $(CFLAGS) $^ 1>.depend
+
+install: $(HOOKS)
+ install -d "$(shlibdir)/vhook"
+ install -m 755 $(HOOKS) "$(shlibdir)/vhook"
+
+uninstall:
+ rm -f $(addprefix $(shlibdir)/vhook/,$(ALLHOOKS))
+ -rmdir "$(shlibdir)/vhook/"
+
+%$(SLIBSUF): %.o
+ $(CC) $(LDFLAGS) -o $@ $(VHOOKSHFLAGS) $< $(VHOOKLIBS) $(LIBS_$@)
+
+clean:
+ rm -f *.o *.d *~ *.a *.lib *.so *.dylib *.dll
+
+distclean: clean
+ rm -f .depend
+
+.PHONY: all depend dep clean distclean install* uninstall*
+
+ifneq ($(wildcard .depend),)
+include .depend
+endif
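A build sketch, run from the top of the ffmpeg build tree after configure has produced config.mak (the imlib2 and drawtext hooks are only built when the corresponding HAVE_* flags are set there):

    make -C vhook all       # null, fish, ppm, watermark (+ optional hooks)
    make -C vhook install   # copies the shared objects into $(shlibdir)/vhook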
diff --git a/contrib/ffmpeg/vhook/drawtext.c b/contrib/ffmpeg/vhook/drawtext.c
new file mode 100644
index 000000000..081847620
--- /dev/null
+++ b/contrib/ffmpeg/vhook/drawtext.c
@@ -0,0 +1,531 @@
+/*
+ * drawtext.c: draw text on top of the video frames
+ ******************************************************************************
+ * Options:
+ * -f <filename> font filename (MANDATORY!!!)
+ * -s <pixel_size> font size in pixels [default 16]
+ * -b print background
+ * -o outline glyphs (use the bg color)
+ * -x <pos> x position ( >= 0) [default 0]
+ * -y <pos> y position ( >= 0) [default 0]
+ * -t <text> text to print (will be passed to strftime())
+ *                    MANDATORY: used even when -T is given; in that case
+ *                    -t serves as the fallback text if the file cannot be
+ *                    read
+ * -T <filename> file with the text (re-read every frame)
+ * -c <#RRGGBB> foreground color ('internet' way) [default #ffffff]
+ * -C <#RRGGBB> background color ('internet' way) [default #000000]
+ *
+ ******************************************************************************
+ * Features:
+ * - True Type, Type1 and others via FreeType2 library
+ * - Font kerning (better output)
+ *          - Line Wrap (if the text doesn't fit, the next char goes to the next line)
+ * - Background box
+ * - Outline
+ ******************************************************************************
+ * Author: Gustavo Sverzut Barbieri <gsbarbieri@yahoo.com.br>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define MAXSIZE_TEXT 1024
+
+#include "framehook.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#undef time
+#include <sys/time.h>
+#include <time.h>
+
+#include <ft2build.h>
+#include FT_FREETYPE_H
+#include FT_GLYPH_H
+
+#define SCALEBITS 10
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define RGB_TO_YUV(rgb_color, yuv_color) do { \
+ yuv_color[0] = (FIX(0.29900) * rgb_color[0] + FIX(0.58700) * rgb_color[1] + FIX(0.11400) * rgb_color[2] + ONE_HALF) >> SCALEBITS; \
+ yuv_color[2] = ((FIX(0.50000) * rgb_color[0] - FIX(0.41869) * rgb_color[1] - FIX(0.08131) * rgb_color[2] + ONE_HALF - 1) >> SCALEBITS) + 128; \
+ yuv_color[1] = ((- FIX(0.16874) * rgb_color[0] - FIX(0.33126) * rgb_color[1] + FIX(0.50000) * rgb_color[2] + ONE_HALF - 1) >> SCALEBITS) + 128; \
+} while (0)
+
+#define COPY_3(dst,src) { \
+ dst[0]=src[0]; \
+ dst[1]=src[1]; \
+ dst[2]=src[2]; \
+}
+
+
+
+#define SET_PIXEL(picture, yuv_color, x, y) { \
+ picture->data[0][ (x) + (y)*picture->linesize[0] ] = yuv_color[0]; \
+ picture->data[1][ ((x/2) + (y/2)*picture->linesize[1]) ] = yuv_color[1]; \
+ picture->data[2][ ((x/2) + (y/2)*picture->linesize[2]) ] = yuv_color[2]; \
+}
+
+#define GET_PIXEL(picture, yuv_color, x, y) { \
+ yuv_color[0] = picture->data[0][ (x) + (y)*picture->linesize[0] ]; \
+ yuv_color[1] = picture->data[1][ (x/2) + (y/2)*picture->linesize[1] ]; \
+ yuv_color[2] = picture->data[2][ (x/2) + (y/2)*picture->linesize[2] ]; \
+}
+
+
+typedef struct {
+ unsigned char *text;
+ char *file;
+ unsigned int x;
+ unsigned int y;
+ int bg;
+ int outline;
+ unsigned char bgcolor[3]; /* YUV */
+ unsigned char fgcolor[3]; /* YUV */
+ FT_Library library;
+ FT_Face face;
+ FT_Glyph glyphs[ 255 ];
+ FT_Bitmap bitmaps[ 255 ];
+ int advance[ 255 ];
+ int bitmap_left[ 255 ];
+ int bitmap_top[ 255 ];
+ unsigned int glyphs_index[ 255 ];
+ int text_height;
+ int baseline;
+ int use_kerning;
+} ContextInfo;
+
+
+void Release(void *ctx)
+{
+ if (ctx)
+ av_free(ctx);
+}
+
+
+static int ParseColor(char *text, unsigned char yuv_color[3])
+{
+ char tmp[3];
+ unsigned char rgb_color[3];
+ int i;
+
+ tmp[2] = '\0';
+
+ if ((!text) || (strlen(text) != 7) || (text[0] != '#') )
+ return -1;
+
+ for (i=0; i < 3; i++)
+ {
+ tmp[0] = text[i*2+1];
+ tmp[1] = text[i*2+2];
+
+ rgb_color[i] = strtol(tmp, NULL, 16);
+ }
+
+ RGB_TO_YUV(rgb_color, yuv_color);
+
+ return 0;
+}
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ int c;
+ int error;
+ ContextInfo *ci=NULL;
+ char *font=NULL;
+ unsigned int size=16;
+ FT_BBox bbox;
+ int yMax, yMin;
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ ci = (ContextInfo *) *ctxp;
+
+ /* configure Context Info */
+ ci->text = NULL;
+ ci->file = NULL;
+ ci->x = ci->y = 0;
+ ci->fgcolor[0]=255;
+ ci->fgcolor[1]=128;
+ ci->fgcolor[2]=128;
+ ci->bgcolor[0]=0;
+ ci->bgcolor[1]=128;
+ ci->bgcolor[2]=128;
+ ci->bg = 0;
+ ci->outline = 0;
+ ci->text_height = 0;
+
+ optind = 0;
+ while ((c = getopt(argc, argv, "f:t:T:x:y:s:c:C:bo")) > 0) {
+ switch (c) {
+ case 'f':
+ font = optarg;
+ break;
+ case 't':
+ ci->text = av_strdup(optarg);
+ break;
+ case 'T':
+ ci->file = av_strdup(optarg);
+ break;
+ case 'x':
+ ci->x = (unsigned int) atoi(optarg);
+ break;
+ case 'y':
+ ci->y = (unsigned int) atoi(optarg);
+ break;
+ case 's':
+ size = (unsigned int) atoi(optarg);
+ break;
+ case 'c':
+ if (ParseColor(optarg, ci->fgcolor) == -1)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Invalid foreground color: '%s'. You must specify the color in the internet way(packaged hex): #RRGGBB, ie: -c #ffffff (for white foreground)\n", optarg);
+ return -1;
+ }
+ break;
+ case 'C':
+ if (ParseColor(optarg, ci->bgcolor) == -1)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Invalid foreground color: '%s'. You must specify the color in the internet way(packaged hex): #RRGGBB, ie: -c #ffffff (for white foreground)\n", optarg);
+ return -1;
+ }
+ break;
+ case 'b':
+ ci->bg=1;
+ break;
+ case 'o':
+ ci->outline=1;
+ break;
+ case '?':
+ av_log(NULL, AV_LOG_ERROR, "Unrecognized argument '%s'\n", argv[optind]);
+ return -1;
+ }
+ }
+
+ if (!ci->text)
+ {
+ av_log(NULL, AV_LOG_ERROR, "No text provided (-t text)\n");
+ return -1;
+ }
+
+ if (ci->file)
+ {
+ FILE *fp;
+ if ((fp=fopen(ci->file, "r")) == NULL)
+ {
+ av_log(NULL, AV_LOG_INFO, "WARNING: The file could not be opened. Using text provided with -t switch: %s", strerror(errno));
+ }
+ else
+ {
+ fclose(fp);
+ }
+ }
+
+ if (!font)
+ {
+ av_log(NULL, AV_LOG_ERROR, "No font file provided! (-f filename)\n");
+ return -1;
+ }
+
+ if ((error = FT_Init_FreeType(&(ci->library))) != 0)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Could not load FreeType (error# %d).\n", error);
+ return -1;
+ }
+
+ if ((error = FT_New_Face( ci->library, font, 0, &(ci->face) )) != 0)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Could not load face: %s (error# %d).\n", font, error);
+ return -1;
+ }
+
+ if ((error = FT_Set_Pixel_Sizes( ci->face, 0, size)) != 0)
+ {
+ av_log(NULL, AV_LOG_ERROR, "Could not set font size to %d pixels (error# %d).\n", size, error);
+ return -1;
+ }
+
+ ci->use_kerning = FT_HAS_KERNING(ci->face);
+
+ /* load and cache glyphs */
+ yMax = -32000;
+ yMin = 32000;
+ for (c=0; c < 256; c++)
+ {
+ /* Load char */
+ error = FT_Load_Char( ci->face, (unsigned char) c, FT_LOAD_RENDER | FT_LOAD_MONOCHROME );
+ if (error) continue; /* ignore errors */
+
+ /* Save bitmap */
+ ci->bitmaps[c] = ci->face->glyph->bitmap;
+ /* Save bitmap left */
+ ci->bitmap_left[c] = ci->face->glyph->bitmap_left;
+ /* Save bitmap top */
+ ci->bitmap_top[c] = ci->face->glyph->bitmap_top;
+
+ /* Save advance */
+ ci->advance[c] = ci->face->glyph->advance.x >> 6;
+
+ /* Save glyph */
+ error = FT_Get_Glyph( ci->face->glyph, &(ci->glyphs[c]) );
+ /* Save glyph index */
+ ci->glyphs_index[c] = FT_Get_Char_Index( ci->face, (unsigned char) c );
+
+ /* Measure text height to calculate text_height (or the maximum text height) */
+ FT_Glyph_Get_CBox( ci->glyphs[ c ], ft_glyph_bbox_pixels, &bbox );
+ if (bbox.yMax > yMax)
+ yMax = bbox.yMax;
+ if (bbox.yMin < yMin)
+ yMin = bbox.yMin;
+
+ }
+
+ ci->text_height = yMax - yMin;
+ ci->baseline = yMax;
+
+ return 0;
+}
+
+
+
+
+static inline void draw_glyph(AVPicture *picture, FT_Bitmap *bitmap, unsigned int x, unsigned int y, unsigned int width, unsigned int height, unsigned char yuv_fgcolor[3], unsigned char yuv_bgcolor[3], int outline)
+{
+ int r, c;
+ int spixel, dpixel[3], in_glyph=0;
+
+ if (bitmap->pixel_mode == ft_pixel_mode_mono)
+ {
+ in_glyph = 0;
+ for (r=0; (r < bitmap->rows) && (r+y < height); r++)
+ {
+ for (c=0; (c < bitmap->width) && (c+x < width); c++)
+ {
+ /* pixel in the picture (destination) */
+ GET_PIXEL(picture, dpixel, (c+x), (y+r));
+
+ /* pixel in the glyph bitmap (source) */
+ spixel = bitmap->buffer[r*bitmap->pitch +c/8] & (0x80>>(c%8));
+
+ if (spixel)
+ COPY_3(dpixel, yuv_fgcolor);
+
+ if (outline)
+ {
+ /* border detection: */
+ if ( (!in_glyph) && (spixel) )
+ /* left border detected */
+ {
+ in_glyph = 1;
+ /* draw left pixel border */
+ if (c-1 >= 0)
+ SET_PIXEL(picture, yuv_bgcolor, (c+x-1), (y+r));
+ }
+ else if ( (in_glyph) && (!spixel) )
+ /* right border detected */
+ {
+ in_glyph = 0;
+ /* 'draw' right pixel border */
+ COPY_3(dpixel, yuv_bgcolor);
+ }
+
+ if (in_glyph)
+ /* see if we have a top/bottom border */
+ {
+ /* top */
+ if ( (r-1 >= 0) && !(bitmap->buffer[(r-1)*bitmap->pitch +c/8] & (0x80>>(c%8))) )
+ /* we have a top border */
+ SET_PIXEL(picture, yuv_bgcolor, (c+x), (y+r-1));
+
+ /* bottom */
+ if ( (r+1 < height) && !(bitmap->buffer[(r+1)*bitmap->pitch +c/8] & (0x80>>(c%8))) )
+ /* we have a bottom border */
+ SET_PIXEL(picture, yuv_bgcolor, (c+x), (y+r+1));
+
+ }
+ }
+
+ SET_PIXEL(picture, dpixel, (c+x), (y+r));
+ }
+ }
+ }
+}
+
+
+static inline void draw_box(AVPicture *picture, unsigned int x, unsigned int y, unsigned int width, unsigned int height, unsigned char yuv_color[3])
+{
+ int i, j;
+
+ for (j = 0; (j < height); j++)
+ for (i = 0; (i < width); i++)
+ {
+ SET_PIXEL(picture, yuv_color, (i+x), (y+j));
+ }
+
+}
+
+
+
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ FT_Face face = ci->face;
+ FT_GlyphSlot slot = face->glyph;
+ unsigned char *text = ci->text;
+ unsigned char c;
+ int x = 0, y = 0, i=0, size=0;
+ unsigned char buff[MAXSIZE_TEXT];
+ unsigned char tbuff[MAXSIZE_TEXT];
+ time_t now = time(0);
+ int str_w, str_w_max;
+ FT_Vector pos[MAXSIZE_TEXT];
+ FT_Vector delta;
+
+ if (ci->file)
+ {
+ int fd = open(ci->file, O_RDONLY);
+
+ if (fd < 0)
+ {
+ text = ci->text;
+ av_log(NULL, AV_LOG_INFO, "WARNING: The file could not be opened. Using text provided with -t switch: %s\n", strerror(errno));
+ }
+ else
+ {
+ int l = read(fd, tbuff, sizeof(tbuff) - 1);
+
+ if (l >= 0)
+ {
+ tbuff[l] = 0;
+ text = tbuff;
+ }
+ else
+ {
+ text = ci->text;
+ av_log(NULL, AV_LOG_INFO, "WARNING: The file could not be read. Using text provided with -t switch: %s\n", strerror(errno));
+ }
+ close(fd);
+ }
+ }
+ else
+ {
+ text = ci->text;
+ }
+
+ strftime(buff, sizeof(buff), text, localtime(&now));
+
+ text = buff;
+
+ size = strlen(text);
+
+
+
+
+ /* measure string size and save glyphs position*/
+ str_w = str_w_max = 0;
+ x = ci->x;
+ y = ci->y;
+ for (i=0; i < size; i++)
+ {
+ c = text[i];
+
+ /* kerning */
+ if ( (ci->use_kerning) && (i > 0) && (ci->glyphs_index[c]) )
+ {
+ FT_Get_Kerning( ci->face,
+ ci->glyphs_index[ text[i-1] ],
+ ci->glyphs_index[c],
+ ft_kerning_default,
+ &delta );
+
+ x += delta.x >> 6;
+ }
+
+ if (( (x + ci->advance[ c ]) >= width ) || ( c == '\n' ))
+ {
+ str_w = width - ci->x - 1;
+
+ y += ci->text_height;
+ x = ci->x;
+ }
+
+
+ /* save position */
+ pos[i].x = x + ci->bitmap_left[c];
+ pos[i].y = y - ci->bitmap_top[c] + ci->baseline;
+
+
+ x += ci->advance[c];
+
+
+ if (str_w > str_w_max)
+ str_w_max = str_w;
+
+ }
+
+
+
+
+ if (ci->bg)
+ {
+ /* Check if it doesn't pass the limits */
+ if ( str_w_max + ci->x >= width )
+ str_w_max = width - ci->x - 1;
+ if ( y >= height )
+ y = height - 1 - 2*ci->y;
+
+ /* Draw Background */
+ draw_box( picture, ci->x, ci->y, str_w_max, y - ci->y, ci->bgcolor );
+ }
+
+
+
+ /* Draw Glyphs */
+ for (i=0; i < size; i++)
+ {
+ c = text[i];
+
+ if (
+ ( (c == '_') && (text == ci->text) ) || /* skip '_' (render it as a space)
+ when the text was given on the command
+ line, which does not allow nested quotes */
+ ( c == '\n' ) /* Skip new line char, just go to new line */
+ )
+ continue;
+
+ /* now, draw to our target surface */
+ draw_glyph( picture,
+ &(ci->bitmaps[ c ]),
+ pos[i].x,
+ pos[i].y,
+ width,
+ height,
+ ci->fgcolor,
+ ci->bgcolor,
+ ci->outline );
+
+ /* increment pen position */
+ x += slot->advance.x >> 6;
+ }
+
+
+}
+
diff --git a/contrib/ffmpeg/vhook/fish.c b/contrib/ffmpeg/vhook/fish.c
new file mode 100644
index 000000000..2a30d2847
--- /dev/null
+++ b/contrib/ffmpeg/vhook/fish.c
@@ -0,0 +1,380 @@
+/*
+ * Fish Detector Hook
+ * Copyright (c) 2002 Philip Gladstone
+ *
+ * This file implements a fish detector. It is used to see when a
+ * goldfish passes in front of the camera. It does this by counting
+ * the number of input pixels that fall within a particular HSV
+ * range.
+ *
+ * It takes a multitude of arguments:
+ *
+ * -h <num>-<num> the range of H values that are fish
+ * -s <num>-<num> the range of S values that are fish
+ * -v <num>-<num> the range of V values that are fish
+ * -z zap all non-fish values to black
+ * -l <num> limit the number of saved files to <num>
+ * -i <num> only check frames every <num> seconds
+ * -t <num> the threshold for the amount of fish pixels (range 0-1)
+ * -d turn debugging on
+ * -D <directory> where to put the fish images
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <stdlib.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <string.h>
+#include <time.h>
+#include <stdio.h>
+#include <dirent.h>
+
+#include "framehook.h"
+#include "dsputil.h"
+#include "avformat.h"
+#include "swscale.h"
+
+static int sws_flags = SWS_BICUBIC;
+
+#define SCALEBITS 10
+#define ONE_HALF (1 << (SCALEBITS - 1))
+#define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
+
+#define YUV_TO_RGB1_CCIR(cb1, cr1)\
+{\
+ cb = (cb1) - 128;\
+ cr = (cr1) - 128;\
+ r_add = FIX(1.40200*255.0/224.0) * cr + ONE_HALF;\
+ g_add = - FIX(0.34414*255.0/224.0) * cb - FIX(0.71414*255.0/224.0) * cr + \
+ ONE_HALF;\
+ b_add = FIX(1.77200*255.0/224.0) * cb + ONE_HALF;\
+}
+
+#define YUV_TO_RGB2_CCIR(r, g, b, y1)\
+{\
+ yt = ((y1) - 16) * FIX(255.0/219.0);\
+ r = cm[(yt + r_add) >> SCALEBITS];\
+ g = cm[(yt + g_add) >> SCALEBITS];\
+ b = cm[(yt + b_add) >> SCALEBITS];\
+}
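+
+/* These two macros expand CCIR-601 studio-range YUV (luma 16..235, chroma
+   16..240) back to full-range RGB; cm points into ff_cropTbl so the results
+   are clamped to 0..255 without explicit branches. */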
+
+
+
+
+typedef struct {
+ int h; /* 0 .. 360 */
+ int s; /* 0 .. 255 */
+ int v; /* 0 .. 255 */
+} HSV;
+
+typedef struct {
+ int zapping;
+ int threshold;
+ HSV dark, bright;
+ char *dir;
+ int file_limit;
+ int debug;
+ int min_interval;
+ int64_t next_pts;
+ int inset;
+ int min_width;
+ struct SwsContext *toRGB_convert_ctx;
+} ContextInfo;
+
+static void dorange(const char *s, int *first, int *second, int maxval)
+{
+ sscanf(s, "%d-%d", first, second);
+ if (*first > maxval)
+ *first = maxval;
+ if (*second > maxval)
+ *second = maxval;
+}
+
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ctx) {
+ sws_freeContext(ci->toRGB_convert_ctx);
+ av_free(ctx);
+ }
+}
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ ContextInfo *ci;
+ int c;
+
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ ci = (ContextInfo *) *ctxp;
+
+ optind = 0;
+
+ ci->dir = "/tmp";
+ ci->threshold = 100;
+ ci->file_limit = 100;
+ ci->min_interval = 1000000;
+ ci->inset = 10; /* Percent */
+
+ while ((c = getopt(argc, argv, "w:i:dh:s:v:zl:t:D:")) > 0) {
+ switch (c) {
+ case 'h':
+ dorange(optarg, &ci->dark.h, &ci->bright.h, 360);
+ break;
+ case 's':
+ dorange(optarg, &ci->dark.s, &ci->bright.s, 255);
+ break;
+ case 'v':
+ dorange(optarg, &ci->dark.v, &ci->bright.v, 255);
+ break;
+ case 'z':
+ ci->zapping = 1;
+ break;
+ case 'l':
+ ci->file_limit = atoi(optarg);
+ break;
+ case 'i':
+ ci->min_interval = 1000000 * atof(optarg);
+ break;
+ case 't':
+ ci->threshold = atof(optarg) * 1000;
+ if (ci->threshold > 1000 || ci->threshold < 0) {
+ fprintf(stderr, "Invalid threshold value '%s' (range is 0-1)\n", optarg);
+ return -1;
+ }
+ break;
+ case 'w':
+ ci->min_width = atoi(optarg);
+ break;
+ case 'd':
+ ci->debug++;
+ break;
+ case 'D':
+ ci->dir = av_strdup(optarg);
+ break;
+ default:
+ fprintf(stderr, "Unrecognized argument '%s'\n", argv[optind]);
+ return -1;
+ }
+ }
+
+ fprintf(stderr, "Fish detector configured:\n");
+ fprintf(stderr, " HSV range: %d,%d,%d - %d,%d,%d\n",
+ ci->dark.h,
+ ci->dark.s,
+ ci->dark.v,
+ ci->bright.h,
+ ci->bright.s,
+ ci->bright.v);
+ fprintf(stderr, " Threshold is %d%% pixels\n", ci->threshold / 10);
+
+
+ return 0;
+}
+
+static void get_hsv(HSV *hsv, int r, int g, int b)
+{
+ int i, v, x, f;
+
+ x = (r < g) ? r : g;
+ if (b < x)
+ x = b;
+ v = (r > g) ? r : g;
+ if (b > v)
+ v = b;
+
+ if (v == x) {
+ hsv->h = 0;
+ hsv->s = 0;
+ hsv->v = v;
+ return;
+ }
+
+ if (r == v) {
+ f = g - b;
+ i = 0;
+ } else if (g == v) {
+ f = b - r;
+ i = 2 * 60;
+ } else {
+ f = r - g;
+ i = 4 * 60;
+ }
+
+ hsv->h = i + (60 * f) / (v - x);
+ if (hsv->h < 0)
+ hsv->h += 360;
+
+ hsv->s = (255 * (v - x)) / v;
+ hsv->v = v;
+
+ return;
+}
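+
+/* get_hsv() is the usual max/min hexcone conversion: v is the largest of
+   r, g and b, s is the spread scaled to 0..255 and h selects the 120 degree
+   sector of the dominant channel.  For instance (illustrative values only),
+   pure red (255,0,0) gives h=0, s=255, v=255 and a dull orange (200,100,50)
+   gives h=20, s=191, v=200. */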
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
+ int rowsize = picture->linesize[0];
+
+#if 0
+ printf("pix_fmt = %d, width = %d, pts = %lld, ci->next_pts = %lld\n",
+ pix_fmt, width, pts, ci->next_pts);
+#endif
+
+ if (pts < ci->next_pts)
+ return;
+
+ if (width < ci->min_width)
+ return;
+
+ ci->next_pts = pts + 1000000;
+
+ if (pix_fmt == PIX_FMT_YUV420P) {
+ uint8_t *y, *u, *v;
+ int width2 = width >> 1;
+ int inrange = 0;
+ int pixcnt;
+ int h;
+ int h_start, h_end;
+ int w_start, w_end;
+
+ h_end = 2 * ((ci->inset * height) / 200);
+ h_start = height - h_end;
+
+ w_end = (ci->inset * width2) / 100;
+ w_start = width2 - w_end;
+
+ pixcnt = ((h_start - h_end) >> 1) * (w_start - w_end);
+
+ y = picture->data[0] + h_end * picture->linesize[0] + w_end * 2;
+ u = picture->data[1] + h_end * picture->linesize[1] / 2 + w_end;
+ v = picture->data[2] + h_end * picture->linesize[2] / 2 + w_end;
+
+ for (h = h_start; h > h_end; h -= 2) {
+ int w;
+
+ for (w = w_start; w > w_end; w--) {
+ unsigned int r,g,b;
+ HSV hsv;
+ int cb, cr, yt, r_add, g_add, b_add;
+
+ YUV_TO_RGB1_CCIR(u[0], v[0]);
+ YUV_TO_RGB2_CCIR(r, g, b, y[0]);
+
+ get_hsv(&hsv, r, g, b);
+
+ if (ci->debug > 1)
+ fprintf(stderr, "(%d,%d,%d) -> (%d,%d,%d)\n",
+ r,g,b,hsv.h,hsv.s,hsv.v);
+
+
+ if (hsv.h >= ci->dark.h && hsv.h <= ci->bright.h &&
+ hsv.s >= ci->dark.s && hsv.s <= ci->bright.s &&
+ hsv.v >= ci->dark.v && hsv.v <= ci->bright.v) {
+ inrange++;
+ } else if (ci->zapping) {
+ y[0] = y[1] = y[rowsize] = y[rowsize + 1] = 16;
+ u[0] = 128;
+ v[0] = 128;
+ }
+
+ y+= 2;
+ u++;
+ v++;
+ }
+
+ y += picture->linesize[0] * 2 - (w_start - w_end) * 2;
+ u += picture->linesize[1] - (w_start - w_end);
+ v += picture->linesize[2] - (w_start - w_end);
+ }
+
+ if (ci->debug)
+ fprintf(stderr, "Fish: Inrange=%d of %d = %d threshold\n", inrange, pixcnt, 1000 * inrange / pixcnt);
+
+ if (inrange * 1000 / pixcnt >= ci->threshold) {
+ /* Save to file */
+ int size;
+ char *buf;
+ AVPicture picture1;
+ static int frame_counter;
+ static int foundfile;
+
+ if ((frame_counter++ % 20) == 0) {
+ /* Check how many files we have */
+ DIR *d;
+
+ foundfile = 0;
+
+ d = opendir(ci->dir);
+ if (d) {
+ struct dirent *dent;
+
+ while ((dent = readdir(d))) {
+ if (strncmp("fishimg", dent->d_name, 7) == 0) {
+ if (strcmp(".ppm", dent->d_name + strlen(dent->d_name) - 4) == 0) {
+ foundfile++;
+ }
+ }
+ }
+ closedir(d);
+ }
+ }
+
+ if (foundfile < ci->file_limit) {
+ FILE *f;
+ char fname[256];
+
+ size = avpicture_get_size(PIX_FMT_RGB24, width, height);
+ buf = av_malloc(size);
+
+ avpicture_fill(&picture1, buf, PIX_FMT_RGB24, width, height);
+
+ // if we already have an SWS context, reallocate it only if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ width, height, pix_fmt,
+ width, height, PIX_FMT_RGB24,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+ // img_convert parameters are 2 first destination, then 4 source
+ // sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, height,
+ picture1.data, picture1.linesize);
+
+ /* Write out the PPM file */
+ snprintf(fname, sizeof(fname), "%s/fishimg%ld_%"PRId64".ppm", ci->dir, (long)(av_gettime() / 1000000), pts);
+ f = fopen(fname, "w");
+ if (f) {
+ fprintf(f, "P6 %d %d 255\n", width, height);
+ fwrite(buf, width * height * 3, 1, f);
+ fclose(f);
+ }
+
+ av_free(buf);
+ ci->next_pts = pts + ci->min_interval;
+ }
+ }
+ }
+}
+
diff --git a/contrib/ffmpeg/vhook/imlib2.c b/contrib/ffmpeg/vhook/imlib2.c
new file mode 100644
index 000000000..87c54cf0b
--- /dev/null
+++ b/contrib/ffmpeg/vhook/imlib2.c
@@ -0,0 +1,450 @@
+/*
+ * imlib2 based hook
+ * Copyright (c) 2002 Philip Gladstone
+ *
+ * This module implements a text overlay for a video image. Currently it
+ * supports a fixed overlay or reading the text from a file. The string
+ * is passed through strftime so that it is easy to imprint the date and
+ * time onto the image.
+ *
+ * You may also overlay an image (even semi-transparent) like TV stations do.
+ * You may move either the text or the image around your video to create
+ * scrolling credits, for example.
+ *
+ * Fonts are searched for in the directories listed in FONTPATH
+ *
+ * Options:
+ *
+ * -c <color> The color of the text
+ * -F <fontname> The font face and size
+ * -t <text> The text
+ * -f <filename> The filename to read text from
+ * -x <expression> X coordinate of text or image
+ * -y <expression> Y coordinate of text or image
+ * -i <filename> The filename to read a image from
+ *
+ * Expressions are functions of:
+ * N // frame number (starting at zero)
+ * H // frame height
+ * W // frame width
+ * h // image height
+ * w // image width
+ * X // previous x
+ * Y // previous y
+ *
+
+ Examples:
+
+ FONTPATH="/cygdrive/c/WINDOWS/Fonts/"
+ FONTPATH="$FONTPATH:/usr/share/imlib2/data/fonts/"
+ FONTPATH="$FONTPATH:/usr/X11R6/lib/X11/fonts/TTF/"
+ export FONTPATH
+
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -x W*(0.5+0.25*sin(N/47*PI))-w/2 -y H*(0.5+0.50*cos(N/97*PI))-h/2 -i /usr/share/imlib2/data/images/bulb.png'
+ -acodec copy -sameq output.avi
+
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -c red -F Vera.ttf/20 -x 150+0.5*N -y 70+0.25*N -t Hello'
+ -acodec copy -sameq output.avi
+
+ * This module is very much intended as an example of what could be done.
+ *
+ * One caution is that this is an expensive process -- in particular the
+ * conversion of the image into RGB and back is time consuming. For some
+ * special cases -- e.g. painting black text -- it would be faster to paint
+ * the text into a bitmap and then combine it directly into the YUV
+ * image. However, this code is fast enough to handle 10 fps of 320x240 on a
+ * 900MHz Duron in maybe 15% of the CPU.
+
+ * Further statistics, measured on a 3 GHz Pentium 4 with FFmpeg SVN-r6798:
+ * Input movie is 20.2 seconds of PAL DV on AVI
+ * Output movie is DVD compliant VOB.
+ *
+ ffmpeg -i input.avi -target pal-dvd out.vob
+ # 13.516s just transcode
+ ffmpeg -i input.avi -vhook /usr/local/bin/vhook/null.dll -target pal-dvd out.vob
+ # 23.546s transcode and img_convert
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -c red -F Vera/20 -x 150-0.5*N -y 70+0.25*N -t Hello_person' \
+ -target pal-dvd out.vob
+ # 21.454s transcode, img_convert and move text around
+ ffmpeg -i input.avi -vhook \
+ 'vhook/imlib2.dll -x 150-0.5*N -y 70+0.25*N -i /usr/share/imlib2/data/images/bulb.png' \
+ -target pal-dvd out.vob
+ # 20.828s transcode, img_convert and move image around
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "framehook.h"
+#include "swscale.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#undef time
+#include <sys/time.h>
+#include <time.h>
+#include <X11/Xlib.h>
+#include <Imlib2.h>
+#include "eval.h"
+
+const char *const_names[]={
+ "PI",
+ "E",
+ "N", // frame number (starting at zero)
+ "H", // frame height
+ "W", // frame width
+ "h", // image height
+ "w", // image width
+ "X", // previous x
+ "Y", // previous y
+ NULL
+};
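+
+/* The order of these names must match the order of the const_values[] array
+   built in Process() below: ff_parse() and ff_parse_eval() pair constant
+   names and values by position. */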
+
+static int sws_flags = SWS_BICUBIC;
+
+typedef struct {
+ int dummy;
+ Imlib_Font fn;
+ char *text;
+ char *file;
+ int r, g, b;
+ double x, y;
+ char *fileImage;
+ struct _CachedImage *cache;
+ Imlib_Image imageOverlaid;
+ AVEvalExpr *eval_x, *eval_y;
+ char *expr_x, *expr_y;
+ int frame_number;
+ int imageOverlaid_width, imageOverlaid_height;
+
+ // This vhook first converts frame to RGB ...
+ struct SwsContext *toRGB_convert_ctx;
+ // ... and then converts back frame from RGB to initial format
+ struct SwsContext *fromRGB_convert_ctx;
+} ContextInfo;
+
+typedef struct _CachedImage {
+ struct _CachedImage *next;
+ Imlib_Image image;
+ int width;
+ int height;
+} CachedImage;
+
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ci->cache) {
+ imlib_context_set_image(ci->cache->image);
+ imlib_free_image();
+ av_free(ci->cache);
+ }
+ if (ctx) {
+ if (ci->imageOverlaid) {
+ imlib_context_set_image(ci->imageOverlaid);
+ imlib_free_image();
+ }
+ ff_eval_free(ci->eval_x);
+ ff_eval_free(ci->eval_y);
+ sws_freeContext(ci->toRGB_convert_ctx);
+ sws_freeContext(ci->fromRGB_convert_ctx);
+ av_free(ctx);
+ }
+}
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ int c;
+ ContextInfo *ci;
+ char *font = "LucidaSansDemiBold/16";
+ char *fp = getenv("FONTPATH");
+ char *color = 0;
+ FILE *f;
+ char *p;
+
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ ci = (ContextInfo *) *ctxp;
+
+ ci->x = 0.0;
+ ci->y = 0.0;
+ ci->expr_x = "0.0";
+ ci->expr_y = "0.0";
+
+ optind = 0;
+
+ /* Use ':' to split FONTPATH */
+ if (fp)
+ while ((p = strchr(fp, ':'))) {
+ *p = 0;
+ imlib_add_path_to_font_path(fp);
+ fp = p + 1;
+ }
+ if ((fp) && (*fp))
+ imlib_add_path_to_font_path(fp);
+
+
+ while ((c = getopt(argc, argv, "c:f:F:t:x:y:i:")) > 0) {
+ switch (c) {
+ case 'c':
+ color = optarg;
+ break;
+ case 'F':
+ font = optarg;
+ break;
+ case 't':
+ ci->text = av_strdup(optarg);
+ break;
+ case 'f':
+ ci->file = av_strdup(optarg);
+ break;
+ case 'x':
+ ci->expr_x = av_strdup(optarg);
+ break;
+ case 'y':
+ ci->expr_y = av_strdup(optarg);
+ break;
+ case 'i':
+ ci->fileImage = av_strdup(optarg);
+ break;
+ case '?':
+ fprintf(stderr, "Unrecognized argument '%s'\n", argv[optind]);
+ return -1;
+ }
+ }
+
+ if (ci->text || ci->file) {
+ ci->fn = imlib_load_font(font);
+ if (!ci->fn) {
+ fprintf(stderr, "Failed to load font '%s'\n", font);
+ return -1;
+ }
+ imlib_context_set_font(ci->fn);
+ imlib_context_set_direction(IMLIB_TEXT_TO_RIGHT);
+ }
+
+ if (color) {
+ char buff[256];
+ int done = 0;
+
+ f = fopen("/usr/share/X11/rgb.txt", "r");
+ if (!f)
+ f = fopen("/usr/lib/X11/rgb.txt", "r");
+ if (!f) {
+ fprintf(stderr, "Failed to find rgb.txt\n");
+ return -1;
+ }
+ while (fgets(buff, sizeof(buff), f)) {
+ int r, g, b;
+ char colname[80];
+
+ if (sscanf(buff, "%d %d %d %64s", &r, &g, &b, colname) == 4 &&
+ strcasecmp(colname, color) == 0) {
+ ci->r = r;
+ ci->g = g;
+ ci->b = b;
+ /* fprintf(stderr, "%s -> %d,%d,%d\n", colname, r, g, b); */
+ done = 1;
+ break;
+ }
+ }
+ fclose(f);
+ if (!done) {
+ fprintf(stderr, "Unable to find color '%s' in rgb.txt\n", color);
+ return -1;
+ }
+ }
+ imlib_context_set_color(ci->r, ci->g, ci->b, 255);
+
+ /* load the image (for example, credits for a movie) */
+ if (ci->fileImage) {
+ ci->imageOverlaid = imlib_load_image_immediately(ci->fileImage);
+ if (!(ci->imageOverlaid)){
+ av_log(NULL, AV_LOG_ERROR, "Couldn't load image '%s'\n", ci->fileImage);
+ return -1;
+ }
+ imlib_context_set_image(ci->imageOverlaid);
+ ci->imageOverlaid_width = imlib_image_get_width();
+ ci->imageOverlaid_height = imlib_image_get_height();
+ }
+
+ if (!(ci->eval_x = ff_parse(ci->expr_x, const_names, NULL, NULL, NULL, NULL, NULL))){
+ av_log(NULL, AV_LOG_ERROR, "Couldn't parse x expression '%s'\n", ci->expr_x);
+ return -1;
+ }
+
+ if (!(ci->eval_y = ff_parse(ci->expr_y, const_names, NULL, NULL, NULL, NULL, NULL))){
+ av_log(NULL, AV_LOG_ERROR, "Couldn't parse y expression '%s'\n", ci->expr_y);
+ return -1;
+ }
+
+ return 0;
+}
+
+static Imlib_Image get_cached_image(ContextInfo *ci, int width, int height)
+{
+ CachedImage *cache;
+
+ for (cache = ci->cache; cache; cache = cache->next) {
+ if (width == cache->width && height == cache->height)
+ return cache->image;
+ }
+
+ return NULL;
+}
+
+static void put_cached_image(ContextInfo *ci, Imlib_Image image, int width, int height)
+{
+ CachedImage *cache = av_mallocz(sizeof(*cache));
+
+ cache->image = image;
+ cache->width = width;
+ cache->height = height;
+ cache->next = ci->cache;
+ ci->cache = cache;
+}
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ AVPicture picture1;
+ Imlib_Image image;
+ DATA32 *data;
+
+ image = get_cached_image(ci, width, height);
+
+ if (!image) {
+ image = imlib_create_image(width, height);
+ put_cached_image(ci, image, width, height);
+ }
+
+ imlib_context_set_image(image);
+ data = imlib_image_get_data();
+
+ avpicture_fill(&picture1, (uint8_t *) data, PIX_FMT_RGBA32, width, height);
+
+ // if we already have an SWS context, reallocate it only if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ width, height, pix_fmt,
+ width, height, PIX_FMT_RGBA32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, height,
+ picture1.data, picture1.linesize);
+
+ imlib_image_set_has_alpha(0);
+
+ {
+ int wid, hig, h_a, v_a;
+ char buff[1000];
+ char tbuff[1000];
+ char *tbp = ci->text;
+ time_t now = time(0);
+ char *p, *q;
+ int y;
+
+ double const_values[]={
+ M_PI,
+ M_E,
+ ci->frame_number, // frame number (starting at zero)
+ height, // frame height
+ width, // frame width
+ ci->imageOverlaid_height, // image height
+ ci->imageOverlaid_width, // image width
+ ci->x, // previous x
+ ci->y, // previous y
+ 0
+ };
+
+ if (ci->file) {
+ int fd = open(ci->file, O_RDONLY);
+
+ if (fd < 0) {
+ tbp = "[File not found]";
+ } else {
+ int l = read(fd, tbuff, sizeof(tbuff) - 1);
+
+ if (l >= 0) {
+ tbuff[l] = 0;
+ tbp = tbuff;
+ } else {
+ tbp = "[I/O Error]";
+ }
+ close(fd);
+ }
+ }
+
+ if (tbp)
+ strftime(buff, sizeof(buff), tbp, localtime(&now));
+ else if (!(ci->imageOverlaid))
+ strftime(buff, sizeof(buff), "[No data]", localtime(&now));
+
+ ci->x = ff_parse_eval(ci->eval_x, const_values, ci);
+ ci->y = ff_parse_eval(ci->eval_y, const_values, ci);
+ y = ci->y;
+
+ if (!(ci->imageOverlaid))
+ for (p = buff; p; p = q) {
+ q = strchr(p, '\n');
+ if (q)
+ *q++ = 0;
+
+ imlib_text_draw_with_return_metrics(ci->x, y, p, &wid, &hig, &h_a, &v_a);
+ y += v_a;
+ }
+
+ if (ci->imageOverlaid) {
+ imlib_context_set_image(image);
+ imlib_blend_image_onto_image(ci->imageOverlaid, 0,
+ 0, 0, ci->imageOverlaid_width, ci->imageOverlaid_height,
+ ci->x, ci->y, ci->imageOverlaid_width, ci->imageOverlaid_height);
+ }
+
+ }
+
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ width, height, PIX_FMT_RGBA32,
+ width, height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, height,
+ picture->data, picture->linesize);
+
+ ci->frame_number++;
+}
+
diff --git a/contrib/ffmpeg/vhook/null.c b/contrib/ffmpeg/vhook/null.c
new file mode 100644
index 000000000..041e5abda
--- /dev/null
+++ b/contrib/ffmpeg/vhook/null.c
@@ -0,0 +1,116 @@
+/*
+ * Null Video Hook
+ * Copyright (c) 2002 Philip Gladstone
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
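+
+/* This hook performs no processing of its own; it only does the standard
+   convert-to-RGB24 / convert-back round trip, which makes it a convenient
+   template for new vhooks and a way to measure the cost of the colorspace
+   conversion alone (see the timing notes in vhook/imlib2.c). */
+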
+#include <stdio.h>
+
+#include "framehook.h"
+#include "swscale.h"
+
+static int sws_flags = SWS_BICUBIC;
+
+typedef struct {
+ int dummy;
+
+ // This vhook first converts frame to RGB ...
+ struct SwsContext *toRGB_convert_ctx;
+
+ // ... and later converts back frame from RGB to initial format
+ struct SwsContext *fromRGB_convert_ctx;
+
+} ContextInfo;
+
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ctx) {
+ sws_freeContext(ci->toRGB_convert_ctx);
+ sws_freeContext(ci->fromRGB_convert_ctx);
+ av_free(ctx);
+ }
+}
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ fprintf(stderr, "Called with argc=%d\n", argc);
+
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ return 0;
+}
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ char *buf = 0;
+ AVPicture picture1;
+ AVPicture *pict = picture;
+
+ (void) ci;
+
+ if (pix_fmt != PIX_FMT_RGB24) {
+ int size;
+
+ size = avpicture_get_size(PIX_FMT_RGB24, width, height);
+ buf = av_malloc(size);
+
+ avpicture_fill(&picture1, buf, PIX_FMT_RGB24, width, height);
+
+ // if we already have an SWS context, reallocate it only if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ width, height, pix_fmt,
+ width, height, PIX_FMT_RGB24,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, height,
+ picture1.data, picture1.linesize);
+
+ pict = &picture1;
+ }
+
+ /* Insert filter code here */
+
+ if (pix_fmt != PIX_FMT_RGB24) {
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ width, height, PIX_FMT_RGB24,
+ width, height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, height,
+ picture->data, picture->linesize);
+ }
+
+ av_free(buf);
+}
+
diff --git a/contrib/ffmpeg/vhook/ppm.c b/contrib/ffmpeg/vhook/ppm.c
new file mode 100644
index 000000000..51badd58d
--- /dev/null
+++ b/contrib/ffmpeg/vhook/ppm.c
@@ -0,0 +1,367 @@
+/*
+ * PPM Video Hook
+ * Copyright (c) 2003 Charles Yates
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <ctype.h>
+#include "framehook.h"
+#include "avformat.h"
+#include "swscale.h"
+
+static int sws_flags = SWS_BICUBIC;
+
+/** Bi-directional pipe structure.
+*/
+
+typedef struct rwpipe
+{
+ int pid;
+ FILE *reader;
+ FILE *writer;
+}
+rwpipe;
+
+/** Create a bidirectional pipe for the given command.
+*/
+
+static rwpipe *rwpipe_open( int argc, char *argv[] )
+{
+ rwpipe *this = av_mallocz( sizeof( rwpipe ) );
+
+ if ( this != NULL )
+ {
+ int input[ 2 ];
+ int output[ 2 ];
+
+ pipe( input );
+ pipe( output );
+
+ this->pid = fork();
+
+ if ( this->pid == 0 )
+ {
+#define COMMAND_SIZE 10240
+ char *command = av_mallocz( COMMAND_SIZE );
+ int i;
+
+ strcpy( command, "" );
+ for ( i = 0; i < argc; i ++ )
+ {
+ pstrcat( command, COMMAND_SIZE, argv[ i ] );
+ pstrcat( command, COMMAND_SIZE, " " );
+ }
+
+ dup2( output[ 0 ], STDIN_FILENO );
+ dup2( input[ 1 ], STDOUT_FILENO );
+
+ close( input[ 0 ] );
+ close( input[ 1 ] );
+ close( output[ 0 ] );
+ close( output[ 1 ] );
+
+ execl("/bin/sh", "sh", "-c", command, (char*)NULL );
+ exit( 255 );
+ }
+ else
+ {
+ close( input[ 1 ] );
+ close( output[ 0 ] );
+
+ this->reader = fdopen( input[ 0 ], "r" );
+ this->writer = fdopen( output[ 1 ], "w" );
+ }
+ }
+
+ return this;
+}
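+
+/* After the fork above, the child runs the given command with its stdin
+   connected to output[0] and its stdout to input[1]; the parent keeps the
+   opposite ends as this->reader and this->writer.  A hypothetical invocation
+   such as -vhook 'vhook/ppm.so ppmtopgm | pgmtoppm green' would therefore
+   pipe every frame, as a PPM image, through that shell pipeline. */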
+
+/** Read data from the pipe.
+*/
+
+static FILE *rwpipe_reader( rwpipe *this )
+{
+ if ( this != NULL )
+ return this->reader;
+ else
+ return NULL;
+}
+
+/** Write data to the pipe.
+*/
+
+static FILE *rwpipe_writer( rwpipe *this )
+{
+ if ( this != NULL )
+ return this->writer;
+ else
+ return NULL;
+}
+
+/* Read a number from the pipe - assumes PNM style headers.
+*/
+
+static int rwpipe_read_number( rwpipe *rw )
+{
+ int value = 0;
+ int c = 0;
+ FILE *in = rwpipe_reader( rw );
+
+ do
+ {
+ c = fgetc( in );
+
+ while( c != EOF && !isdigit( c ) && c != '#' )
+ c = fgetc( in );
+
+ if ( c == '#' )
+ while( c != EOF && c != '\n' )
+ c = fgetc( in );
+ }
+ while ( c != EOF && !isdigit( c ) );
+
+ while( c != EOF && isdigit( c ) )
+ {
+ value = value * 10 + ( c - '0' );
+ c = fgetc( in );
+ }
+
+ return value;
+}
+
+/** Read a PPM P6 header.
+*/
+
+static int rwpipe_read_ppm_header( rwpipe *rw, int *width, int *height )
+{
+ char line[ 3 ];
+ FILE *in = rwpipe_reader( rw );
+ int max;
+
+ fgets( line, 3, in );
+ if ( !strncmp( line, "P6", 2 ) )
+ {
+ *width = rwpipe_read_number( rw );
+ *height = rwpipe_read_number( rw );
+ max = rwpipe_read_number( rw );
+ return max != 255 || *width <= 0 || *height <= 0;
+ }
+ return 1;
+}
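+
+/* The header expected here is the plain "P6\n<width> <height>\n255\n" form
+   that Process() itself writes below; a return value of 0 means a well
+   formed 8-bit P6 header was read back from the command. */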
+
+/** Close the pipe and process.
+*/
+
+static void rwpipe_close( rwpipe *this )
+{
+ if ( this != NULL )
+ {
+ fclose( this->reader );
+ fclose( this->writer );
+ waitpid( this->pid, NULL, 0 );
+ av_free( this );
+ }
+}
+
+/** Context info for this vhook - stores the pipe and image buffers.
+*/
+
+typedef struct
+{
+ rwpipe *rw;
+ int size1;
+ char *buf1;
+ int size2;
+ char *buf2;
+
+ // This vhook first converts frame to RGB ...
+ struct SwsContext *toRGB_convert_ctx;
+ // ... then processes it via a PPM command pipe ...
+ // ... and finally converts back frame from RGB to initial format
+ struct SwsContext *fromRGB_convert_ctx;
+}
+ContextInfo;
+
+/** Initialise the context info for this vhook.
+*/
+
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ if ( argc > 1 )
+ {
+ *ctxp = av_mallocz(sizeof(ContextInfo));
+ if ( *ctxp != NULL )
+ {
+ ContextInfo *info = (ContextInfo *)*ctxp;
+ info->rw = rwpipe_open( argc - 1, &argv[ 1 ] );
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/** Process a frame.
+*/
+
+void Process(void *ctx, AVPicture *picture, enum PixelFormat pix_fmt, int width, int height, int64_t pts)
+{
+ int err = 0;
+ ContextInfo *ci = (ContextInfo *) ctx;
+ AVPicture picture1;
+ AVPicture picture2;
+ AVPicture *pict = picture;
+ int out_width;
+ int out_height;
+ int i;
+ uint8_t *ptr = NULL;
+ FILE *in = rwpipe_reader( ci->rw );
+ FILE *out = rwpipe_writer( ci->rw );
+
+ /* Check that we have a pipe to talk to. */
+ if ( in == NULL || out == NULL )
+ err = 1;
+
+ /* Convert to RGB24 if necessary */
+ if ( !err && pix_fmt != PIX_FMT_RGB24 )
+ {
+ int size = avpicture_get_size(PIX_FMT_RGB24, width, height);
+
+ if ( size != ci->size1 )
+ {
+ av_free( ci->buf1 );
+ ci->buf1 = av_malloc(size);
+ ci->size1 = size;
+ err = ci->buf1 == NULL;
+ }
+
+ if ( !err )
+ {
+ avpicture_fill(&picture1, ci->buf1, PIX_FMT_RGB24, width, height);
+
+ // if we already have an SWS context, reallocate it only if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ width, height, pix_fmt,
+ width, height, PIX_FMT_RGB24,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, height,
+ picture1.data, picture1.linesize);
+
+ pict = &picture1;
+ }
+ }
+
+ /* Write out the PPM */
+ if ( !err )
+ {
+ ptr = pict->data[ 0 ];
+ fprintf( out, "P6\n%d %d\n255\n", width, height );
+ for ( i = 0; !err && i < height; i ++ )
+ {
+ err = !fwrite( ptr, width * 3, 1, out );
+ ptr += pict->linesize[ 0 ];
+ }
+ if ( !err )
+ err = fflush( out );
+ }
+
+ /* Read the PPM returned. */
+ if ( !err && !rwpipe_read_ppm_header( ci->rw, &out_width, &out_height ) )
+ {
+ int size = avpicture_get_size(PIX_FMT_RGB24, out_width, out_height);
+
+ if ( size != ci->size2 )
+ {
+ av_free( ci->buf2 );
+ ci->buf2 = av_malloc(size);
+ ci->size2 = size;
+ err = ci->buf2 == NULL;
+ }
+
+ if ( !err )
+ {
+ avpicture_fill(&picture2, ci->buf2, PIX_FMT_RGB24, out_width, out_height);
+ ptr = picture2.data[ 0 ];
+ for ( i = 0; !err && i < out_height; i ++ )
+ {
+ err = !fread( ptr, out_width * 3, 1, in );
+ ptr += picture2.linesize[ 0 ];
+ }
+ }
+ }
+
+ /* Convert the returned PPM back to the input format */
+ if ( !err )
+ {
+ /* The out_width/out_height returned from the PPM
+ * filter won't necessarily be the same as width and height
+ * but it will be scaled anyway to width/height.
+ */
+ av_log(NULL, AV_LOG_DEBUG,
+ "PPM vhook: Input dimensions: %d x %d Output dimensions: %d x %d\n",
+ width, height, out_width, out_height);
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ out_width, out_height, PIX_FMT_RGB24,
+ width, height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture2.data, picture2.linesize, 0, out_height,
+ picture->data, picture->linesize);
+ }
+}
+
+/** Clean up the effect.
+*/
+
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ctx)
+ {
+ rwpipe_close( ci->rw );
+ av_free( ci->buf1 );
+ av_free( ci->buf2 );
+ sws_freeContext(ci->toRGB_convert_ctx);
+ sws_freeContext(ci->fromRGB_convert_ctx);
+ av_free(ctx);
+ }
+}
+
diff --git a/contrib/ffmpeg/vhook/watermark.c b/contrib/ffmpeg/vhook/watermark.c
new file mode 100644
index 000000000..4d2acd2aa
--- /dev/null
+++ b/contrib/ffmpeg/vhook/watermark.c
@@ -0,0 +1,661 @@
+/*
+ * Watermark Hook
+ * Copyright (c) 2005 Marcus Engene myfirstname(at)mylastname.se
+ *
+ * parameters for watermark:
+ * -m nbr = nbr is 0..1. 0 is the default mode, see below.
+ * -t nbr = nbr is six digit hex. Threshold.
+ * -f file = file is the watermark image filename. You must specify this!
+ *
+ * MODE 0:
+ * The watermark picture works like this (assuming color intensities 0..0xff):
+ * Per color do this:
+ * If mask color is 0x80, no change to the original frame.
+ * If mask color is < 0x80 the abs difference is subtracted from the frame. If
+ * result < 0, result = 0
+ * If mask color is > 0x80 the abs difference is added to the frame. If result
+ * > 0xff, result = 0xff
+ *
+ * You can override the 0x80 level with the -t flag. E.g. if threshold is
+ * 000000 the color value of watermark is added to the destination.
+ *
+ * This way a mask can be made that is visible both in light and in dark
+ * pictures (for example one generated with Gimp and its bump map tool).
+ *
+ * An example watermark file is at
+ * http://engene.se/ffmpeg_watermark.gif
+ *
+ * MODE 1:
+ * Per color do this:
+ * If mask color > threshold color then the watermark pixel is used.
+ *
+ * Example usage:
+ * ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif' -an out.mov
+ * ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif -m 1 -t 222222' -an out.mov
+ *
+ * Note that the entire vhook argument is encapsulated in ''. This
+ * way, arguments to the vhook won't be mixed up with those for ffmpeg.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
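+
+/*
+ * Worked example of the mode 0 arithmetic (illustrative values only): with
+ * the default threshold 0x80, a frame value of 100 combined with a
+ * watermark value of 0x90 (144) gives 100 + 144 - 128 = 116, brightening
+ * the frame by the amount the mask exceeds the threshold; a mask value of
+ * 0x70 (112) gives 100 + 112 - 128 = 84, darkening it by the same amount.
+ * Results are clamped to 0..255, exactly as Process0() does per channel.
+ */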
+
+#include <stdlib.h>
+//#include <fcntl.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include "common.h"
+#include "avformat.h"
+
+#include "framehook.h"
+#include "cmdutils.h"
+#include "swscale.h"
+
+static int sws_flags = SWS_BICUBIC;
+
+typedef struct {
+ char filename[2000];
+ int x_size;
+ int y_size;
+
+ /* get_watermark_picture() variables */
+ AVFormatContext *pFormatCtx;
+ const char *p_ext;
+ int videoStream;
+ int frameFinished;
+ AVCodecContext *pCodecCtx;
+ AVCodec *pCodec;
+ AVFrame *pFrame;
+ AVPacket packet;
+ int numBytes;
+ uint8_t *buffer;
+ int i;
+ AVInputFormat *file_iformat;
+ AVStream *st;
+ int is_done;
+ AVFrame *pFrameRGB;
+ int thrR;
+ int thrG;
+ int thrB;
+ int mode;
+
+ // This vhook first converts frame to RGB ...
+ struct SwsContext *toRGB_convert_ctx;
+ // ... then converts a watermark and applies it to the RGB frame ...
+ struct SwsContext *watermark_convert_ctx;
+ // ... and finally converts back frame from RGB to initial format
+ struct SwsContext *fromRGB_convert_ctx;
+} ContextInfo;
+
+int get_watermark_picture(ContextInfo *ci, int cleanup);
+
+
+/****************************************************************************
+ *
+ ****************************************************************************/
+void Release(void *ctx)
+{
+ ContextInfo *ci;
+ ci = (ContextInfo *) ctx;
+
+ if (ci) {
+ get_watermark_picture(ci, 1);
+ sws_freeContext(ci->toRGB_convert_ctx);
+ sws_freeContext(ci->watermark_convert_ctx);
+ sws_freeContext(ci->fromRGB_convert_ctx);
+ }
+ av_free(ctx);
+}
+
+
+/****************************************************************************
+ *
+ ****************************************************************************/
+int Configure(void **ctxp, int argc, char *argv[])
+{
+ ContextInfo *ci;
+ int c;
+ int tmp = 0;
+
+ if (0 == (*ctxp = av_mallocz(sizeof(ContextInfo)))) return -1;
+ ci = (ContextInfo *) *ctxp;
+
+ optind = 1;
+
+ // Struct is mallocz:ed so no need to reset.
+ ci->thrR = 0x80;
+ ci->thrG = 0x80;
+ ci->thrB = 0x80;
+
+ while ((c = getopt(argc, argv, "f:m:t:")) > 0) {
+ switch (c) {
+ case 'f':
+ strncpy(ci->filename, optarg, 1999);
+ ci->filename[1999] = 0;
+ break;
+ case 'm':
+ ci->mode = atoi(optarg);
+ break;
+ case 't':
+ if (1 != sscanf(optarg, "%x", &tmp)) {
+ av_log(NULL, AV_LOG_ERROR, "Watermark: argument to -t must be a 6 digit hex number\n");
+ return -1;
+ }
+ ci->thrR = (tmp >> 16) & 0xff;
+ ci->thrG = (tmp >> 8) & 0xff;
+ ci->thrB = (tmp >> 0) & 0xff;
+ break;
+ default:
+ av_log(NULL, AV_LOG_ERROR, "Watermark: Unrecognized argument '%s'\n", argv[optind]);
+ return -1;
+ }
+ }
+
+ //
+ if (0 == ci->filename[0]) {
+ av_log(NULL, AV_LOG_ERROR, "Watermark: There is no filename specified.\n");
+ return -1;
+ }
+
+ av_register_all();
+ return get_watermark_picture(ci, 0);
+}
+
+
+/****************************************************************************
+ * For mode 0 (the original one)
+ ****************************************************************************/
+static void Process0(void *ctx,
+ AVPicture *picture,
+ enum PixelFormat pix_fmt,
+ int src_width,
+ int src_height,
+ int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ char *buf = 0;
+ AVPicture picture1;
+ AVPicture *pict = picture;
+
+ AVFrame *pFrameRGB;
+ int xm_size;
+ int ym_size;
+
+ int x;
+ int y;
+ int offs, offsm;
+ int mpoffs;
+ uint32_t *p_pixel = 0;
+ uint32_t pixel_meck;
+ uint32_t pixel;
+ uint32_t pixelm;
+ int tmp;
+ int thrR = ci->thrR;
+ int thrG = ci->thrG;
+ int thrB = ci->thrB;
+
+ if (pix_fmt != PIX_FMT_RGBA32) {
+ int size;
+
+ size = avpicture_get_size(PIX_FMT_RGBA32, src_width, src_height);
+ buf = av_malloc(size);
+
+ avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height);
+
+ // if we already have an SWS context, reallocate it only if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ src_width, src_height, pix_fmt,
+ src_width, src_height, PIX_FMT_RGBA32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, src_height,
+ picture1.data, picture1.linesize);
+
+ pict = &picture1;
+ }
+
+ /* Insert filter code here */ /* ok */
+
+ // Get me next frame
+ if (0 > get_watermark_picture(ci, 0)) {
+ return;
+ }
+ // These are the three original static variables in the ffmpeg hack.
+ pFrameRGB = ci->pFrameRGB;
+ xm_size = ci->x_size;
+ ym_size = ci->y_size;
+
+ // I'll do the *4 => <<2 crap later. Most compilers understand that anyway.
+ // According to avcodec.h PIX_FMT_RGBA32 is handled in endian specific manner.
+ for (y=0; y<src_height; y++) {
+ offs = y * (src_width * 4);
+ offsm = (((y * ym_size) / src_height) * 4) * xm_size; // offsm first in maskline. byteoffs!
+ for (x=0; x<src_width; x++) {
+ mpoffs = offsm + (((x * xm_size) / src_width) * 4);
+ p_pixel = (uint32_t *)&((pFrameRGB->data[0])[mpoffs]);
+ pixelm = *p_pixel;
+ p_pixel = (uint32_t *)&((pict->data[0])[offs]);
+ pixel = *p_pixel;
+// pixelm = *((uint32_t *)&(pFrameRGB->data[mpoffs]));
+ pixel_meck = pixel & 0xff000000;
+
+ // R
+ tmp = (int)((pixel >> 16) & 0xff) + (int)((pixelm >> 16) & 0xff) - thrR;
+ if (tmp > 255) tmp = 255;
+ if (tmp < 0) tmp = 0;
+ pixel_meck |= (tmp << 16) & 0xff0000;
+ // G
+ tmp = (int)((pixel >> 8) & 0xff) + (int)((pixelm >> 8) & 0xff) - thrG;
+ if (tmp > 255) tmp = 255;
+ if (tmp < 0) tmp = 0;
+ pixel_meck |= (tmp << 8) & 0xff00;
+ // B
+ tmp = (int)((pixel >> 0) & 0xff) + (int)((pixelm >> 0) & 0xff) - thrB;
+ if (tmp > 255) tmp = 255;
+ if (tmp < 0) tmp = 0;
+ pixel_meck |= (tmp << 0) & 0xff;
+
+
+ // test:
+ //pixel_meck = pixel & 0xff000000;
+ //pixel_meck |= (pixelm & 0x00ffffff);
+
+ *p_pixel = pixel_meck;
+
+ offs += 4;
+ } // foreach X
+ } // foreach Y
+
+
+
+
+ if (pix_fmt != PIX_FMT_RGBA32) {
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ src_width, src_height, PIX_FMT_RGBA32,
+ src_width, src_height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, src_height,
+ picture->data, picture->linesize);
+ }
+
+ av_free(buf);
+}
+
+
+/****************************************************************************
+ * For mode 1 (the original one)
+ ****************************************************************************/
+static void Process1(void *ctx,
+ AVPicture *picture,
+ enum PixelFormat pix_fmt,
+ int src_width,
+ int src_height,
+ int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ char *buf = 0;
+ AVPicture picture1;
+ AVPicture *pict = picture;
+
+ AVFrame *pFrameRGB;
+ int xm_size;
+ int ym_size;
+
+ int x;
+ int y;
+ int offs, offsm;
+ int mpoffs;
+ uint32_t *p_pixel = 0;
+ uint32_t pixel;
+ uint32_t pixelm;
+
+ if (pix_fmt != PIX_FMT_RGBA32) {
+ int size;
+
+ size = avpicture_get_size(PIX_FMT_RGBA32, src_width, src_height);
+ buf = av_malloc(size);
+
+ avpicture_fill(&picture1, buf, PIX_FMT_RGBA32, src_width, src_height);
+
+ // if we already have an SWS context, reallocate it only if it is not re-usable
+ ci->toRGB_convert_ctx = sws_getCachedContext(ci->toRGB_convert_ctx,
+ src_width, src_height, pix_fmt,
+ src_width, src_height, PIX_FMT_RGBA32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->toRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the toRGB conversion context\n");
+ exit(1);
+ }
+
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->toRGB_convert_ctx,
+ picture->data, picture->linesize, 0, src_height,
+ picture1.data, picture1.linesize);
+
+ pict = &picture1;
+ }
+
+ /* Insert filter code here */ /* ok */
+
+ // Get me next frame
+ if (0 > get_watermark_picture(ci, 0)) {
+ return;
+ }
+ // These are the three original static variables in the ffmpeg hack.
+ pFrameRGB = ci->pFrameRGB;
+ xm_size = ci->x_size;
+ ym_size = ci->y_size;
+
+ // I'll do the *4 => <<2 crap later. Most compilers understand that anyway.
+ // According to avcodec.h PIX_FMT_RGBA32 is handled in endian specific manner.
+ for (y=0; y<src_height; y++) {
+ offs = y * (src_width * 4);
+ offsm = (((y * ym_size) / src_height) * 4) * xm_size; // offsm first in maskline. byteoffs!
+ for (x=0; x<src_width; x++) {
+ mpoffs = offsm + (((x * xm_size) / src_width) * 4);
+ p_pixel = (uint32_t *)&((pFrameRGB->data[0])[mpoffs]);
+ pixelm = *p_pixel; /* watermark pixel */
+ p_pixel = (uint32_t *)&((pict->data[0])[offs]);
+ pixel = *p_pixel;
+
+ if (((pixelm >> 16) & 0xff) > ci->thrR ||
+ ((pixelm >> 8) & 0xff) > ci->thrG ||
+ ((pixelm >> 0) & 0xff) > ci->thrB)
+ {
+ *p_pixel = pixelm;
+ } else {
+ *p_pixel = pixel;
+ }
+ offs += 4;
+ } // foreach X
+ } // foreach Y
+
+ if (pix_fmt != PIX_FMT_RGBA32) {
+ ci->fromRGB_convert_ctx = sws_getCachedContext(ci->fromRGB_convert_ctx,
+ src_width, src_height, PIX_FMT_RGBA32,
+ src_width, src_height, pix_fmt,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->fromRGB_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the fromRGB conversion context\n");
+ exit(1);
+ }
+// img_convert parameters are 2 first destination, then 4 source
+// sws_scale parameters are context, 4 first source, then 2 destination
+ sws_scale(ci->fromRGB_convert_ctx,
+ picture1.data, picture1.linesize, 0, src_height,
+ picture->data, picture->linesize);
+ }
+
+ av_free(buf);
+}
+
+
+/****************************************************************************
+ * This is the function that ffmpeg.c calls back into.
+ ****************************************************************************/
+void Process(void *ctx,
+ AVPicture *picture,
+ enum PixelFormat pix_fmt,
+ int src_width,
+ int src_height,
+ int64_t pts)
+{
+ ContextInfo *ci = (ContextInfo *) ctx;
+ if (1 == ci->mode) {
+ return Process1(ctx, picture, pix_fmt, src_width, src_height, pts);
+ } else {
+ return Process0(ctx, picture, pix_fmt, src_width, src_height, pts);
+ }
+}
+
+
+/****************************************************************************
+ * When cleanup == 0, we try to get the next frame. If no next frame, nothing
+ * is done.
+ *
+ * This code follows the example on
+ * http://www.inb.uni-luebeck.de/~boehme/using_libavcodec.html
+ *
+ * 0 = ok, -1 = error
+ ****************************************************************************/
+int get_watermark_picture(ContextInfo *ci, int cleanup)
+{
+ if (1 == ci->is_done && 0 == cleanup) return 0;
+
+ // Note: ci->pFrameRGB must still be NULL the first time this is called.
+ // This block is only executed the first time we enter this function.
+ if (0 == ci->pFrameRGB &&
+ 0 == cleanup)
+ {
+
+ /*
+ * The last three parameters specify the file format, buffer size and format
+ * parameters; by simply specifying NULL or 0 we ask libavformat to auto-detect
+ * the format and use a default buffer size. (Didn't work!)
+ */
+ if (av_open_input_file(&ci->pFormatCtx, ci->filename, NULL, 0, NULL) != 0) {
+
+ // Martin says this should not be necessary but it failed for me sending in
+ // NULL instead of file_iformat to av_open_input_file()
+ ci->i = strlen(ci->filename);
+ if (0 == ci->i) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() No filename to watermark vhook\n");
+ return -1;
+ }
+ while (ci->i > 0) {
+ if (ci->filename[ci->i] == '.') {
+ ci->i++;
+ break;
+ }
+ ci->i--;
+ }
+ ci->p_ext = &(ci->filename[ci->i]);
+ ci->file_iformat = av_find_input_format (ci->p_ext);
+ if (0 == ci->file_iformat) {
+ av_log(NULL, AV_LOG_INFO, "get_watermark_picture() attempt to use image2 for [%s]\n", ci->p_ext);
+ ci->file_iformat = av_find_input_format ("image2");
+ }
+ if (0 == ci->file_iformat) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Really failed to find iformat [%s]\n", ci->p_ext);
+ return -1;
+ }
+ // now continues the Martin template.
+
+ if (av_open_input_file(&ci->pFormatCtx, ci->filename, ci->file_iformat, 0, NULL)!=0) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to open input file [%s]\n", ci->filename);
+ return -1;
+ }
+ }
+
+ /*
+ * This fills the streams field of the AVFormatContext with valid information.
+ */
+ if(av_find_stream_info(ci->pFormatCtx)<0) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find stream info\n");
+ return -1;
+ }
+
+ /*
+ * As mentioned in the introduction, we'll handle only video streams, not audio
+ * streams. To make things nice and easy, we simply use the first video stream we
+ * find.
+ */
+ ci->videoStream=-1;
+ for(ci->i = 0; ci->i < ci->pFormatCtx->nb_streams; ci->i++)
+ if(ci->pFormatCtx->streams[ci->i]->codec->codec_type==CODEC_TYPE_VIDEO)
+ {
+ ci->videoStream = ci->i;
+ break;
+ }
+ if(ci->videoStream == -1) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find any video stream\n");
+ return -1;
+ }
+
+ ci->st = ci->pFormatCtx->streams[ci->videoStream];
+ ci->x_size = ci->st->codec->width;
+ ci->y_size = ci->st->codec->height;
+
+ // Get a pointer to the codec context for the video stream
+ ci->pCodecCtx = ci->pFormatCtx->streams[ci->videoStream]->codec;
+
+
+ /*
+ * OK, so now we've got a pointer to the so-called codec context for our video
+ * stream, but we still have to find the actual codec and open it.
+ */
+ // Find the decoder for the video stream
+ ci->pCodec = avcodec_find_decoder(ci->pCodecCtx->codec_id);
+ if(ci->pCodec == NULL) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to find any codec\n");
+ return -1;
+ }
+
+ // Inform the codec that we can handle truncated bitstreams -- i.e.,
+ // bitstreams where frame boundaries can fall in the middle of packets
+ if (ci->pCodec->capabilities & CODEC_CAP_TRUNCATED)
+ ci->pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
+
+ // Open codec
+ if(avcodec_open(ci->pCodecCtx, ci->pCodec)<0) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to open codec\n");
+ return -1;
+ }
+
+ // Hack to correct wrong frame rates that seem to be generated by some
+ // codecs
+ if (ci->pCodecCtx->time_base.den>1000 && ci->pCodecCtx->time_base.num==1)
+ ci->pCodecCtx->time_base.num=1000;
+
+ /*
+ * Allocate a video frame to store the decoded images in.
+ */
+ ci->pFrame = avcodec_alloc_frame();
+
+
+ /*
+ * The RGB image pFrameRGB (of type AVFrame *) is allocated like this:
+ */
+ // Allocate an AVFrame structure
+ ci->pFrameRGB=avcodec_alloc_frame();
+ if(ci->pFrameRGB==NULL) {
+ av_log(NULL, AV_LOG_ERROR, "get_watermark_picture() Failed to alloc pFrameRGB\n");
+ return -1;
+ }
+
+ // Determine required buffer size and allocate buffer
+ ci->numBytes = avpicture_get_size(PIX_FMT_RGBA32, ci->pCodecCtx->width,
+ ci->pCodecCtx->height);
+ ci->buffer = av_malloc(ci->numBytes);
+
+ // Assign appropriate parts of buffer to image planes in pFrameRGB
+ avpicture_fill((AVPicture *)ci->pFrameRGB, ci->buffer, PIX_FMT_RGBA32,
+ ci->pCodecCtx->width, ci->pCodecCtx->height);
+ }
+ // TODO loop, pingpong etc?
+ if (0 == cleanup)
+ {
+// av_log(NULL, AV_LOG_DEBUG, "get_watermark_picture() Get a frame\n");
+ while(av_read_frame(ci->pFormatCtx, &ci->packet)>=0)
+ {
+ // Is this a packet from the video stream?
+ if(ci->packet.stream_index == ci->videoStream)
+ {
+ // Decode video frame
+ avcodec_decode_video(ci->pCodecCtx, ci->pFrame, &ci->frameFinished,
+ ci->packet.data, ci->packet.size);
+
+ // Did we get a video frame?
+ if(ci->frameFinished)
+ {
+ // Convert the image from its native format to RGBA32
+ ci->watermark_convert_ctx =
+ sws_getCachedContext(ci->watermark_convert_ctx,
+ ci->pCodecCtx->width, ci->pCodecCtx->height, ci->pCodecCtx->pix_fmt,
+ ci->pCodecCtx->width, ci->pCodecCtx->height, PIX_FMT_RGBA32,
+ sws_flags, NULL, NULL, NULL);
+ if (ci->watermark_convert_ctx == NULL) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot initialize the watermark conversion context\n");
+ exit(1);
+ }
+// img_convert parameters are: first 2 destination, then 4 source
+// sws_scale parameters are: context, first 4 source, then 2 destination
+ sws_scale(ci->watermark_convert_ctx,
+ ci->pFrame->data, ci->pFrame->linesize, 0, ci->pCodecCtx->height,
+ ci->pFrameRGB->data, ci->pFrameRGB->linesize);
+
+ // Process the video frame (save to disk etc.)
+ //fprintf(stderr,"banan() New frame!\n");
+ //DoSomethingWithTheImage(ci->pFrameRGB);
+ return 0;
+ }
+ }
+
+ // Free the packet that was allocated by av_read_frame
+ av_free_packet(&ci->packet);
+ }
+ ci->is_done = 1;
+ return 0;
+ } // if (0 == cleanup)
+
+ if (0 != cleanup)
+ {
+ // Free the RGB image
+ av_freep(&ci->buffer);
+ av_freep(&ci->pFrameRGB);
+
+ // Close the codec
+ if (0 != ci->pCodecCtx) {
+ avcodec_close(ci->pCodecCtx);
+ ci->pCodecCtx = 0;
+ }
+
+ // Close the video file
+ if (0 != ci->pFormatCtx) {
+ av_close_input_file(ci->pFormatCtx);
+ ci->pFormatCtx = 0;
+ }
+
+ ci->is_done = 0;
+ }
+ return 0;
+}
+
+
+void parse_arg_file(const char *filename)
+{
+}
diff --git a/contrib/ffmpeg/xvmc_render.h b/contrib/ffmpeg/xvmc_render.h
new file mode 100644
index 000000000..82dfeb0b8
--- /dev/null
+++ b/contrib/ffmpeg/xvmc_render.h
@@ -0,0 +1,47 @@
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+#include <X11/Xatom.h>
+#include <X11/extensions/Xv.h>
+#include <X11/extensions/Xvlib.h>
+#include <X11/extensions/XvMClib.h>
+
+
+//the surface should be shown; the video driver manipulates this
+#define MP_XVMC_STATE_DISPLAY_PENDING 1
+//the surface is needed for prediction; the codec manipulates this
+#define MP_XVMC_STATE_PREDICTION 2
+// 1337 IDCT MCo
+#define MP_XVMC_RENDER_MAGIC 0x1DC711C0
+
+typedef struct{
+//these are not changed by decoder!
+ int magic;
+
+ short * data_blocks;
+ XvMCMacroBlock * mv_blocks;
+ int total_number_of_mv_blocks;
+ int total_number_of_data_blocks;
+ int mc_type;//XVMC_MPEG1/2/4,XVMC_H263 without XVMC_IDCT
+ int idct;//do we use IDCT acceleration?
+ int chroma_format;//420,422,444
+ int unsigned_intra;//+-128 for intra pictures after clip
+ int reserved1[13];//future extensions (e.g. gmc,qpel)
+ XvMCSurface* p_surface;//pointer to rendered surface, never changed
+
+//these are changed by decoder
+//used by XvMCRenderSurface function
+ XvMCSurface* p_past_surface;//pointer to the past surface
+ XvMCSurface* p_future_surface;//pointer to the future prediction surface
+
+ unsigned int picture_structure;//top/bottom fields or frame !
+ unsigned int flags;//XVMC_SECOND_FIELD - 1st or 2nd field in the sequence
+ unsigned int display_flags; //1,2 or 1+2 fields for XvMCPutSurface,
+
+//these are for internal communication
+ int state;//0-free,1 Waiting to Display,2 Waiting for prediction
+ int start_mv_blocks_num;//offset in the array for the current slice,updated by vo
+ int filled_mv_blocks_num;//processed mv blocks in this slice, changed by decoder
+
+ int next_free_data_block_num;//used in add_mv_block, pointer to next free block
+
+} xvmc_render_state_t;
diff --git a/src/libffmpeg/Makefile.am b/src/libffmpeg/Makefile.am
index bcae2c505..cf925bd3b 100644
--- a/src/libffmpeg/Makefile.am
+++ b/src/libffmpeg/Makefile.am
@@ -4,12 +4,22 @@ if HAVE_FFMPEG
AM_CFLAGS = $(FFMPEG_CFLAGS) $(FFMPEG_POSTPROC_CFLAGS)
link_ffmpeg = $(FFMPEG_LIBS) $(FFMPEG_POSTPROC_LIBS)
else
-ff_cppflags = -I$(top_srcdir)/src/libffmpeg/libavutil
+ff_cppflags = -I$(top_srcdir)/contrib/ffmpeg/libavutil \
+ -I$(top_srcdir)/contrib/ffmpeg/libavcodec \
+ -I$(top_srcdir)/contrib/ffmpeg/libpostproc
link_ffmpeg = \
- $(top_builddir)/src/libffmpeg/libavcodec/libavcodec.la \
- $(top_builddir)/src/libffmpeg/libavutil/libavutil.la \
- $(top_builddir)/src/libffmpeg/libavcodec/libpostproc/libpostprocess.la
-SUBDIRS = libavcodec libavutil
+ $(top_builddir)/contrib/ffmpeg/libavcodec/libavcodec.a \
+ $(top_builddir)/contrib/ffmpeg/libavutil/libavutil.a \
+ $(top_builddir)/contrib/ffmpeg/libpostproc/libpostproc.a
+
+$(top_builddir)/contrib/ffmpeg/libavcodec/libavcodec.a:
+ $(MAKE) -C $(top_builddir)/contrib/ffmpeg/ -f makefile.xine libavcodec/libavcodec.a
+
+$(top_builddir)/contrib/ffmpeg/libavutil/libavutil.a:
+ $(MAKE) -C $(top_builddir)/contrib/ffmpeg/ -f makefile.xine libavutil/libavutil.a
+
+$(top_builddir)/contrib/ffmpeg/libpostproc/libpostproc.a:
+ $(MAKE) -C $(top_builddir)/contrib/ffmpeg/ -f makefile.xine libpostproc/libpostproc.a
endif
# this must always be included, even if the current machine has no DXR3...
diff --git a/src/libffmpeg/diff_to_ffmpeg_cvs.txt b/src/libffmpeg/diff_to_ffmpeg_cvs.txt
deleted file mode 100644
index 617712ef3..000000000
--- a/src/libffmpeg/diff_to_ffmpeg_cvs.txt
+++ /dev/null
@@ -1,527 +0,0 @@
---- /home/melanson/projects/ffmpeg/libavcodec/avcodec.h 2006-08-01 20:03:33.000000000 -0700
-+++ libavcodec/avcodec.h 2006-08-01 20:55:16.000000000 -0700
-@@ -27,6 +27,13 @@
- #define AV_TIME_BASE 1000000
- #define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}
-
-+/* FIXME: We cannot use ffmpeg's XvMC capabilities, since that would require
-+ * linking the ffmpeg plugin against XvMC libraries, which is a bad thing,
-+ * since they are output dependend.
-+ * The correct fix would be to reimplement the XvMC functions libavcodec uses
-+ * and do the necessary talking with our XvMC output plugin there. */
-+#undef HAVE_XVMC
-+
- enum CodecID {
- CODEC_ID_NONE,
- CODEC_ID_MPEG1VIDEO,
-@@ -2629,6 +2636,13 @@
-
- extern unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
-
-+/* unused static macro */
-+#if defined(__GNUC__) && !defined(DEBUG)
-+/* since we do not compile the encoder part of ffmpeg, some static
-+ * functions will be unused; this is ok, the compiler will take care */
-+# define static static __attribute__((__unused__))
-+#endif
-+
- #ifdef __cplusplus
- }
- #endif
-
---- /home/melanson/projects/ffmpeg/libavcodec/dsputil.h 2006-08-01 20:03:33.000000000 -0700
-+++ libavcodec/dsputil.h 2006-08-01 20:55:16.000000000 -0700
-@@ -31,6 +31,9 @@
- #include "common.h"
- #include "avcodec.h"
-
-+#if defined(ARCH_X86) || defined(ARCH_X86_64)
-+#define HAVE_MMX 1
-+#endif
-
- //#define DEBUG
- /* dct code */
-
---- /home/melanson/projects/ffmpeg/libavcodec/motion_est.c 2006-07-02 20:16:54.000000000 -0700
-+++ libavcodec/motion_est.c 2006-08-01 21:18:45.000000000 -0700
-@@ -21,6 +21,9 @@
- * new Motion Estimation (X1/EPZS) by Michael Niedermayer <michaelni@gmx.at>
- */
-
-+/* motion estimation only needed for encoders */
-+#ifdef CONFIG_ENCODERS
-+
- /**
- * @file motion_est.c
- * Motion estimation.
-@@ -2113,3 +2116,5 @@
- }
- }
- }
-+
-+#endif /* CONFIG_ENCODERS */
-
---- /home/melanson/projects/ffmpeg/libavcodec/mpeg12.c 2006-07-02 20:16:54.000000000 -0700
-+++ libavcodec/mpeg12.c 2006-08-01 21:18:45.000000000 -0700
-@@ -34,6 +34,13 @@
- //#include <assert.h>
-
-
-+/* if xine's MPEG encoder is enabled, enable the encoding features in
-+ * this particular module */
-+#if defined(XINE_MPEG_ENCODER) && !defined(CONFIG_ENCODERS)
-+#define CONFIG_ENCODERS
-+#endif
-+
-+
- /* Start codes. */
- #define SEQ_END_CODE 0x000001b7
- #define SEQ_START_CODE 0x000001b3
-
---- /home/melanson/projects/ffmpeg/libavcodec/mpegvideo.c 2006-08-01 20:03:33.000000000 -0700
-+++ libavcodec/mpegvideo.c 2006-08-01 21:18:45.000000000 -0700
-@@ -38,6 +38,14 @@
- //#undef NDEBUG
- //#include <assert.h>
-
-+
-+/* if xine's MPEG encoder is enabled, enable the encoding features in
-+ * this particular module */
-+#if defined(XINE_MPEG_ENCODER) && !defined(CONFIG_ENCODERS)
-+#define CONFIG_ENCODERS
-+#endif
-+
-+
- #ifdef CONFIG_ENCODERS
- static void encode_picture(MpegEncContext *s, int picture_number);
- #endif //CONFIG_ENCODERS
-@@ -1165,6 +1173,8 @@
- s->low_delay= 0; //s->max_b_frames ? 0 : 1;
- avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
- break;
-+/* xine: this is never used in either decode or MPEG-1 encode mode */
-+#if 0
- case CODEC_ID_MPEG2VIDEO:
- s->out_format = FMT_MPEG1;
- s->low_delay= 0; //s->max_b_frames ? 0 : 1;
-@@ -1300,6 +1310,7 @@
- avctx->delay=0;
- s->low_delay=1;
- break;
-+#endif /* #if 0 */
- default:
- return -1;
- }
-@@ -1321,6 +1332,8 @@
- ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
- ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
-
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- #ifdef CONFIG_H261_ENCODER
- if (s->out_format == FMT_H261)
- ff_h261_encode_init(s);
-@@ -1329,6 +1342,8 @@
- h263_encode_init(s);
- if(s->msmpeg4_version)
- ff_msmpeg4_encode_init(s);
-+#endif /* #if 0 */
-+/* xine: we DO want this for MPEG-1 encoding */
- if (s->out_format == FMT_MPEG1)
- ff_mpeg1_encode_init(s);
-
-@@ -1373,9 +1388,12 @@
-
- ff_rate_control_uninit(s);
-
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- MPV_common_end(s);
- if (s->out_format == FMT_MJPEG)
- mjpeg_close(s);
-+#endif /* #if 0 */
-
- av_freep(&avctx->extradata);
-
-@@ -2516,8 +2534,11 @@
-
- MPV_frame_end(s);
-
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- if (s->out_format == FMT_MJPEG)
- mjpeg_picture_trailer(s);
-+#endif /* #if 0 */
-
- if(s->flags&CODEC_FLAG_PASS1)
- ff_write_pass1_stats(s);
-@@ -4516,6 +4537,8 @@
- case CODEC_ID_MPEG1VIDEO:
- case CODEC_ID_MPEG2VIDEO:
- mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- case CODEC_ID_MPEG4:
- mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
- case CODEC_ID_MSMPEG4V2:
-@@ -4536,6 +4559,7 @@
- h263_encode_mb(s, s->block, motion_x, motion_y); break;
- case CODEC_ID_MJPEG:
- mjpeg_encode_mb(s, s->block); break;
-+#endif /* #if 0 */
- default:
- assert(0);
- }
-@@ -4757,6 +4781,8 @@
- +sse(s, s->new_picture.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
- }
-
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
- MpegEncContext *s= arg;
-
-@@ -4800,6 +4826,7 @@
- }
- return 0;
- }
-+#endif /* #if 0 */
-
- static int mb_var_thread(AVCodecContext *c, void *arg){
- MpegEncContext *s= arg;
-@@ -4824,6 +4851,8 @@
- }
-
- static void write_slice_end(MpegEncContext *s){
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- if(s->codec_id==CODEC_ID_MPEG4){
- if(s->partitioned_frame){
- ff_mpeg4_merge_partitions(s);
-@@ -4833,6 +4862,7 @@
- }else if(s->out_format == FMT_MJPEG){
- ff_mjpeg_stuffing(&s->pb);
- }
-+#endif /* #if 0 */
-
- align_put_bits(&s->pb);
- flush_put_bits(&s->pb);
-@@ -4886,10 +4916,13 @@
- case CODEC_ID_FLV1:
- s->gob_index = ff_h263_get_gob_height(s);
- break;
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- case CODEC_ID_MPEG4:
- if(s->partitioned_frame)
- ff_mpeg4_init_partitions(s);
- break;
-+#endif /* #if 0 */
- }
-
- s->resync_mb_x=0;
-@@ -4962,9 +4995,12 @@
- if(s->start_mb_y != mb_y || mb_x!=0){
- write_slice_end(s);
-
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame){
- ff_mpeg4_init_partitions(s);
- }
-+#endif /* #if 0 */
- }
-
- assert((put_bits_count(&s->pb)&7) == 0);
-@@ -4988,19 +5024,25 @@
- }
-
- switch(s->codec_id){
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- case CODEC_ID_MPEG4:
- ff_mpeg4_encode_video_packet_header(s);
- ff_mpeg4_clean_buffers(s);
- break;
-+#endif /* #if 0 */
- case CODEC_ID_MPEG1VIDEO:
- case CODEC_ID_MPEG2VIDEO:
- ff_mpeg1_encode_slice_header(s);
- ff_mpeg1_clean_buffers(s);
- break;
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- case CODEC_ID_H263:
- case CODEC_ID_H263P:
- h263_encode_gob_header(s, mb_y);
- break;
-+#endif /* #if 0 */
- }
-
- if(s->flags&CODEC_FLAG_PASS1){
-@@ -5113,7 +5155,10 @@
-
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
- s->mb_intra= 0;
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- ff_mpeg4_set_direct_mv(s, mx, my);
-+#endif /* #if 0 */
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
- &dmin, &next_block, mx, my);
- }
-@@ -5299,7 +5344,10 @@
- s->mb_intra= 0;
- motion_x=s->b_direct_mv_table[xy][0];
- motion_y=s->b_direct_mv_table[xy][1];
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
-+#endif /* #if 0 */
- break;
- case CANDIDATE_MB_TYPE_BIDIR:
- s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
-@@ -5407,8 +5455,11 @@
- }
-
- //not beautiful here but we must write it before flushing so it has to be here
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
- msmpeg4_encode_ext_header(s);
-+#endif /* #if 0 */
-
- write_slice_end(s);
-
-@@ -5469,6 +5520,8 @@
- s->current_picture.quality = ff_rate_estimate_qscale(s, dry_run);
-
- if(s->adaptive_quant){
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- switch(s->codec_id){
- case CODEC_ID_MPEG4:
- ff_clean_mpeg4_qscales(s);
-@@ -5479,6 +5532,7 @@
- ff_clean_h263_qscales(s);
- break;
- }
-+#endif /* #if 0 */
-
- s->lambda= s->lambda_table[0];
- //FIXME broken
-@@ -5499,10 +5553,13 @@
- s->me.mb_var_sum_temp =
- s->me.mc_mb_var_sum_temp = 0;
-
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- /* we need to initialize some time vars before we can encode b-frames */
- // RAL: Condition added for MPEG1VIDEO
- if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
- ff_set_mpeg4_time(s, s->picture_number); //FIXME rename and use has_b_frames or similar
-+#endif /* #if 0 */
-
- s->me.scene_change_score=0;
-
-@@ -5532,6 +5589,8 @@
- ff_update_duplicate_context(s->thread_context[i], s);
- }
-
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- ff_init_me(s);
-
- /* Estimate motion for every MB */
-@@ -5546,6 +5605,8 @@
-
- s->avctx->execute(s->avctx, estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
- }else /* if(s->pict_type == I_TYPE) */{
-+#endif /* #if 0 */
-+ {
- /* I-Frame */
- for(i=0; i<s->mb_stride*s->mb_height; i++)
- s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
-@@ -5569,6 +5630,8 @@
- //printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
- }
-
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- if(!s->umvplus){
- if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
- s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
-@@ -5622,6 +5685,7 @@
- }
- }
- }
-+#endif /* #if 0 */
-
- estimate_qp(s, 0);
-
-@@ -5652,6 +5716,8 @@
-
- s->last_bits= put_bits_count(&s->pb);
- switch(s->out_format) {
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- case FMT_MJPEG:
- mjpeg_picture_header(s);
- break;
-@@ -5680,11 +5746,15 @@
- else
- h263_encode_picture_header(s, picture_number);
- break;
-+#endif /* #if 0 */
- case FMT_MPEG1:
- mpeg1_encode_picture_header(s, picture_number);
- break;
-+/* xine: do not need this for decode or MPEG-1 encoding modes */
-+#if 0
- case FMT_H264:
- break;
-+#endif /* #if 0 */
- default:
- assert(0);
- }
-
---- /home/melanson/projects/ffmpeg/libavcodec/snow.c 2006-07-02 20:16:54.000000000 -0700
-+++ libavcodec/snow.c 2006-08-01 21:18:45.000000000 -0700
-@@ -1975,6 +1975,7 @@
- #define P_MV1 P[9]
- #define FLAG_QPEL 1 //must be 1
-
-+#ifdef CONFIG_ENCODERS
- static int encode_q_branch(SnowContext *s, int level, int x, int y){
- uint8_t p_buffer[1024];
- uint8_t i_buffer[1024];
-@@ -2203,6 +2204,7 @@
- return score;
- }
- }
-+#endif
-
- static always_inline int same_block(BlockNode *a, BlockNode *b){
- if((a->type&BLOCK_INTRA) && (b->type&BLOCK_INTRA)){
-@@ -2317,6 +2319,7 @@
- }
- }
-
-+#ifdef CONFIG_ENCODERS
- static void encode_blocks(SnowContext *s){
- int x, y;
- int w= s->b_width;
-@@ -2338,6 +2341,7 @@
- }
- }
- }
-+#endif
-
- static void decode_blocks(SnowContext *s){
- int x, y;
-@@ -4030,6 +4034,7 @@
- }
- }
-
-+#ifdef CONFIG_ENCODERS
- static int encode_init(AVCodecContext *avctx)
- {
- SnowContext *s = avctx->priv_data;
-@@ -4117,6 +4122,7 @@
-
- return 0;
- }
-+#endif
-
- static int frame_start(SnowContext *s){
- AVFrame tmp;
-@@ -4155,6 +4161,7 @@
- return 0;
- }
-
-+#ifdef CONFIG_ENCODERS
- static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
- SnowContext *s = avctx->priv_data;
- RangeCoder * const c= &s->c;
-@@ -4382,6 +4389,7 @@
-
- return ff_rac_terminate(c);
- }
-+#endif
-
- static void common_end(SnowContext *s){
- int plane_index, level, orientation, i;
-@@ -4413,6 +4421,7 @@
- }
- }
-
-+#ifdef CONFIG_ENCODERS
- static int encode_end(AVCodecContext *avctx)
- {
- SnowContext *s = avctx->priv_data;
-@@ -4422,6 +4431,7 @@
-
- return 0;
- }
-+#endif
-
- static int decode_init(AVCodecContext *avctx)
- {
-
---- /home/melanson/projects/ffmpeg/libavutil/common.h 2006-08-01 20:03:31.000000000 -0700
-+++ libavutil/./common.h 2006-08-01 21:18:45.000000000 -0700
-@@ -358,4 +358,16 @@
- void *av_realloc(void *ptr, unsigned int size);
- void av_free(void *ptr);
-
-+/* xine: inline causes trouble for debug compiling */
-+#ifdef DISABLE_INLINE
-+# ifdef inline
-+# undef inline
-+# endif
-+# ifdef always_inline
-+# undef always_inline
-+# endif
-+# define inline
-+# define always_inline
-+#endif
-+
- #endif /* COMMON_H */
-Index: libavutil/integer.c
-===================================================================
-RCS file: /cvsroot/xine/xine-lib/src/libffmpeg/libavutil/integer.c,v
-retrieving revision 1.2
-diff -u -p -r1.2 integer.c
---- libavutil/integer.c 2 Aug 2006 07:12:57 -0000 1.2
-+++ libavutil/integer.c 13 Sep 2006 21:05:42 -0000
-@@ -124,8 +124,8 @@ AVInteger av_mod_i(AVInteger *quot, AVIn
- AVInteger quot_temp;
- if(!quot) quot = &quot_temp;
-
-- assert((int16_t)a[AV_INTEGER_SIZE-1] >= 0 && (int16_t)b[AV_INTEGER_SIZE-1] >= 0);
-- assert(av_log2(b)>=0);
-+ assert((int16_t)a.v[AV_INTEGER_SIZE-1] >= 0 && (int16_t)b.v[AV_INTEGER_SIZE-1] >= 0);
-+ assert(av_log2_i(b)>=0);
-
- if(i > 0)
- b= av_shr_i(b, -i);
-Index: libavutil/internal.h
-===================================================================
-RCS file: /cvsroot/xine/xine-lib/src/libffmpeg/libavutil/internal.h,v
-retrieving revision 1.1
-diff -u -p -r1.1 internal.h
---- libavutil/internal.h 2 Aug 2006 07:39:20 -0000 1.1
-+++ libavutil/internal.h 13 Sep 2006 21:05:42 -0000
-@@ -87,7 +87,11 @@
-
- /* dprintf macros */
- # ifdef DEBUG
--# define dprintf(fmt,...) av_log(NULL, AV_LOG_DEBUG, fmt, __VA_ARGS__)
-+# ifdef __GNUC__
-+# define dprintf(fmt,args...) av_log(NULL, AV_LOG_DEBUG, fmt, ##args)
-+# else
-+# define dprintf(fmt,...) av_log(NULL, AV_LOG_DEBUG, fmt, __VA_ARGS__)
-+# endif
- # else
- # define dprintf(fmt,...)
- # endif
diff --git a/src/libffmpeg/dvaudio_decoder.c b/src/libffmpeg/dvaudio_decoder.c
index 663335aaa..71e58417b 100644
--- a/src/libffmpeg/dvaudio_decoder.c
+++ b/src/libffmpeg/dvaudio_decoder.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: dvaudio_decoder.c,v 1.11 2006/07/10 22:08:29 dgp85 Exp $
+ * $Id: dvaudio_decoder.c,v 1.11.2.1 2006/12/02 01:20:07 dgp85 Exp $
*
* dv audio decoder based on patch by Dan Dennedy <dan@dennedy.org>
*
@@ -55,13 +55,8 @@
# undef uint64_t
#endif
-#ifdef HAVE_FFMPEG
-# include <avcodec.h>
-# include "libavcodec/dvdata.h"
-#else
-# include "libavcodec/avcodec.h"
-# include "libavcodec/dvdata.h"
-#endif
+#include <avcodec.h>
+#include <dvdata.h> /* This is not installed by FFmpeg, its usage has to be cleared up */
#ifdef _MSC_VER
# undef malloc
@@ -96,23 +91,6 @@ typedef struct dvaudio_decoder_s {
} dvaudio_decoder_t;
-
-enum dv_pack_type {
- dv_header525 = 0x3f, /* see dv_write_pack for important details on */
- dv_header625 = 0xbf, /* these two packs */
- dv_timecode = 0x13,
- dv_audio_source = 0x50,
- dv_audio_control = 0x51,
- dv_audio_recdate = 0x52,
- dv_audio_rectime = 0x53,
- dv_video_source = 0x60,
- dv_video_control = 0x61,
- dv_viedo_recdate = 0x62,
- dv_video_rectime = 0x63,
- dv_unknown_pack = 0xff,
-};
-
-
/*
* This is the dumbest implementation of all -- it simply looks at
* a fixed offset and if pack isn't there -- fails. We might want
diff --git a/src/libffmpeg/libavcodec/.cvsignore b/src/libffmpeg/libavcodec/.cvsignore
deleted file mode 100644
index 7d926a554..000000000
--- a/src/libffmpeg/libavcodec/.cvsignore
+++ /dev/null
@@ -1,6 +0,0 @@
-Makefile
-Makefile.in
-.libs
-.deps
-*.lo
-*.la
diff --git a/src/libffmpeg/libavcodec/Makefile.am b/src/libffmpeg/libavcodec/Makefile.am
deleted file mode 100644
index bf98dd7ca..000000000
--- a/src/libffmpeg/libavcodec/Makefile.am
+++ /dev/null
@@ -1,172 +0,0 @@
-include $(top_srcdir)/misc/Makefile.common
-
-SUBDIRS = armv4l i386 mlib alpha ppc sparc libpostproc
-
-# some of ffmpeg's decoders are not used by xine yet
-EXTRA_DIST = motion_est_template.c \
- adx.c cljr.c fdctref.c ffv1.c g726.c jpeg_ls.c mdec.c raw.c snow.c svq3.c wmv2.c
-
-# we need to compile everything in debug mode, including the encoders,
-# otherwise we get unresolved symbols, because some unsatisfied function calls
-# are not optimized away with debug optimization
-AM_CFLAGS = `test "$(CFLAGS)" = "$(DEBUG_CFLAGS)" && echo -DCONFIG_ENCODERS` -fno-strict-aliasing -DCONFIG_VC1_DECODER
-AM_CPPFLAGS = $(ZLIB_CPPFLAGS) $(LIBFFMPEG_CPPFLAGS) \
- -I$(top_srcdir)/src/libffmpeg/libavutil
-ASFLAGS =
-
-noinst_LTLIBRARIES = libavcodec.la
-
-libavcodec_la_SOURCES = \
- 4xm.c \
- 8bps.c \
- aasc.c \
- adpcm.c \
- alac.c \
- asv1.c \
- avs.c \
- bitstream.c \
- cabac.c \
- cavs.c \
- cinepak.c \
- cook.c \
- cscd.c \
- cyuv.c \
- dpcm.c \
- dsputil.c \
- dv.c \
- error_resilience.c \
- eval.c \
- faandct.c \
- flac.c \
- flashsv.c \
- flicvideo.c \
- fraps.c \
- fft.c \
- golomb.c \
- h261.c \
- h263.c \
- h263dec.c \
- h264.c \
- h264idct.c \
- huffyuv.c \
- idcinvideo.c \
- imgconvert.c \
- imgresample.c \
- indeo2.c \
- indeo3.c \
- interplayvideo.c \
- jfdctfst.c \
- jfdctint.c \
- jrevdct.c \
- kmvc.c \
- lcl.c \
- loco.c \
- lzo.c \
- mdct.c \
- mace.c \
- mem.c \
- mjpeg.c \
- mmvideo.c \
- motion_est.c \
- mpeg12.c \
- mpegaudiodec.c \
- mpegvideo.c \
- msmpeg4.c \
- msrle.c \
- msvideo1.c \
- nuv.c \
- parser.c \
- pcm.c \
- qdm2.c \
- qdrw.c \
- qpeg.c \
- qtrle.c \
- ra144.c \
- ra288.c \
- rangecoder.c \
- ratecontrol.c \
- resample2.c \
- roqvideo.c \
- rpza.c \
- rtjpeg.c \
- rv10.c \
- shorten.c \
- simple_idct.c \
- smacker.c \
- smc.c \
- svq1.c \
- tscc.c \
- truemotion1.c \
- truemotion2.c \
- truespeech.c \
- tta.c \
- ulti.c \
- utils.c \
- vc1.c \
- vc1dsp.c \
- vcr1.c \
- vmdav.c \
- vorbis.c \
- vp3.c \
- vp3dsp.c \
- vqavideo.c \
- wmadec.c \
- wnv1.c \
- xan.c \
- xl.c \
- zmbv.c
-
-libavcodec_la_LDFLAGS = \
- $(top_builddir)/src/libffmpeg/libavcodec/armv4l/libavcodec_armv4l.la \
- $(top_builddir)/src/libffmpeg/libavcodec/i386/libavcodec_mmx.la \
- $(top_builddir)/src/libffmpeg/libavcodec/mlib/libavcodec_mlib.la \
- $(top_builddir)/src/libffmpeg/libavcodec/ppc/libavcodec_ppc.la \
- $(top_builddir)/src/libffmpeg/libavcodec/sparc/libavcodec_sparc.la \
- -avoid-version -module
-
-
-noinst_HEADERS = \
- avcodec.h \
- bitstream.h \
- cabac.h \
- cavsdata.h \
- cookdata.h \
- dsputil.h \
- dvdata.h \
- faandct.h \
- fastmemcpy.h \
- golomb.h \
- imgconvert_template.h \
- indeo2data.h \
- indeo3data.h \
- h261data.h \
- h263data.h \
- h264data.h \
- lzo.h \
- mpeg4data.h \
- mpeg12data.h \
- mpegaudio.h \
- mpegaudiodectab.h \
- mpegaudiotab.h \
- mpegvideo.h \
- msmpeg4data.h \
- opt.h \
- qdm2data.h \
- ra144.h \
- ra288.h \
- rangecoder.h \
- rtjpeg.h \
- simple_idct.h \
- snow.h \
- sp5x.h \
- svq1_cb.h \
- svq1_vlc.h \
- swscale.h \
- truemotion1data.h \
- truespeech_data.h \
- ulti_cb.h \
- vorbis.h \
- vc1acdata.h \
- vc1data.h \
- vp3data.h \
- wmadata.h
diff --git a/src/libffmpeg/libavcodec/alpha/.cvsignore b/src/libffmpeg/libavcodec/alpha/.cvsignore
deleted file mode 100644
index 7d926a554..000000000
--- a/src/libffmpeg/libavcodec/alpha/.cvsignore
+++ /dev/null
@@ -1,6 +0,0 @@
-Makefile
-Makefile.in
-.libs
-.deps
-*.lo
-*.la
diff --git a/src/libffmpeg/libavcodec/alpha/Makefile.am b/src/libffmpeg/libavcodec/alpha/Makefile.am
deleted file mode 100644
index c69106ad3..000000000
--- a/src/libffmpeg/libavcodec/alpha/Makefile.am
+++ /dev/null
@@ -1,13 +0,0 @@
-include $(top_srcdir)/misc/Makefile.common
-
-AM_CPPFLAGS = $(LIBFFMPEG_CPPFLAGS)
-AM_CFLAGS = -fno-strict-aliasing
-
-EXTRA_DIST = asm.h \
- dsputil_alpha.c \
- mpegvideo_alpha.c \
- motion_est_alpha.c \
- motion_est_mvi_asm.S \
- regdef.h \
- dsputil_alpha_asm.S \
- simple_idct_alpha.c
diff --git a/src/libffmpeg/libavcodec/armv4l/.cvsignore b/src/libffmpeg/libavcodec/armv4l/.cvsignore
deleted file mode 100644
index 7d926a554..000000000
--- a/src/libffmpeg/libavcodec/armv4l/.cvsignore
+++ /dev/null
@@ -1,6 +0,0 @@
-Makefile
-Makefile.in
-.libs
-.deps
-*.lo
-*.la
diff --git a/src/libffmpeg/libavcodec/armv4l/Makefile.am b/src/libffmpeg/libavcodec/armv4l/Makefile.am
deleted file mode 100644
index 0f3d230f6..000000000
--- a/src/libffmpeg/libavcodec/armv4l/Makefile.am
+++ /dev/null
@@ -1,18 +0,0 @@
-include $(top_srcdir)/misc/Makefile.common
-
-AM_CFLAGS = -O2 -fno-strict-aliasing
-AM_CPPFLAGS = $(LIBFFMPEG_CPPFLAGS)
-ASFLAGS =
-
-noinst_LTLIBRARIES = libavcodec_armv4l.la
-
-libavcodec_armv4l_src = dsputil_arm.c jrevdct_arm.S mpegvideo_arm.c simple_idct_arm.S
-libavcodec_armv4l_dummy = libavcodec_armv4l_dummy.c
-EXTRA_DIST = $(libavcodec_armv4l_src) $(libavcodec_armv4l_dummy)
-
-#if HAVE_ARMV4L
-#armv4l_modules = $(libavcodec_armv4l_src)
-#endif
-armv4l_modules =
-
-libavcodec_armv4l_la_SOURCES = $(armv4l_modules) $(libavcodec_armv4l_dummy)
diff --git a/src/libffmpeg/libavcodec/armv4l/libavcodec_armv4l_dummy.c b/src/libffmpeg/libavcodec/armv4l/libavcodec_armv4l_dummy.c
deleted file mode 100644
index ebe4c235c..000000000
--- a/src/libffmpeg/libavcodec/armv4l/libavcodec_armv4l_dummy.c
+++ /dev/null
@@ -1,2 +0,0 @@
-
-char libavcodec_armv4l_dummy;
diff --git a/src/libffmpeg/libavcodec/cabac.h b/src/libffmpeg/libavcodec/cabac.h
deleted file mode 100644
index e79774157..000000000
--- a/src/libffmpeg/libavcodec/cabac.h
+++ /dev/null
@@ -1,429 +0,0 @@
-/*
- * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
- * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-
-/**
- * @file cabac.h
- * Context Adaptive Binary Arithmetic Coder.
- */
-
-
-//#undef NDEBUG
-#include <assert.h>
-
-#define CABAC_BITS 8
-#define CABAC_MASK ((1<<CABAC_BITS)-1)
-
-typedef struct CABACContext{
- int low;
- int range;
- int outstanding_count;
-#ifdef STRICT_LIMITS
- int symCount;
-#endif
- uint8_t lps_range[2*65][4]; ///< rangeTabLPS
- uint8_t lps_state[2*64]; ///< transIdxLPS
- uint8_t mps_state[2*64]; ///< transIdxMPS
- const uint8_t *bytestream_start;
- const uint8_t *bytestream;
- const uint8_t *bytestream_end;
- PutBitContext pb;
-}CABACContext;
-
-extern const uint8_t ff_h264_lps_range[64][4];
-extern const uint8_t ff_h264_mps_state[64];
-extern const uint8_t ff_h264_lps_state[64];
-extern const uint8_t ff_h264_norm_shift[256];
-
-
-void ff_init_cabac_encoder(CABACContext *c, uint8_t *buf, int buf_size);
-void ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size);
-void ff_init_cabac_states(CABACContext *c, uint8_t const (*lps_range)[4],
- uint8_t const *mps_state, uint8_t const *lps_state, int state_count);
-
-
-static inline void put_cabac_bit(CABACContext *c, int b){
- put_bits(&c->pb, 1, b);
- for(;c->outstanding_count; c->outstanding_count--){
- put_bits(&c->pb, 1, 1-b);
- }
-}
-
-static inline void renorm_cabac_encoder(CABACContext *c){
- while(c->range < 0x100){
- //FIXME optimize
- if(c->low<0x100){
- put_cabac_bit(c, 0);
- }else if(c->low<0x200){
- c->outstanding_count++;
- c->low -= 0x100;
- }else{
- put_cabac_bit(c, 1);
- c->low -= 0x200;
- }
-
- c->range+= c->range;
- c->low += c->low;
- }
-}
-
-static inline void put_cabac(CABACContext *c, uint8_t * const state, int bit){
- int RangeLPS= c->lps_range[*state][c->range>>6];
-
- if(bit == ((*state)&1)){
- c->range -= RangeLPS;
- *state= c->mps_state[*state];
- }else{
- c->low += c->range - RangeLPS;
- c->range = RangeLPS;
- *state= c->lps_state[*state];
- }
-
- renorm_cabac_encoder(c);
-
-#ifdef STRICT_LIMITS
- c->symCount++;
-#endif
-}
-
-static inline void put_cabac_static(CABACContext *c, int RangeLPS, int bit){
- assert(c->range > RangeLPS);
-
- if(!bit){
- c->range -= RangeLPS;
- }else{
- c->low += c->range - RangeLPS;
- c->range = RangeLPS;
- }
-
- renorm_cabac_encoder(c);
-
-#ifdef STRICT_LIMITS
- c->symCount++;
-#endif
-}
-
-/**
- * @param bit 0 -> write zero bit, !=0 write one bit
- */
-static inline void put_cabac_bypass(CABACContext *c, int bit){
- c->low += c->low;
-
- if(bit){
- c->low += c->range;
- }
-//FIXME optimize
- if(c->low<0x200){
- put_cabac_bit(c, 0);
- }else if(c->low<0x400){
- c->outstanding_count++;
- c->low -= 0x200;
- }else{
- put_cabac_bit(c, 1);
- c->low -= 0x400;
- }
-
-#ifdef STRICT_LIMITS
- c->symCount++;
-#endif
-}
-
-/**
- *
- * @return the number of bytes written
- */
-static inline int put_cabac_terminate(CABACContext *c, int bit){
- c->range -= 2;
-
- if(!bit){
- renorm_cabac_encoder(c);
- }else{
- c->low += c->range;
- c->range= 2;
-
- renorm_cabac_encoder(c);
-
- assert(c->low <= 0x1FF);
- put_cabac_bit(c, c->low>>9);
- put_bits(&c->pb, 2, ((c->low>>7)&3)|1);
-
- flush_put_bits(&c->pb); //FIXME FIXME FIXME XXX wrong
- }
-
-#ifdef STRICT_LIMITS
- c->symCount++;
-#endif
-
- return (put_bits_count(&c->pb)+7)>>3;
-}
-
-/**
- * put (truncated) unary binarization.
- */
-static inline void put_cabac_u(CABACContext *c, uint8_t * state, int v, int max, int max_index, int truncated){
- int i;
-
- assert(v <= max);
-
-#if 1
- for(i=0; i<v; i++){
- put_cabac(c, state, 1);
- if(i < max_index) state++;
- }
- if(truncated==0 || v<max)
- put_cabac(c, state, 0);
-#else
- if(v <= max_index){
- for(i=0; i<v; i++){
- put_cabac(c, state+i, 1);
- }
- if(truncated==0 || v<max)
- put_cabac(c, state+i, 0);
- }else{
- for(i=0; i<=max_index; i++){
- put_cabac(c, state+i, 1);
- }
- for(; i<v; i++){
- put_cabac(c, state+max_index, 1);
- }
- if(truncated==0 || v<max)
- put_cabac(c, state+max_index, 0);
- }
-#endif
-}
-
-/**
- * put unary exp golomb k-th order binarization.
- */
-static inline void put_cabac_ueg(CABACContext *c, uint8_t * state, int v, int max, int is_signed, int k, int max_index){
- int i;
-
- if(v==0)
- put_cabac(c, state, 0);
- else{
- const int sign= v < 0;
-
- if(is_signed) v= ABS(v);
-
- if(v<max){
- for(i=0; i<v; i++){
- put_cabac(c, state, 1);
- if(i < max_index) state++;
- }
-
- put_cabac(c, state, 0);
- }else{
- int m= 1<<k;
-
- for(i=0; i<max; i++){
- put_cabac(c, state, 1);
- if(i < max_index) state++;
- }
-
- v -= max;
- while(v >= m){ //FIXME optimize
- put_cabac_bypass(c, 1);
- v-= m;
- m+= m;
- }
- put_cabac_bypass(c, 0);
- while(m>>=1){
- put_cabac_bypass(c, v&m);
- }
- }
-
- if(is_signed)
- put_cabac_bypass(c, sign);
- }
-}
-
-static void refill(CABACContext *c){
- if(c->bytestream <= c->bytestream_end)
-#if CABAC_BITS == 16
- c->low+= ((c->bytestream[0]<<9) + (c->bytestream[1])<<1);
-#else
- c->low+= c->bytestream[0]<<1;
-#endif
- c->low -= CABAC_MASK;
- c->bytestream+= CABAC_BITS/8;
-}
-
-#if 0 /* all use commented */
-static void refill2(CABACContext *c){
- int i, x;
-
- x= c->low ^ (c->low-1);
- i= 8 - ff_h264_norm_shift[x>>(CABAC_BITS+1)];
-
- x= -CABAC_MASK;
-
- if(c->bytestream < c->bytestream_end)
-#if CABAC_BITS == 16
- x+= (c->bytestream[0]<<9) + (c->bytestream[1]<<1);
-#else
- x+= c->bytestream[0]<<1;
-#endif
-
- c->low += x<<i;
- c->bytestream+= CABAC_BITS/8;
-}
-#endif
-
-static inline void renorm_cabac_decoder(CABACContext *c){
- while(c->range < (0x200 << CABAC_BITS)){
- c->range+= c->range;
- c->low+= c->low;
- if(!(c->low & CABAC_MASK))
- refill(c);
- }
-}
-
-static inline void renorm_cabac_decoder_once(CABACContext *c){
- int mask= (c->range - (0x200 << CABAC_BITS))>>31;
- c->range+= c->range&mask;
- c->low += c->low &mask;
- if(!(c->low & CABAC_MASK))
- refill(c);
-}
-
-static inline int get_cabac(CABACContext *c, uint8_t * const state){
- int RangeLPS= c->lps_range[*state][c->range>>(CABAC_BITS+7)]<<(CABAC_BITS+1);
- int bit, lps_mask attribute_unused;
-
- c->range -= RangeLPS;
-#if 1
- if(c->low < c->range){
- bit= (*state)&1;
- *state= c->mps_state[*state];
- renorm_cabac_decoder_once(c);
- }else{
-// int shift= ff_h264_norm_shift[RangeLPS>>17];
- bit= ((*state)&1)^1;
- c->low -= c->range;
- *state= c->lps_state[*state];
- c->range = RangeLPS;
- renorm_cabac_decoder(c);
-/* c->range = RangeLPS<<shift;
- c->low <<= shift;
- if(!(c->low & 0xFFFF)){
- refill2(c);
- }*/
- }
-#else
- lps_mask= (c->range - c->low)>>31;
-
- c->low -= c->range & lps_mask;
- c->range += (RangeLPS - c->range) & lps_mask;
-
- bit= ((*state)^lps_mask)&1;
- *state= c->mps_state[(*state) - (128&lps_mask)];
-
- lps_mask= ff_h264_norm_shift[c->range>>(CABAC_BITS+2)];
- c->range<<= lps_mask;
- c->low <<= lps_mask;
- if(!(c->low & CABAC_MASK))
- refill2(c);
-#endif
-
- return bit;
-}
-
-static inline int get_cabac_bypass(CABACContext *c){
- c->low += c->low;
-
- if(!(c->low & CABAC_MASK))
- refill(c);
-
- if(c->low < c->range){
- return 0;
- }else{
- c->low -= c->range;
- return 1;
- }
-}
-
-/**
- *
- * @return the number of bytes read or 0 if no end
- */
-static inline int get_cabac_terminate(CABACContext *c){
- c->range -= 4<<CABAC_BITS;
- if(c->low < c->range){
- renorm_cabac_decoder_once(c);
- return 0;
- }else{
- return c->bytestream - c->bytestream_start;
- }
-}
-
-/**
- * get (truncated) unnary binarization.
- */
-static inline int get_cabac_u(CABACContext *c, uint8_t * state, int max, int max_index, int truncated){
- int i;
-
- for(i=0; i<max; i++){
- if(get_cabac(c, state)==0)
- return i;
-
- if(i< max_index) state++;
- }
-
- return truncated ? max : -1;
-}
-
-/**
- * get unary exp golomb k-th order binarization.
- */
-static inline int get_cabac_ueg(CABACContext *c, uint8_t * state, int max, int is_signed, int k, int max_index){
- int i, v;
- int m= 1<<k;
-
- if(get_cabac(c, state)==0)
- return 0;
-
- if(0 < max_index) state++;
-
- for(i=1; i<max; i++){
- if(get_cabac(c, state)==0){
- if(is_signed && get_cabac_bypass(c)){
- return -i;
- }else
- return i;
- }
-
- if(i < max_index) state++;
- }
-
- while(get_cabac_bypass(c)){
- i+= m;
- m+= m;
- }
-
- v=0;
- while(m>>=1){
- v+= v + get_cabac_bypass(c);
- }
- i += v;
-
- if(is_signed && get_cabac_bypass(c)){
- return -i;
- }else
- return i;
-}
diff --git a/src/libffmpeg/libavcodec/eval.c b/src/libffmpeg/libavcodec/eval.c
deleted file mode 100644
index 5b0e51d62..000000000
--- a/src/libffmpeg/libavcodec/eval.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * simple arithmetic expression evaluator
- *
- * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-
-/**
- * @file eval.c
- * simple arithmetic expression evaluator.
- *
- * see http://joe.hotchkiss.com/programming/eval/eval.html
- */
-
-#include "avcodec.h"
-#include "mpegvideo.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <math.h>
-
-#ifndef NAN
- #define NAN 0
-#endif
-
-#ifndef M_PI
-#define M_PI 3.14159265358979323846
-#endif
-
-typedef struct Parser{
- int stack_index;
- char *s;
- double *const_value;
- const char **const_name; // NULL terminated
- double (**func1)(void *, double a); // NULL terminated
- const char **func1_name; // NULL terminated
- double (**func2)(void *, double a, double b); // NULL terminated
- char **func2_name; // NULL terminated
- void *opaque;
-} Parser;
-
-static double evalExpression(Parser *p);
-
-static int strmatch(const char *s, const char *prefix){
- int i;
- for(i=0; prefix[i]; i++){
- if(prefix[i] != s[i]) return 0;
- }
- return 1;
-}
-
-static double evalPrimary(Parser *p){
- double d, d2=NAN;
- char *next= p->s;
- int i;
-
- /* number */
- d= strtod(p->s, &next);
- if(next != p->s){
- p->s= next;
- return d;
- }
-
- /* named constants */
- for(i=0; p->const_name && p->const_name[i]; i++){
- if(strmatch(p->s, p->const_name[i])){
- p->s+= strlen(p->const_name[i]);
- return p->const_value[i];
- }
- }
-
- p->s= strchr(p->s, '(');
- if(p->s==NULL){
- av_log(NULL, AV_LOG_ERROR, "Parser: missing ( in \"%s\"\n", next);
- return NAN;
- }
- p->s++; // "("
- d= evalExpression(p);
- if(p->s[0]== ','){
- p->s++; // ","
- d2= evalExpression(p);
- }
- if(p->s[0] != ')'){
- av_log(NULL, AV_LOG_ERROR, "Parser: missing ) in \"%s\"\n", next);
- return NAN;
- }
- p->s++; // ")"
-
- if( strmatch(next, "sinh" ) ) d= sinh(d);
- else if( strmatch(next, "cosh" ) ) d= cosh(d);
- else if( strmatch(next, "tanh" ) ) d= tanh(d);
- else if( strmatch(next, "sin" ) ) d= sin(d);
- else if( strmatch(next, "cos" ) ) d= cos(d);
- else if( strmatch(next, "tan" ) ) d= tan(d);
- else if( strmatch(next, "exp" ) ) d= exp(d);
- else if( strmatch(next, "log" ) ) d= log(d);
- else if( strmatch(next, "squish") ) d= 1/(1+exp(4*d));
- else if( strmatch(next, "gauss" ) ) d= exp(-d*d/2)/sqrt(2*M_PI);
- else if( strmatch(next, "abs" ) ) d= fabs(d);
- else if( strmatch(next, "max" ) ) d= d > d2 ? d : d2;
- else if( strmatch(next, "min" ) ) d= d < d2 ? d : d2;
- else if( strmatch(next, "gt" ) ) d= d > d2 ? 1.0 : 0.0;
- else if( strmatch(next, "gte" ) ) d= d >= d2 ? 1.0 : 0.0;
- else if( strmatch(next, "lt" ) ) d= d > d2 ? 0.0 : 1.0;
- else if( strmatch(next, "lte" ) ) d= d >= d2 ? 0.0 : 1.0;
- else if( strmatch(next, "eq" ) ) d= d == d2 ? 1.0 : 0.0;
- else if( strmatch(next, "(" ) ) d= d;
-// else if( strmatch(next, "l1" ) ) d= 1 + d2*(d - 1);
-// else if( strmatch(next, "sq01" ) ) d= (d >= 0.0 && d <=1.0) ? 1.0 : 0.0;
- else{
- for(i=0; p->func1_name && p->func1_name[i]; i++){
- if(strmatch(next, p->func1_name[i])){
- return p->func1[i](p->opaque, d);
- }
- }
-
- for(i=0; p->func2_name && p->func2_name[i]; i++){
- if(strmatch(next, p->func2_name[i])){
- return p->func2[i](p->opaque, d, d2);
- }
- }
-
- av_log(NULL, AV_LOG_ERROR, "Parser: unknown function in \"%s\"\n", next);
- return NAN;
- }
-
- return d;
-}
-
-static double evalPow(Parser *p){
- int sign= (*p->s == '+') - (*p->s == '-');
- p->s += sign&1;
- return (sign|1) * evalPrimary(p);
-}
-
-static double evalFactor(Parser *p){
- double ret= evalPow(p);
- while(p->s[0]=='^'){
- p->s++;
- ret= pow(ret, evalPow(p));
- }
- return ret;
-}
-
-static double evalTerm(Parser *p){
- double ret= evalFactor(p);
- while(p->s[0]=='*' || p->s[0]=='/'){
- if(*p->s++ == '*') ret*= evalFactor(p);
- else ret/= evalFactor(p);
- }
- return ret;
-}
-
-static double evalExpression(Parser *p){
- double ret= 0;
-
- if(p->stack_index <= 0) //protect against stack overflows
- return NAN;
- p->stack_index--;
-
- do{
- ret += evalTerm(p);
- }while(*p->s == '+' || *p->s == '-');
-
- p->stack_index++;
-
- return ret;
-}
-
-double ff_eval(char *s, double *const_value, const char **const_name,
- double (**func1)(void *, double), const char **func1_name,
- double (**func2)(void *, double, double), char **func2_name,
- void *opaque){
- Parser p;
-
- p.stack_index=100;
- p.s= s;
- p.const_value= const_value;
- p.const_name = const_name;
- p.func1 = func1;
- p.func1_name = func1_name;
- p.func2 = func2;
- p.func2_name = func2_name;
- p.opaque = opaque;
-
- return evalExpression(&p);
-}
-
-#ifdef TEST
-#undef printf
-static double const_values[]={
- M_PI,
- M_E,
- 0
-};
-static const char *const_names[]={
- "PI",
- "E",
- 0
-};
-main(){
- int i;
- printf("%f == 12.7\n", ff_eval("1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)", const_values, const_names, NULL, NULL, NULL, NULL, NULL));
-
- for(i=0; i<1050; i++){
- START_TIMER
- ff_eval("1+(5-2)^(3-1)+1/2+sin(PI)-max(-2.2,-3.1)", const_values, const_names, NULL, NULL, NULL, NULL, NULL);
- STOP_TIMER("ff_eval")
- }
-}
-#endif
diff --git a/src/libffmpeg/libavcodec/fastmemcpy.h b/src/libffmpeg/libavcodec/fastmemcpy.h
deleted file mode 100644
index 3459bf1ce..000000000
--- a/src/libffmpeg/libavcodec/fastmemcpy.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#if 0
-extern void *xine_fast_memcpy(void *to, const void *from, size_t len);
-#define memcpy(a,b,c) xine_fast_memcpy(a,b,c)
-#endif
diff --git a/src/libffmpeg/libavcodec/i386/.cvsignore b/src/libffmpeg/libavcodec/i386/.cvsignore
deleted file mode 100644
index 7d926a554..000000000
--- a/src/libffmpeg/libavcodec/i386/.cvsignore
+++ /dev/null
@@ -1,6 +0,0 @@
-Makefile
-Makefile.in
-.libs
-.deps
-*.lo
-*.la
diff --git a/src/libffmpeg/libavcodec/i386/Makefile.am b/src/libffmpeg/libavcodec/i386/Makefile.am
deleted file mode 100644
index 0d649ae24..000000000
--- a/src/libffmpeg/libavcodec/i386/Makefile.am
+++ /dev/null
@@ -1,49 +0,0 @@
-include $(top_srcdir)/misc/Makefile.common
-
-# -fomit-frame-pointer is always needed. it might cause debug to not
-# work, but at least it compiles.
-AM_CFLAGS = -fomit-frame-pointer -fno-strict-aliasing
-# CFLAGS is here to filter out -funroll-loops because it causes bad
-# behavior of libavcodec
-CFLAGS := `echo @CFLAGS@ | sed -e 's/-funroll-loops//g'`
-AM_CPPFLAGS = $(LIBFFMPEG_CPPFLAGS) -I$(top_srcdir)/src/libffmpeg/libavutil
-
-# Avoid "can't find register" failures with -O1 and higher
-dsputil_mmx.o dsputil_mmx.lo: CFLAGS=`echo @CFLAGS@ | sed -e 's/-funroll-loops//g; s/-O[0-9]/-Os/g'`
-
-# Avoid errors on (at least) amd64 with -O0
-fdct_mmx.o fdct_mmx.lo: CFLAGS=`echo @CFLAGS@ | sed -e 's/^/-Os /; s/-O0\?\s/-Os /g'`
-
-ASFLAGS =
-
-noinst_LTLIBRARIES = libavcodec_mmx.la
-
-libavcodec_mmx_src = \
- cputest.c \
- dsputil_mmx.c \
- fdct_mmx.c \
- fft_sse.c \
- idct_mmx.c \
- idct_mmx_xvid.c \
- motion_est_mmx.c \
- mpegvideo_mmx.c \
- simple_idct_mmx.c \
- vp3dsp_mmx.c \
- vp3dsp_sse2.c
-
-libavcodec_mmx_dummy = libavcodec_mmx_dummy.c
-
-EXTRA_DIST = \
- $(libavcodec_mmx_dummy) \
- $(libavcodec_mmx_src) \
- dsputil_h264_template_mmx.c \
- h264dsp_mmx.c \
- mpegvideo_mmx_template.c
-
-if HAVE_FFMMX
-mmx_modules = $(libavcodec_mmx_src)
-endif
-
-libavcodec_mmx_la_SOURCES = $(mmx_modules) $(libavcodec_mmx_dummy)
-
-noinst_HEADERS = dsputil_mmx_avg.h dsputil_mmx_rnd.h mmx.h
diff --git a/src/libffmpeg/libavcodec/i386/fft_sse.c b/src/libffmpeg/libavcodec/i386/fft_sse.c
deleted file mode 100644
index 631848265..000000000
--- a/src/libffmpeg/libavcodec/i386/fft_sse.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * FFT/MDCT transform with SSE optimizations
- * Copyright (c) 2002 Fabrice Bellard.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "../dsputil.h"
-#include <math.h>
-
-#ifdef HAVE_BUILTIN_VECTOR
-
-#include <xmmintrin.h>
-
-static const int p1p1p1m1[4] __attribute__((aligned(16))) =
- { 0, 0, 0, 1 << 31 };
-
-static const int p1p1m1p1[4] __attribute__((aligned(16))) =
- { 0, 0, 1 << 31, 0 };
-
-static const int p1p1m1m1[4] __attribute__((aligned(16))) =
- { 0, 0, 1 << 31, 1 << 31 };
-
-#if 0
-static void print_v4sf(const char *str, __m128 a)
-{
- float *p = (float *)&a;
- printf("%s: %f %f %f %f\n",
- str, p[0], p[1], p[2], p[3]);
-}
-#endif
-
-/* XXX: handle reverse case */
-void ff_fft_calc_sse(FFTContext *s, FFTComplex *z)
-{
- int ln = s->nbits;
- int j, np, np2;
- int nblocks, nloops;
- register FFTComplex *p, *q;
- FFTComplex *cptr, *cptr1;
- int k;
-
- np = 1 << ln;
-
- {
- __m128 *r, a, b, a1, c1, c2;
-
- r = (__m128 *)&z[0];
- c1 = *(__m128 *)p1p1m1m1;
- if (s->inverse)
- c2 = *(__m128 *)p1p1m1p1;
- else
- c2 = *(__m128 *)p1p1p1m1;
-
- j = (np >> 2);
- do {
- a = r[0];
- b = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2));
- a = _mm_xor_ps(a, c1);
- /* do the pass 0 butterfly */
- a = _mm_add_ps(a, b);
-
- a1 = r[1];
- b = _mm_shuffle_ps(a1, a1, _MM_SHUFFLE(1, 0, 3, 2));
- a1 = _mm_xor_ps(a1, c1);
- /* do the pass 0 butterfly */
- b = _mm_add_ps(a1, b);
-
- /* multiply third by -i */
- /* by toggling the sign bit */
- b = _mm_shuffle_ps(b, b, _MM_SHUFFLE(2, 3, 1, 0));
- b = _mm_xor_ps(b, c2);
-
- /* do the pass 1 butterfly */
- r[0] = _mm_add_ps(a, b);
- r[1] = _mm_sub_ps(a, b);
- r += 2;
- } while (--j != 0);
- }
- /* pass 2 .. ln-1 */
-
- nblocks = np >> 3;
- nloops = 1 << 2;
- np2 = np >> 1;
-
- cptr1 = s->exptab1;
- do {
- p = z;
- q = z + nloops;
- j = nblocks;
- do {
- cptr = cptr1;
- k = nloops >> 1;
- do {
- __m128 a, b, c, t1, t2;
-
- a = *(__m128 *)p;
- b = *(__m128 *)q;
-
- /* complex mul */
- c = *(__m128 *)cptr;
- /* cre*re cim*re */
- t1 = _mm_mul_ps(c,
- _mm_shuffle_ps(b, b, _MM_SHUFFLE(2, 2, 0, 0)));
- c = *(__m128 *)(cptr + 2);
- /* -cim*im cre*im */
- t2 = _mm_mul_ps(c,
- _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 3, 1, 1)));
- b = _mm_add_ps(t1, t2);
-
- /* butterfly */
- *(__m128 *)p = _mm_add_ps(a, b);
- *(__m128 *)q = _mm_sub_ps(a, b);
-
- p += 2;
- q += 2;
- cptr += 4;
- } while (--k);
-
- p += nloops;
- q += nloops;
- } while (--j);
- cptr1 += nloops * 2;
- nblocks = nblocks >> 1;
- nloops = nloops << 1;
- } while (nblocks != 0);
-}
-
-#endif
diff --git a/src/libffmpeg/libavcodec/i386/libavcodec_mmx_dummy.c b/src/libffmpeg/libavcodec/i386/libavcodec_mmx_dummy.c
deleted file mode 100644
index 04a6fc298..000000000
--- a/src/libffmpeg/libavcodec/i386/libavcodec_mmx_dummy.c
+++ /dev/null
@@ -1,2 +0,0 @@
-
-char libavcodec_mmx_dummy;
diff --git a/src/libffmpeg/libavcodec/libpostproc/.cvsignore b/src/libffmpeg/libavcodec/libpostproc/.cvsignore
deleted file mode 100644
index 7d926a554..000000000
--- a/src/libffmpeg/libavcodec/libpostproc/.cvsignore
+++ /dev/null
@@ -1,6 +0,0 @@
-Makefile
-Makefile.in
-.libs
-.deps
-*.lo
-*.la
diff --git a/src/libffmpeg/libavcodec/libpostproc/Makefile.am b/src/libffmpeg/libavcodec/libpostproc/Makefile.am
deleted file mode 100644
index af1976b48..000000000
--- a/src/libffmpeg/libavcodec/libpostproc/Makefile.am
+++ /dev/null
@@ -1,15 +0,0 @@
-include $(top_srcdir)/misc/Makefile.common
-
-# -fomit-frame-pointer is always needed. it might cause debug to not
-# work, but at least it compiles.
-AM_CFLAGS = -fomit-frame-pointer -fno-strict-aliasing
-AM_CPPFLAGS = $(LIBFFMPEG_CPPFLAGS) -I$(top_srcdir)/src/libffmpeg/libavcodec
-ASFLAGS =
-
-noinst_LTLIBRARIES = libpostprocess.la
-
-EXTRA_DIST = postprocess_template.c postprocess_altivec_template.c
-
-libpostprocess_la_SOURCES = postprocess.c
-
-noinst_HEADERS = mangle.h postprocess.h postprocess_internal.h
diff --git a/src/libffmpeg/libavcodec/libpostproc/mangle.h b/src/libffmpeg/libavcodec/libpostproc/mangle.h
deleted file mode 100644
index aa09cd6bf..000000000
--- a/src/libffmpeg/libavcodec/libpostproc/mangle.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* mangle.h - This file has some CPP macros to deal with different symbol
- * mangling across binary formats.
- * (c)2002 by Felix Buenemann <atmosfear at users.sourceforge.net>
- * File licensed under the GPL, see http://www.fsf.org/ for more info.
- */
-
-#ifndef __MANGLE_H
-#define __MANGLE_H
-
-/* Feel free to add more to the list, eg. a.out IMO */
-/* Use rip-relative addressing if compiling PIC code on x86-64. */
-#if defined(__CYGWIN__) || defined(__MINGW32__) || defined(__OS2__) || \
- (defined(__OpenBSD__) && !defined(__ELF__))
-#if defined(ARCH_X86_64) && defined(PIC)
-#define MANGLE(a) "_" #a"(%%rip)"
-#else
-#define MANGLE(a) "_" #a
-#endif
-#else
-#if defined(ARCH_X86_64) && defined(PIC)
-#define MANGLE(a) #a"(%%rip)"
-#else
-#define MANGLE(a) #a
-#endif
-#endif
-
-#endif /* !__MANGLE_H */
-
diff --git a/src/libffmpeg/libavcodec/lzo.h b/src/libffmpeg/libavcodec/lzo.h
deleted file mode 100644
index dbce13770..000000000
--- a/src/libffmpeg/libavcodec/lzo.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LZO_H
-#define LZO_H
-
-#define LZO_INPUT_DEPLETED 1
-#define LZO_OUTPUT_FULL 2
-#define LZO_INVALID_BACKPTR 4
-#define LZO_ERROR 8
-
-#define LZO_INPUT_PADDING 4
-#define LZO_OUTPUT_PADDING 12
-
-int lzo1x_decode(void *out, int *outlen, void *in, int *inlen);
-
-#endif
diff --git a/src/libffmpeg/libavcodec/mlib/.cvsignore b/src/libffmpeg/libavcodec/mlib/.cvsignore
deleted file mode 100644
index 7d926a554..000000000
--- a/src/libffmpeg/libavcodec/mlib/.cvsignore
+++ /dev/null
@@ -1,6 +0,0 @@
-Makefile
-Makefile.in
-.libs
-.deps
-*.lo
-*.la
diff --git a/src/libffmpeg/libavcodec/mlib/Makefile.am b/src/libffmpeg/libavcodec/mlib/Makefile.am
deleted file mode 100644
index ed8b3c8d0..000000000
--- a/src/libffmpeg/libavcodec/mlib/Makefile.am
+++ /dev/null
@@ -1,18 +0,0 @@
-include $(top_srcdir)/misc/Makefile.common
-
-AM_CPPFLAGS = $(LIBFFMPEG_CPPFLAGS) -I$(top_srcdir)/src/libffmpeg/libavutil
-AM_CFLAGS = -fno-strict-aliasing
-ASFLAGS =
-
-noinst_LTLIBRARIES = libavcodec_mlib.la
-
-libavcodec_mlib_src = dsputil_mlib.c
-libavcodec_mlib_dummy = libavcodec_mlib_dummy.c
-
-EXTRA_DIST = $(libavcodec_mlib_src) $(libavcodec_mlib_dummy)
-
-if HAVE_MLIB
-mlib_modules = $(libavcodec_mlib_src)
-endif
-
-libavcodec_mlib_la_SOURCES = $(mlib_modules) $(libavcodec_mlib_dummy)
diff --git a/src/libffmpeg/libavcodec/mlib/libavcodec_mlib_dummy.c b/src/libffmpeg/libavcodec/mlib/libavcodec_mlib_dummy.c
deleted file mode 100644
index a09ee4e28..000000000
--- a/src/libffmpeg/libavcodec/mlib/libavcodec_mlib_dummy.c
+++ /dev/null
@@ -1,2 +0,0 @@
-
-char libavcodec_mlib_dummy;
diff --git a/src/libffmpeg/libavcodec/ppc/.cvsignore b/src/libffmpeg/libavcodec/ppc/.cvsignore
deleted file mode 100644
index 7d926a554..000000000
--- a/src/libffmpeg/libavcodec/ppc/.cvsignore
+++ /dev/null
@@ -1,6 +0,0 @@
-Makefile
-Makefile.in
-.libs
-.deps
-*.lo
-*.la
diff --git a/src/libffmpeg/libavcodec/ppc/Makefile.am b/src/libffmpeg/libavcodec/ppc/Makefile.am
deleted file mode 100644
index 00e796f6d..000000000
--- a/src/libffmpeg/libavcodec/ppc/Makefile.am
+++ /dev/null
@@ -1,34 +0,0 @@
-include $(top_srcdir)/misc/Makefile.common
-
-AM_CPPFLAGS = $(LIBFFMPEG_CPPFLAGS)
-AM_CFLAGS = -fno-strict-aliasing
-# CFLAGS is here to filter out -funroll-loops because it causes bad
-# behavior of libavcodec
-CFLAGS = `echo @CFLAGS@ | sed -e 's/-funroll-loops//g'`
-
-ASFLAGS =
-
-noinst_LTLIBRARIES = libavcodec_ppc.la
-
-libavcodec_ppc_src = dsputil_altivec.c \
- dsputil_ppc.c \
- dsputil_h264_altivec.c \
- dsputil_h264_template_altivec.c \
- fdct_altivec.c \
- fft_altivec.c \
- idct_altivec.c \
- gmc_altivec.c \
- mpegvideo_altivec.c \
- mpegvideo_ppc.c
-libavcodec_ppc_dummy = libavcodec_ppc_dummy.c
-
-EXTRA_DIST = $(libavcodec_ppc_src) $(libavcodec_ppc_dummy)
-
-#if PPC_ARCH
-#ppc_modules = $(libavcodec_ppc_src)
-#endif
-
-
-libavcodec_ppc_la_SOURCES = $(ppc_modules) $(libavcodec_ppc_dummy)
-
-noinst_HEADERS = dsputil_altivec.h dsputil_ppc.h gcc_fixes.h
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h
deleted file mode 100644
index ac54817d0..000000000
--- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2002 Brian Foley
- * Copyright (c) 2002 Dieter Shirley
- * Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef _DSPUTIL_ALTIVEC_
-#define _DSPUTIL_ALTIVEC_
-
-#include "dsputil_ppc.h"
-
-#ifdef HAVE_ALTIVEC
-
-extern int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-extern int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-extern int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-extern int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-extern int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-extern int pix_norm1_altivec(uint8_t *pix, int line_size);
-extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-extern int pix_sum_altivec(uint8_t * pix, int line_size);
-extern void diff_pixels_altivec(DCTELEM* block, const uint8_t* s1, const uint8_t* s2, int stride);
-extern void get_pixels_altivec(DCTELEM* block, const uint8_t * pixels, int line_size);
-
-extern void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w);
-extern void put_pixels_clamped_altivec(const DCTELEM *block, uint8_t *restrict pixels, int line_size);
-extern void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
-extern void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
-extern void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h);
-extern void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
-extern void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
-extern void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h);
-extern void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h);
-extern int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h);
-extern int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h);
-extern void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
-
-extern void gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder);
-
-extern int has_altivec(void);
-
-// used to build registers permutation vectors (vcprm)
-// the 's' are for words in the _s_econd vector
-#define WORD_0 0x00,0x01,0x02,0x03
-#define WORD_1 0x04,0x05,0x06,0x07
-#define WORD_2 0x08,0x09,0x0a,0x0b
-#define WORD_3 0x0c,0x0d,0x0e,0x0f
-#define WORD_s0 0x10,0x11,0x12,0x13
-#define WORD_s1 0x14,0x15,0x16,0x17
-#define WORD_s2 0x18,0x19,0x1a,0x1b
-#define WORD_s3 0x1c,0x1d,0x1e,0x1f
-
-#ifdef CONFIG_DARWIN
-#define vcprm(a,b,c,d) (const vector unsigned char)(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
-#else
-#define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}
-#endif
-
-// vcprmle is used to keep the same index as in the SSE version.
-// it's the same as vcprm, with the index inversed
-// ('le' is Little Endian)
-#define vcprmle(a,b,c,d) vcprm(d,c,b,a)
-
-// used to build inverse/identity vectors (vcii)
-// n is _n_egative, p is _p_ositive
-#define FLOAT_n -1.
-#define FLOAT_p 1.
-
-
-#ifdef CONFIG_DARWIN
-#define vcii(a,b,c,d) (const vector float)(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
-#else
-#define vcii(a,b,c,d) (const vector float){FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d}
-#endif
-
-#else /* HAVE_ALTIVEC */
-#ifdef ALTIVEC_USE_REFERENCE_C_CODE
-#error "I can't use ALTIVEC_USE_REFERENCE_C_CODE if I don't use HAVE_ALTIVEC"
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
-#endif /* HAVE_ALTIVEC */
-
-#endif /* _DSPUTIL_ALTIVEC_ */
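The vcprm() macro in the header deleted above builds the 16-byte mask that AltiVec's vec_perm() expects, selecting 32-bit words from two input vectors (the 's'-prefixed indices pick words from the second operand). A minimal sketch, not part of this change, of how such a mask is typically consumed; it assumes a non-Darwin compiler (brace-style vector literals), <altivec.h>, and the hypothetical helper name merge_low_halves:

#include <altivec.h>

#define WORD_0  0x00,0x01,0x02,0x03
#define WORD_1  0x04,0x05,0x06,0x07
#define WORD_s0 0x10,0x11,0x12,0x13
#define WORD_s1 0x14,0x15,0x16,0x17
#define vcprm(a,b,c,d) \
    (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}

/* Interleave the low halves of two float vectors: result = { a0, a1, b0, b1 }. */
static inline vector float merge_low_halves(vector float a, vector float b)
{
    return vec_perm(a, b, vcprm(0, 1, s0, s1));
}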
diff --git a/src/libffmpeg/libavcodec/ppc/libavcodec_ppc_dummy.c b/src/libffmpeg/libavcodec/ppc/libavcodec_ppc_dummy.c
deleted file mode 100644
index 506a55beb..000000000
--- a/src/libffmpeg/libavcodec/ppc/libavcodec_ppc_dummy.c
+++ /dev/null
@@ -1,2 +0,0 @@
-
-char libavcodec_ppc_dummy;
diff --git a/src/libffmpeg/libavcodec/rtjpeg.h b/src/libffmpeg/libavcodec/rtjpeg.h
deleted file mode 100644
index 1fc2fc934..000000000
--- a/src/libffmpeg/libavcodec/rtjpeg.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef RTJPEG_H
-#define RTJPEG_H
-
-typedef struct {
- int w, h;
- DSPContext *dsp;
- DCTELEM block[64];
- uint8_t scan[64];
- uint32_t lquant[64];
- uint32_t cquant[64];
-} RTJpegContext;
-
-void rtjpeg_decode_init(RTJpegContext *c, DSPContext *dsp,
- int width, int height,
- uint32_t *lquant, uint32_t *cquant);
-
-int rtjpeg_decode_frame_yuv420(RTJpegContext *c, AVFrame *f,
- uint8_t *buf, int buf_size);
-#endif
diff --git a/src/libffmpeg/libavcodec/sparc/.cvsignore b/src/libffmpeg/libavcodec/sparc/.cvsignore
deleted file mode 100644
index 7d926a554..000000000
--- a/src/libffmpeg/libavcodec/sparc/.cvsignore
+++ /dev/null
@@ -1,6 +0,0 @@
-Makefile
-Makefile.in
-.libs
-.deps
-*.lo
-*.la
diff --git a/src/libffmpeg/libavcodec/sparc/Makefile.am b/src/libffmpeg/libavcodec/sparc/Makefile.am
deleted file mode 100644
index 4f464e682..000000000
--- a/src/libffmpeg/libavcodec/sparc/Makefile.am
+++ /dev/null
@@ -1,18 +0,0 @@
-include $(top_srcdir)/misc/Makefile.common
-
-AM_CPPFLAGS = $(LIBFFMPEG_CPPFLAGS) -I$(top_srcdir)/src/libffmpeg/libavutil
-AM_CFLAGS = -fno-strict-aliasing
-ASFLAGS =
-
-noinst_LTLIBRARIES = libavcodec_sparc.la
-
-libavcodec_sparc_src = dsputil_vis.c
-libavcodec_sparc_dummy = libavcodec_sparc_dummy.c
-
-EXTRA_DIST = $(libavcodec_sparc_src) $(libavcodec_sparc_dummy) vis.h
-
-if ENABLE_VIS
-sparc_modules = $(libavcodec_sparc_src)
-endif
-
-libavcodec_sparc_la_SOURCES = $(sparc_modules) $(libavcodec_sparc_dummy)
diff --git a/src/libffmpeg/libavcodec/sparc/libavcodec_sparc_dummy.c b/src/libffmpeg/libavcodec/sparc/libavcodec_sparc_dummy.c
deleted file mode 100644
index 5efeac422..000000000
--- a/src/libffmpeg/libavcodec/sparc/libavcodec_sparc_dummy.c
+++ /dev/null
@@ -1,2 +0,0 @@
-
-char libavcodec_sparc_dummy;
diff --git a/src/libffmpeg/libavcodec/swscale.h b/src/libffmpeg/libavcodec/swscale.h
deleted file mode 100644
index 5d13f90da..000000000
--- a/src/libffmpeg/libavcodec/swscale.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef SWSCALE_EMU_H
-#define SWSCALE_EMU_H
-/* Dummy, only useful for compilation! */
-#define SWS_FAST_BILINEAR 1
-#define SWS_BILINEAR 2
-#define SWS_BICUBIC 4
-#define SWS_X 8
-#define SWS_POINT 0x10
-#define SWS_AREA 0x20
-#define SWS_BICUBLIN 0x40
-#define SWS_GAUSS 0x80
-#define SWS_SINC 0x100
-#define SWS_LANCZOS 0x200
-#define SWS_SPLINE 0x400
-
-#define SwsFilter void
-struct SwsContext {
- struct ImgReSampleContext *resampling_ctx;
- enum PixelFormat src_pix_fmt, dst_pix_fmt;
-};
-
-struct SwsContext *sws_getContext(int srcW, int srcH, int srcFormat,
- int dstW, int dstH, int dstFormat,
- int flags, SwsFilter *srcFilter,
- SwsFilter *dstFilter, double *param);
-
-int sws_scale(struct SwsContext *ctx, uint8_t* src[], int srcStride[],
- int srcSliceY, int srcSliceH, uint8_t* dst[], int dstStride[]);
-
-void sws_freeContext(struct SwsContext *swsContext);
-
-#endif /* SWSCALE_EMU_H */
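The deleted header only emulated the swscale entry points on top of the old ImgReSampleContext scaler. A hedged sketch of how a caller would drive that API; the geometry, pixel formats, and the helper name rescale_frame are illustrative assumptions, and it presumes linking against the old emulation:

/* Sketch only: convert a whole 720x576 YUV420P picture to 360x288 in one slice. */
static int rescale_frame(uint8_t *src[3], int src_stride[3],
                         uint8_t *dst[3], int dst_stride[3])
{
    struct SwsContext *ctx;

    ctx = sws_getContext(720, 576, PIX_FMT_YUV420P,   /* source geometry */
                         360, 288, PIX_FMT_YUV420P,   /* target geometry */
                         SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (!ctx)
        return -1;

    sws_scale(ctx, src, src_stride, 0, 576, dst, dst_stride);
    sws_freeContext(ctx);
    return 0;
}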
diff --git a/src/libffmpeg/libavutil/.cvsignore b/src/libffmpeg/libavutil/.cvsignore
deleted file mode 100644
index 7d926a554..000000000
--- a/src/libffmpeg/libavutil/.cvsignore
+++ /dev/null
@@ -1,6 +0,0 @@
-Makefile
-Makefile.in
-.libs
-.deps
-*.lo
-*.la
diff --git a/src/libffmpeg/libavutil/Makefile.am b/src/libffmpeg/libavutil/Makefile.am
deleted file mode 100644
index 13f645957..000000000
--- a/src/libffmpeg/libavutil/Makefile.am
+++ /dev/null
@@ -1,35 +0,0 @@
-include $(top_srcdir)/misc/Makefile.common
-
-AM_CPPFLAGS = $(LIBFFMPEG_CPPFLAGS)
-AM_CFLAGS = -fno-strict-aliasing
-ASFLAGS =
-
-noinst_LTLIBRARIES = libavutil.la
-
-libavutil_la_SOURCES = \
- adler32.c \
- crc.c \
- integer.c \
- lls.c \
- log.c \
- mathematics.c \
- md5.c \
- rational.c
-
-libavutil_la_LDFLAGS = -avoid-version -module
-
-noinst_HEADERS = \
- adler32.h \
- avutil.h \
- bswap.h \
- common.h \
- crc.h \
- integer.h \
- internal.h \
- intfloat_readwrite.h \
- lls.h \
- log.h \
- mathematics.h \
- md5.h \
- rational.h \
- x86_cpu.h
diff --git a/src/libffmpeg/libavutil/adler32.h b/src/libffmpeg/libavutil/adler32.h
deleted file mode 100644
index 4b035dcdf..000000000
--- a/src/libffmpeg/libavutil/adler32.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef ADLER32_H
-#define ADLER32_H
-
-unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,
- unsigned int len);
-
-#endif
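Adler-32 is seeded with 1 and can be updated over successive chunks. A hedged usage sketch of the single entry point declared in the header removed above; the wrapper name checksum_message is an assumption:

#include <stdint.h>
#include <string.h>

/* Sketch: one-shot Adler-32 of a NUL-terminated string, starting from 1. */
static unsigned long checksum_message(const char *msg)
{
    return av_adler32_update(1, (const uint8_t *)msg,
                             (unsigned int)strlen(msg));
}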
diff --git a/src/libffmpeg/libavutil/avutil.h b/src/libffmpeg/libavutil/avutil.h
deleted file mode 100644
index 6f66fbb07..000000000
--- a/src/libffmpeg/libavutil/avutil.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef AVUTIL_H
-#define AVUTIL_H
-
-/**
- * @file avutil.h
- * external api header.
- */
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define AV_STRINGIFY(s) AV_TOSTRING(s)
-#define AV_TOSTRING(s) #s
-
-#define LIBAVUTIL_VERSION_INT ((49<<16)+(0<<8)+0)
-#define LIBAVUTIL_VERSION 49.0.0
-#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
-
-#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
-
-
-#include "common.h"
-#include "mathematics.h"
-#include "rational.h"
-#include "integer.h"
-#include "intfloat_readwrite.h"
-#include "log.h"
-
-/**
- * Pixel format. Notes:
- *
- * PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA
- * color is put together as:
- * (A << 24) | (R << 16) | (G << 8) | B
- * This is stored as BGRA on little endian CPU architectures and ARGB on
- * big endian CPUs.
- *
- * When the pixel format is palettized RGB (PIX_FMT_PAL8), the palettized
- * image data is stored in AVFrame.data[0]. The palette is transported in
- * AVFrame.data[1] and, is 1024 bytes long (256 4-byte entries) and is
- * formatted the same as in PIX_FMT_RGBA32 described above (i.e., it is
- * also endian-specific). Note also that the individual RGB palette
- * components stored in AVFrame.data[1] should be in the range 0..255.
- * This is important as many custom PAL8 video codecs that were designed
- * to run on the IBM VGA graphics adapter use 6-bit palette components.
- */
-enum PixelFormat {
- PIX_FMT_NONE= -1,
- PIX_FMT_YUV420P, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
- PIX_FMT_YUV422, ///< Packed pixel, Y0 Cb Y1 Cr
- PIX_FMT_RGB24, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
- PIX_FMT_BGR24, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
- PIX_FMT_YUV422P, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
- PIX_FMT_YUV444P, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
- PIX_FMT_RGBA32, ///< Packed pixel, 4 bytes per pixel, BGRABGRA..., stored in cpu endianness
- PIX_FMT_YUV410P, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
- PIX_FMT_YUV411P, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
- PIX_FMT_RGB565, ///< always stored in cpu endianness
- PIX_FMT_RGB555, ///< always stored in cpu endianness, most significant bit to 1
- PIX_FMT_GRAY8,
- PIX_FMT_MONOWHITE, ///< 0 is white
- PIX_FMT_MONOBLACK, ///< 0 is black
- PIX_FMT_PAL8, ///< 8 bit with RGBA palette
- PIX_FMT_YUVJ420P, ///< Planar YUV 4:2:0 full scale (jpeg)
- PIX_FMT_YUVJ422P, ///< Planar YUV 4:2:2 full scale (jpeg)
- PIX_FMT_YUVJ444P, ///< Planar YUV 4:4:4 full scale (jpeg)
- PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing(xvmc_render.h)
- PIX_FMT_XVMC_MPEG2_IDCT,
- PIX_FMT_UYVY422, ///< Packed pixel, Cb Y0 Cr Y1
- PIX_FMT_UYVY411, ///< Packed pixel, Cb Y0 Y1 Cr Y2 Y3
- PIX_FMT_NB,
-};
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* AVUTIL_H */
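Per the PIX_FMT_RGBA32 note in the deleted comment above, each pixel is a single native-endian 32-bit word laid out as (A<<24)|(R<<16)|(G<<8)|B, so a portable unpack is just shifts. A small illustration, not part of this change; the helper name rgba32_unpack is assumed:

#include <stdint.h>

/* Read one PIX_FMT_RGBA32 pixel as a host-order uint32_t; the same shifts
 * then work on little- and big-endian CPUs. */
static void rgba32_unpack(uint32_t px, uint8_t *r, uint8_t *g,
                          uint8_t *b, uint8_t *a)
{
    *a =  px >> 24;
    *r = (px >> 16) & 0xff;
    *g = (px >>  8) & 0xff;
    *b =  px        & 0xff;
}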
diff --git a/src/libffmpeg/libavutil/crc.h b/src/libffmpeg/libavutil/crc.h
deleted file mode 100644
index c5b217017..000000000
--- a/src/libffmpeg/libavutil/crc.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef CRC_H
-#define CRC_H
-
-typedef uint32_t AVCRC;
-
-extern AVCRC *av_crcEDB88320;
-extern AVCRC *av_crc04C11DB7;
-extern AVCRC *av_crc8005 ;
-extern AVCRC *av_crc07 ;
-
-int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);
-uint32_t av_crc(const AVCRC *ctx, uint32_t start_crc, const uint8_t *buffer, size_t length);
-
-#endif /* CRC_H */
-
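The table pointers in the header deleted above are named after their polynomials. A hedged sketch of the call shape only; whether a given container wants an initial value of 0 or 0xFFFFFFFF and a final inversion is a property of that format, not something this header specifies, and the wrapper name crc32_of is an assumption:

#include <stdint.h>
#include <stddef.h>

/* Sketch: one-shot CRC over a buffer using the pre-built table for the
 * reflected CRC-32/IEEE polynomial (0xEDB88320). */
static uint32_t crc32_of(const uint8_t *buf, size_t len)
{
    return av_crc(av_crcEDB88320, 0, buf, len);
}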
diff --git a/src/libffmpeg/libavutil/intfloat_readwrite.h b/src/libffmpeg/libavutil/intfloat_readwrite.h
deleted file mode 100644
index 33e4c636c..000000000
--- a/src/libffmpeg/libavutil/intfloat_readwrite.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef INTFLOAT_READWRITE_H
-#define INTFLOAT_READWRITE_H
-
-#include "common.h"
-
-/* IEEE 80 bits extended float */
-typedef struct AVExtFloat {
- uint8_t exponent[2];
- uint8_t mantissa[8];
-} AVExtFloat;
-
-double av_int2dbl(int64_t v);
-float av_int2flt(int32_t v);
-double av_ext2dbl(const AVExtFloat ext);
-int64_t av_dbl2int(double d);
-int32_t av_flt2int(float d);
-AVExtFloat av_dbl2ext(double d);
-
-#endif /* INTFLOAT_READWRITE_H */
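These helpers convert between native floating-point values and integer bit patterns (plus 80-bit extended floats) so numbers can be written into bitstreams. A hedged round-trip sketch; the pack_duration/unpack_duration names are assumptions:

#include <stdint.h>

/* Sketch: av_dbl2int() yields the IEEE-754 bit pattern of a double, suitable
 * for storage as a plain 64-bit integer; av_int2dbl() reverses it. */
static int64_t pack_duration(double seconds)
{
    return av_dbl2int(seconds);
}

static double unpack_duration(int64_t bits)
{
    return av_int2dbl(bits);
}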
diff --git a/src/libffmpeg/libavutil/md5.h b/src/libffmpeg/libavutil/md5.h
deleted file mode 100644
index c8144b4cc..000000000
--- a/src/libffmpeg/libavutil/md5.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef MD5_H
-#define MD5_H
-
-extern const int av_md5_size;
-
-struct AVMD5;
-
-void av_md5_init(struct AVMD5 *ctx);
-void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, const int len);
-void av_md5_final(struct AVMD5 *ctx, uint8_t *dst);
-void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);
-
-#endif /* MD5_H */
-
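av_md5_sum() is the one-shot wrapper over the incremental init/update/final trio declared above. A hedged usage sketch that prints a digest; the helper name print_md5 is an assumption and it presumes linking against the removed implementation:

#include <stdint.h>
#include <stdio.h>

/* Sketch: MD5 digests are 16 bytes; hash one buffer and print it as hex. */
static void print_md5(const uint8_t *buf, int len)
{
    uint8_t digest[16];
    int i;

    av_md5_sum(digest, buf, len);
    for (i = 0; i < 16; i++)
        printf("%02x", digest[i]);
    putchar('\n');
}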
diff --git a/src/libffmpeg/libavutil/x86_cpu.h b/src/libffmpeg/libavutil/x86_cpu.h
deleted file mode 100644
index 8fd5f8600..000000000
--- a/src/libffmpeg/libavutil/x86_cpu.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef AVUTIL_X86CPU_H
-#define AVUTIL_X86CPU_H
-
-#ifdef ARCH_X86_64
-# define REG_a "rax"
-# define REG_b "rbx"
-# define REG_c "rcx"
-# define REG_d "rdx"
-# define REG_D "rdi"
-# define REG_S "rsi"
-# define PTR_SIZE "8"
-
-# define REG_SP "rsp"
-# define REG_BP "rbp"
-# define REGBP rbp
-# define REGa rax
-# define REGb rbx
-# define REGSP rsp
-
-#else
-
-# define REG_a "eax"
-# define REG_b "ebx"
-# define REG_c "ecx"
-# define REG_d "edx"
-# define REG_D "edi"
-# define REG_S "esi"
-# define PTR_SIZE "4"
-
-# define REG_SP "esp"
-# define REG_BP "ebp"
-# define REGBP ebp
-# define REGa eax
-# define REGb ebx
-# define REGSP esp
-#endif
-
-#endif /* AVUTIL_X86CPU_H */
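The REG_* string macros in the header deleted above let one GCC extended-asm body name the correctly sized register on both x86 and x86-64. A hedged sketch in the style of the old dsputil code, not taken from this patch; it assumes GCC inline asm, MMX, and the hypothetical name clear_block_mmx:

#include <stdint.h>

/* Sketch: zero a 64-coefficient DCT block with MMX, indexing through the
 * 128-byte block via REG_a so the same text assembles as eax or rax.
 * The caller is expected to issue emms before using the FPU again. */
static void clear_block_mmx(int16_t *block)
{
    __asm__ volatile (
        "pxor %%mm7, %%mm7              \n\t"
        "mov  $-128, %%"REG_a"          \n\t"
        "1:                             \n\t"
        "movq %%mm7,   (%0, %%"REG_a")  \n\t"
        "movq %%mm7,  8(%0, %%"REG_a")  \n\t"
        "add  $16, %%"REG_a"            \n\t"
        "js   1b                        \n\t"
        :
        : "r" (((uint8_t *)block) + 128)
        : "%"REG_a, "memory"
    );
}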
diff --git a/src/libffmpeg/video_decoder.c b/src/libffmpeg/video_decoder.c
index 8f6f70ab9..2d5733211 100644
--- a/src/libffmpeg/video_decoder.c
+++ b/src/libffmpeg/video_decoder.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: video_decoder.c,v 1.63 2006/09/18 18:56:56 tmattern Exp $
+ * $Id: video_decoder.c,v 1.63.2.1 2006/12/02 01:20:07 dgp85 Exp $
*
* xine video decoder plugin using ffmpeg
*
@@ -47,11 +47,7 @@
#include "xine_decoder.h"
#include "mpeg_parser.h"
-#ifdef HAVE_FFMPEG
-# include <postprocess.h>
-#else
-# include "libavcodec/libpostproc/postprocess.h"
-#endif
+#include <postprocess.h>
#define VIDEOBUFSIZE (128*1024)
#define SLICE_BUFFER_SIZE (1194*1024)
diff --git a/src/libffmpeg/xine_decoder.h b/src/libffmpeg/xine_decoder.h
index 879ee3175..f005f09e5 100644
--- a/src/libffmpeg/xine_decoder.h
+++ b/src/libffmpeg/xine_decoder.h
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: xine_decoder.h,v 1.7 2006/08/02 07:15:27 tmmm Exp $
+ * $Id: xine_decoder.h,v 1.7.2.1 2006/12/02 01:20:07 dgp85 Exp $
*
*/
@@ -28,11 +28,7 @@
#include "config.h"
#endif
-#ifdef HAVE_FFMPEG
-# include <avcodec.h>
-#else
-# include "libavcodec/avcodec.h"
-#endif
+#include <avcodec.h>
typedef struct ff_codec_s {
uint32_t type;
diff --git a/src/post/deinterlace/plugins/Makefile.am b/src/post/deinterlace/plugins/Makefile.am
index e6e785211..d2b1cefaf 100644
--- a/src/post/deinterlace/plugins/Makefile.am
+++ b/src/post/deinterlace/plugins/Makefile.am
@@ -30,7 +30,7 @@ EXTRA_DIST = greedy2frame_template.c greedyh.asm \
# libpostproc is here so we can use their nice mangle.h
AM_CFLAGS = -I$(top_srcdir)/src/post/deinterlace \
- -I$(top_srcdir)/src/libffmpeg/libavcodec/libpostproc
+ -I$(top_srcdir)/contrib/ffmpeg/libpostproc
libdir = $(XINE_PLUGINDIR)/post
diff --git a/src/post/planar/Makefile.am b/src/post/planar/Makefile.am
index 3440ff6ec..9f8ffed45 100644
--- a/src/post/planar/Makefile.am
+++ b/src/post/planar/Makefile.am
@@ -6,9 +6,11 @@ if HAVE_FFMPEG
postproc_lib = $(FFMPEG_POSTPROC_LIBS)
ff_cflags = $(FFMPEG_POSTPROC_CFLAGS)
else
-ff_cflags = -I$(top_srcdir)/src/libffmpeg/libavcodec/libpostproc
-postproc_lib = $(POSTPROC_INT_LIB)
-postproc_dep = $(postproc_lib)
+ff_cflags = -I$(top_srcdir)/contrib/ffmpeg/libpostproc
+postproc_lib = $(top_builddir)/contrib/ffmpeg/libpostproc/libpostproc.a
+
+$(top_builddir)/contrib/ffmpeg/libpostproc/libpostproc.a:
+ $(MAKE) -C $(top_builddir)/contrib/ffmpeg/ -f makefile.xine libpostproc/libpostproc.a
endif
# -fomit-frame-pointer is always needed. it might cause debug to not
@@ -21,7 +23,6 @@ lib_LTLIBRARIES = xineplug_post_planar.la
xineplug_post_planar_la_SOURCES = planar.c invert.c expand.c fill.c boxblur.c \
denoise3d.c eq.c eq2.c unsharp.c pp.c noise.c
-xineplug_post_planar_la_DEPENDENCIES = $(postproc_dep)
xineplug_post_planar_la_LIBADD = $(XINE_LIB) $(postproc_lib) -lm $(THREAD_LIBS)
xineplug_post_planar_la_LDFLAGS = -avoid-version -module \
@IMPURE_TEXT_LDFLAGS@