diff options
Diffstat (limited to 'contrib/ffmpeg/doc')
-rw-r--r-- | contrib/ffmpeg/doc/TODO | 90 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/avutil.txt | 37 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/faq.texi | 472 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/ffmpeg-doc.texi | 853 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt | 172 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/ffplay-doc.texi | 135 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/ffserver-doc.texi | 224 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/ffserver.conf | 352 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/general.texi | 985 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/hooks.texi | 299 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/issue_tracker.txt | 222 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/optimization.txt | 233 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/snow.txt | 630 | ||||
-rw-r--r-- | contrib/ffmpeg/doc/soc.txt | 24 | ||||
-rwxr-xr-x | contrib/ffmpeg/doc/texi2pod.pl | 427 |
15 files changed, 0 insertions, 5155 deletions
diff --git a/contrib/ffmpeg/doc/TODO b/contrib/ffmpeg/doc/TODO deleted file mode 100644 index a8567a5ec..000000000 --- a/contrib/ffmpeg/doc/TODO +++ /dev/null @@ -1,90 +0,0 @@ -ffmpeg TODO list: ----------------- - -Fabrice's TODO list: (unordered) -------------------- -Short term: - -- use AVFMTCTX_DISCARD_PKT in ffplay so that DV has a chance to work -- add RTSP regression test (both client and server) -- make ffserver allocate AVFormatContext -- clean up (incompatible change, for 0.5.0): - * AVStream -> AVComponent - * AVFormatContext -> AVInputStream/AVOutputStream - * suppress rate_emu from AVCodecContext -- add new float/integer audio filterting and conversion : suppress - CODEC_ID_PCM_xxc and use CODEC_ID_RAWAUDIO. -- fix telecine and frame rate conversion - -Long term (ask me if you want to help): - -- commit new imgconvert API and new PIX_FMT_xxx alpha formats -- commit new LGPL'ed float and integer-only AC3 decoder -- add WMA integer-only decoder -- add new MPEG4-AAC audio decoder (both integer-only and float version) - -Michael's TODO list: (unordered) (if anyone wanna help with sth, just ask) -------------------- -- optimize H264 CABAC -- more optimizations -- simper rate control - -Francois' TODO list: (unordered, without any timeframe) -------------------- -- test MACE decoder against the openquicktime one as suggested by A'rpi -- BeOS audio input grabbing backend -- BeOS video input grabbing backend -- publish my BeOS libposix on BeBits so I can officially support ffserver :) -- check the whole code for thread-safety (global and init stuff) - -Philip'a TODO list: (alphabetically ordered) (please help) ------------------- -- Add a multi-ffm filetype so that feeds can be recorded into multiple files rather - than one big file. 
-- Authenticated users support -- where the authentication is in the URL -- Change ASF files so that the embedded timestamp in the frames is right rather - than being an offset from the start of the stream -- Make ffm files more resilient to changes in the codec structures so that you - can play old ffm files. - -Baptiste's TODO list: ------------------ -- mov edit list support (AVEditList) -- YUV 10 bit per component support "2vuy" -- mxf muxer -- mpeg2 non linear quantizer - -unassigned TODO: (unordered) ---------------- -- use AVFrame for audio codecs too -- rework aviobuf.c buffering strategy and fix url_fskip -- generate optimal huffman tables for mjpeg encoding -- fix ffserver regression tests -- support xvids motion estimation -- support x264s motion estimation -- support x264s rate control -- SNOW: non translational motion compensation -- SNOW: more optimal quantization -- SNOW: 4x4 block support -- SNOW: 1/8 pel motion compensation support -- SNOW: iterative motion estimation based on subsampled images -- SNOW: try B frames and MCTF and see how their PSNR/bitrate/complexity behaves -- SNOW: try to use the wavelet transformed MC-ed reference frame as context for the entropy coder -- SNOW: think about/analyize how to make snow use multiple cpus/threads -- SNOW: finish spec -- FLAC: lossy encoding (viterbi and naive scalar quantization) -- libavfilter -- JPEG2000 decoder & encoder -- MPEG4 GMC encoding support -- macroblock based pixel format (better cache locality, somewhat complex, one paper claimed it faster for high res) -- regression tests for codecs which do not have an encoder (I+P-frame bitstream in svn) -- add support for using mplayers video filters to ffmpeg -- H264 encoder -- per MB ratecontrol (so VCD and such do work better) -- replace/rewrite libavcodec/fdctref.c -- write a script which iteratively changes all functions between always_inline and noinline and benchmarks the result to find the best set of inlined functions -- convert all the non 
SIMD asm into small asm vs. C testcases and submit them to the gcc devels so they can improve gcc -- generic audio mixing API -- extract PES packetizer from PS muxer and use it for new TS muxer -- implement automatic AVBistreamFilter activation -- make cabac encoder use bytestream (see http://trac.videolan.org/x264/changeset/?format=diff&new=651) diff --git a/contrib/ffmpeg/doc/avutil.txt b/contrib/ffmpeg/doc/avutil.txt deleted file mode 100644 index 210bd0726..000000000 --- a/contrib/ffmpeg/doc/avutil.txt +++ /dev/null @@ -1,37 +0,0 @@ -AVUtil -====== -libavutil is a small lightweight library of generally useful functions. -It is not a library for code needed by both libavcodec and libavformat. - - -Overview: -========= -adler32.c adler32 checksum -aes.c AES encryption and decryption -fifo.c resizeable first in first out buffer -intfloat_readwrite.c portable reading and writing of floating point values -log.c "printf" with context and level -md5.c MD5 Message-Digest Algorithm -rational.c code to perform exact calculations with rational numbers -tree.c generic AVL tree -crc.c generic CRC checksumming code -integer.c 128bit integer math -lls.c -mathematics.c greatest common divisor, integer sqrt, integer log2, ... 
-mem.c memory allocation routines with guaranteed alignment -softfloat.c - -Headers: -bswap.h big/little/native-endian conversion code -x86_cpu.h a few useful macros for unifying x86-64 and x86-32 code -avutil.h -common.h -intreadwrite.h reading and writing of unaligned big/little/native-endian integers - - -Goals: -====== -* Modular (few interdependencies and the possibility of disabling individual parts during ./configure) -* Small (source and object) -* Efficient (low CPU and memory usage) -* Useful (avoid useless features almost no one needs) diff --git a/contrib/ffmpeg/doc/faq.texi b/contrib/ffmpeg/doc/faq.texi deleted file mode 100644 index 85a0915b9..000000000 --- a/contrib/ffmpeg/doc/faq.texi +++ /dev/null @@ -1,472 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg FAQ -@titlepage -@sp 7 -@center @titlefont{FFmpeg FAQ} -@sp 3 -@end titlepage - - -@chapter General Questions - -@section When will the next FFmpeg version be released? / Why are FFmpeg releases so few and far between? - -Like most open source projects FFmpeg suffers from a certain lack of -manpower. For this reason the developers have to prioritize the work -they do and putting out releases is not at the top of the list, fixing -bugs and reviewing patches takes precedence. Please don't complain or -request more timely and/or frequent releases unless you are willing to -help out creating them. - -@section I have a problem with an old version of FFmpeg; where should I report it? -Nowhere. Upgrade to the latest release or if there is no recent release upgrade -to Subversion HEAD. You could also try to report it. Maybe you will get lucky and -become the first person in history to get an answer different from "upgrade -to Subversion HEAD". - -@section Why doesn't FFmpeg support feature [xyz]? - -Because no one has taken on that task yet. FFmpeg development is -driven by the tasks that are important to the individual developers. 
-If there is a feature that is important to you, the best way to get -it implemented is to undertake the task yourself or sponsor a developer. - -@section FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it? - -No. Windows DLLs are not portable, bloated and often slow. -Moreover FFmpeg strives to support all codecs natively. -A DLL loader is not conducive to that goal. - -@section My bugreport/mail to ffmpeg-devel/user has not received any replies. - -Likely reasons -@itemize -@item We are busy and haven't had time yet to read your report or -investigate the issue. -@item You didn't follow bugreports.html. -@item You didn't use Subversion HEAD. -@item You reported a segmentation fault without gdb output. -@item You describe a problem but not how to reproduce it. -@item It's unclear if you use ffmpeg as command line tool or use -libav* from another application. -@item You speak about a video having problems on playback but -not what you use to play it. -@item We have no faint clue what you are talking about besides -that it is related to FFmpeg. -@end itemize - -@section Is there a forum for FFmpeg? I do not like mailing lists. - -Yes, (@url{http://dir.gmane.org/gmane.comp.video.ffmpeg.user}). - -@section I cannot read this file although this format seems to be supported by ffmpeg. - -Even if ffmpeg can read the container format, it may not support all its -codecs. Please consult the supported codec list in the ffmpeg -documentation. - -@section Which codecs are supported by Windows? 
- -Windows does not support standard formats like MPEG very well, unless you -install some additional codecs - -The following list of video codecs should work on most Windows systems: -@table @option -@item msmpeg4v2 -.avi/.asf -@item msmpeg4 -.asf only -@item wmv1 -.asf only -@item wmv2 -.asf only -@item mpeg4 -only if you have some MPEG-4 codec installed like ffdshow or XviD -@item mpeg1 -.mpg only -@end table -Note, ASF files often have .wmv or .wma extensions in Windows. It should also -be mentioned that Microsoft claims a patent on the ASF format, and may sue -or threaten users who create ASF files with non-Microsoft software. It is -strongly advised to avoid ASF where possible. - -The following list of audio codecs should work on most Windows systems: -@table @option -@item adpcm_ima_wav -@item adpcm_ms -@item pcm -@item mp3 -if some MP3 codec like LAME is installed -@end table - - -@chapter Usage - -@section ffmpeg does not work; What is wrong? - -Try a @code{make distclean} in the ffmpeg source directory before the build. If this does not help see -(@url{http://ffmpeg.org/bugreports.html}). - -@section How do I encode single pictures to movies? - -First, rename your pictures to follow a numerical sequence. -For example, img1.jpg, img2.jpg, img3.jpg,... -Then you may run: - -@example - ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg -@end example - -Notice that @samp{%d} is replaced by the image number. - -@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc... - -The same logic is used for any image format that ffmpeg reads. - -@section How do I encode movie to single pictures? - -Use: - -@example - ffmpeg -i movie.mpg movie%d.jpg -@end example - -The @file{movie.mpg} used as input will be converted to -@file{movie1.jpg}, @file{movie2.jpg}, etc... - -Instead of relying on file format self-recognition, you may also use -@table @option -@item -vcodec ppm -@item -vcodec png -@item -vcodec mjpeg -@end table -to force the encoding. 
- -Applying that to the previous example: -@example - ffmpeg -i movie.mpg -f image2 -vcodec mjpeg menu%d.jpg -@end example - -Beware that there is no "jpeg" codec. Use "mjpeg" instead. - -@section I get "Unsupported codec (id=86043) for input stream #0.1". What is the problem? - -This is the Qcelp codec, FFmpeg has no support for that codec currently. Try mencoder/mplayer it might work. - -@section Why do I see a slight quality degradation with multithreaded MPEG* encoding? - -For multithreaded MPEG* encoding, the encoded slices must be independent, -otherwise thread n would practically have to wait for n-1 to finish, so it's -quite logical that there is a small reduction of quality. This is not a bug. - -@section How can I read from the standard input or write to the standard output? - -Use @file{-} as filename. - -@section Why does FFmpeg not decode audio in VOB files? - -The audio is AC-3 (a.k.a. A/52). AC-3 decoding is an optional component in FFmpeg -as the component that handles AC-3 decoding is currently released under the GPL. -Enable AC-3 decoding with @code{./configure --enable-gpl}. Take care: By -enabling AC-3, you automatically change the license of libavcodec from -LGPL to GPL. - -@section Why does the chrominance data seem to be sampled at a different time from the luminance data on bt8x8 captures on Linux? - -This is a well-known bug in the bt8x8 driver. For 2.4.26 there is a patch at -(@url{http://svn.mplayerhq.hu/michael/trunk/patches/bttv-420-2.4.26.patch?view=co}). This may also -apply cleanly to other 2.4-series kernels. - -@section How do I avoid the ugly aliasing artifacts in bt8x8 captures on Linux? - -Pass 'combfilter=1 lumafilter=1' to the bttv driver. Note though that 'combfilter=1' -will cause somewhat too strong filtering. A fix is to apply (@url{http://svn.mplayerhq.hu/michael/trunk/patches/bttv-comb-2.4.26.patch?view=co}) -or (@url{http://svn.mplayerhq.hu/michael/trunk/patches/bttv-comb-2.6.6.patch?view=co}) -and pass 'combfilter=2'. 
- -@section -f jpeg doesn't work. - -Try '-f image2 test%d.jpg'. - -@section Why can I not change the framerate? - -Some codecs, like MPEG-1/2, only allow a small number of fixed framerates. -Choose a different codec with the -vcodec command line option. - -@section How do I encode XviD or DivX video with ffmpeg? - -Both XviD and DivX (version 4+) are implementations of the ISO MPEG-4 -standard (note that there are many other coding formats that use this -same standard). Thus, use '-vcodec mpeg4' to encode these formats. The -default fourcc stored in an MPEG-4-coded file will be 'FMP4'. If you want -a different fourcc, use the '-vtag' option. E.g., '-vtag xvid' will -force the fourcc 'xvid' to be stored as the video fourcc rather than the -default. - -@section How do I encode videos which play on the iPod? - -@table @option -@item needed stuff --acodec libfaac -vcodec mpeg4 width<=320 height<=240 -@item working stuff -4mv, title -@item non-working stuff -B-frames -@item example command line -ffmpeg -i input -acodec libfaac -ab 128kb -vcodec mpeg4 -b 1200kb -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -s 320x180 -title X output.mp4 -@end table - -@section How do I encode videos which play on the PSP? - -@table @option -@item needed stuff --acodec libfaac -vcodec mpeg4 width*height<=76800 width%16=0 height%16=0 -ar 24000 -r 30000/1001 or 15000/1001 -f psp -@item working stuff -4mv, title -@item non-working stuff -B-frames -@item example command line -ffmpeg -i input -acodec libfaac -ab 128kb -vcodec mpeg4 -b 1200kb -ar 24000 -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -s 368x192 -r 30000/1001 -title X -f psp output.mp4 -@item needed stuff for H.264 --acodec libfaac -vcodec h264 width*height<=76800 width%16=0? height%16=0? 
-ar 48000 -coder 1 -r 30000/1001 or 15000/1001 -f psp -@item working stuff for H.264 -title, loop filter -@item non-working stuff for H.264 -CAVLC -@item example command line -ffmpeg -i input -acodec libfaac -ab 128kb -vcodec h264 -b 1200kb -ar 48000 -mbd 2 -coder 1 -cmp 2 -subcmp 2 -s 368x192 -r 30000/1001 -title X -f psp -flags loop -trellis 2 -partitions parti4x4+parti8x8+partp4x4+partp8x8+partb8x8 output.mp4 -@end table - -@section Which are good parameters for encoding high quality MPEG-4? - -'-mbd rd -flags +4mv+trell+aic -cmp 2 -subcmp 2 -g 300 -pass 1/2', -things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'. - -@section Which are good parameters for encoding high quality MPEG-1/MPEG-2? - -'-mbd rd -flags +trell -cmp 2 -subcmp 2 -g 100 -pass 1/2' -but beware the '-g 100' might cause problems with some decoders. -Things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd. - -@section Interlaced video looks very bad when encoded with ffmpeg, whats wrong? - -You should use '-flags +ilme+ildct' and maybe '-flags +alt' for interlaced -material, and try '-top 0/1' if the result looks really messed-up. - -@section How can I read DirectShow files? - -If you have built FFmpeg with @code{./configure --enable-avisynth} -(only possible on MinGW/Cygwin platforms), -then you may use any file that DirectShow can read as input. -(Be aware that this feature has been recently added, -so you will need to help yourself in case of problems.) - -Just create an "input.avs" text file with this single line ... -@example - DirectShowSource("C:\path to your file\yourfile.asf") -@end example -... and then feed that text file to FFmpeg: -@example - ffmpeg -i input.avs -@end example - -For ANY other help on Avisynth, please visit @url{http://www.avisynth.org/}. - -@section How can I join video files? - -A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to join video files by -merely concatenating them. 
- -Hence you may concatenate your multimedia files by first transcoding them to -these privileged formats, then using the humble @code{cat} command (or the -equally humble @code{copy} under Windows), and finally transcoding back to your -format of choice. - -@example -ffmpeg -i input1.avi -sameq intermediate1.mpg -ffmpeg -i input2.avi -sameq intermediate2.mpg -cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg -ffmpeg -i intermediate_all.mpg -sameq output.avi -@end example - -Notice that you should either use @code{-sameq} or set a reasonably high -bitrate for your intermediate and output files, if you want to preserve -video quality. - -Also notice that you may avoid the huge intermediate files by taking advantage -of named pipes, should your platform support it: - -@example -mkfifo intermediate1.mpg -mkfifo intermediate2.mpg -ffmpeg -i input1.avi -sameq -y intermediate1.mpg < /dev/null & -ffmpeg -i input2.avi -sameq -y intermediate2.mpg < /dev/null & -cat intermediate1.mpg intermediate2.mpg |\ -ffmpeg -f mpeg -i - -sameq -vcodec mpeg4 -acodec libmp3lame output.avi -@end example - -Similarly, the yuv4mpegpipe format, and the raw video, raw audio codecs also -allow concatenation, and the transcoding step is almost lossless. 
- -For example, let's say we want to join two FLV files into an output.flv file: - -@example -mkfifo temp1.a -mkfifo temp1.v -mkfifo temp2.a -mkfifo temp2.v -mkfifo all.a -mkfifo all.v -ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null & -ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null & -ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null & -ffmpeg -i input2.flv -an -f yuv4mpegpipe - > temp2.v < /dev/null & -cat temp1.a temp2.a > all.a & -cat temp1.v temp2.v > all.v & -ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \ - -f yuv4mpegpipe -i all.v \ - -sameq -y output.flv -rm temp[12].[av] all.[av] -@end example - -@section FFmpeg does not adhere to the -maxrate setting, some frames are bigger than maxrate/fps. - -Read the MPEG spec about video buffer verifier. - -@section I want CBR, but no matter what I do frame sizes differ. - -You do not understand what CBR is, please read the MPEG spec. -Read about video buffer verifier and constant bitrate. -The one sentence summary is that there is a buffer and the input rate is -constant, the output can vary as needed. - -@section How do I check if a stream is CBR? - -To quote the MPEG-2 spec: -"There is no way to tell that a bitstream is constant bitrate without -examining all of the vbv_delay values and making complicated computations." - - -@chapter Development - -@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat? - -Yes. Read the Developers Guide of the FFmpeg documentation. Alternatively, -examine the source code for one of the many open source projects that -already incorporate ffmpeg at (@url{projects.html}). - -@section Can you support my C compiler XXX? - -It depends. If your compiler is C99-compliant, then patches to support -it are likely to be welcome if they do not pollute the source code -with @code{#ifdef}s related to the compiler. 
- -@section Is Microsoft Visual C++ supported? - -No. Microsoft Visual C++ is not compliant to the C99 standard and does -not - among other things - support the inline assembly used in FFmpeg. -If you wish to use MSVC++ for your -project then you can link the MSVC++ code with libav* as long as -you compile the latter with a working C compiler. For more information, see -the @emph{Microsoft Visual C++ compatibility} section in the FFmpeg -documentation. - -There have been efforts to make FFmpeg compatible with MSVC++ in the -past. However, they have all been rejected as too intrusive, especially -since MinGW does the job adequately. None of the core developers -work with MSVC++ and thus this item is low priority. Should you find -the silver bullet that solves this problem, feel free to shoot it at us. - -We strongly recommend you to move over from MSVC++ to MinGW tools. - -@section Can I use FFmpeg or libavcodec under Windows? - -Yes, but the Cygwin or MinGW tools @emph{must} be used to compile FFmpeg. -Read the @emph{Windows} section in the FFmpeg documentation to find more -information. - -To get help and instructions for building FFmpeg under Windows, check out -the FFmpeg Windows Help Forum at -@url{http://arrozcru.no-ip.org/ffmpeg/}. - -@section Can you add automake, libtool or autoconf support? - -No. These tools are too bloated and they complicate the build. - -@section Why not rewrite ffmpeg in object-oriented C++? - -ffmpeg is already organized in a highly modular manner and does not need to -be rewritten in a formal object language. Further, many of the developers -favor straight C; it works for them. For more arguments on this matter, -read "Programming Religion" at (@url{http://www.tux.org/lkml/#s15}). - -@section Why are the ffmpeg programs devoid of debugging symbols? - -The build process creates ffmpeg_g, ffplay_g, etc. which contain full debug -information. Those binaries are strip'd to create ffmpeg, ffplay, etc. 
If -you need the debug information, used the *_g versions. - -@section I do not like the LGPL, can I contribute code under the GPL instead? - -Yes, as long as the code is optional and can easily and cleanly be placed -under #ifdef CONFIG_GPL without breaking anything. So for example a new codec -or filter would be OK under GPL while a bugfix to LGPL code would not. - -@section I want to compile xyz.c alone but my compiler produced many errors. - -Common code is in its own files in libav* and is used by the individual -codecs. They will not work without the common parts, you have to compile -the whole libav*. If you wish, disable some parts with configure switches. -You can also try to hack it and remove more, but if you had problems fixing -the compilation failure then you are probably not qualified for this. - -@section I'm using libavcodec from within my C++ application but the linker complains about missing symbols which seem to be available. - -FFmpeg is a pure C project, so to use the libraries within your C++ application -you need to explicitly state that you are using a C library. You can do this by -encompassing your FFmpeg includes using @code{extern "C"}. - -See @url{http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3} - -@section I have a file in memory / a API different from *open/*read/ libc how do i use it with libavformat? - -You have to implement a URLProtocol, see libavformat/file.c in FFmpeg -and libmpdemux/demux_lavf.c in MPlayer sources. - -@section I get "No compatible shell script interpreter found." in MSys. - -The standard MSys bash (2.04) is broken. You need to install 2.05 or later. - -@section I get "./configure: line <xxx>: pr: command not found" in MSys. - -The standard MSys install doesn't come with pr. You need to get it from the coreutils package. - -@section I tried to pass RTP packets into a decoder, but it doesn't work. 
- -RTP is a container format like any other, you must first depacketize the -codec frames/samples stored in RTP and then feed to the decoder. - -@section Where can I find libav* headers for Pascal/Delphi? - -see @url{http://www.iversenit.dk/dev/ffmpeg-headers/} - -@section Where is the documentation about ffv1, msmpeg4, asv1, 4xm? - -see @url{http://svn.mplayerhq.hu/michael/trunk/docs/} - -@section How do I feed H.263-RTP (and other codecs in RTP) to libavcodec? - -Even if peculiar since it is network oriented, RTP is a container like any -other. You have to @emph{demux} RTP before feeding the payload to libavcodec. -In this specific case please look at RFC 4629 to see how it should be done. - -@section AVStream.r_frame_rate is wrong, it is much larger than the framerate. - -r_frame_rate is NOT the average framerate, it is the smallest framerate -that can accurately represent all timestamps. So no, it is not -wrong if it is larger than the average! -For example, if you have mixed 25 and 30 fps content, then r_frame_rate -will be 150. - -@bye diff --git a/contrib/ffmpeg/doc/ffmpeg-doc.texi b/contrib/ffmpeg/doc/ffmpeg-doc.texi deleted file mode 100644 index baa257726..000000000 --- a/contrib/ffmpeg/doc/ffmpeg-doc.texi +++ /dev/null @@ -1,853 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFmpeg Documentation -@titlepage -@sp 7 -@center @titlefont{FFmpeg Documentation} -@sp 3 -@end titlepage - - -@chapter Introduction - -FFmpeg is a very fast video and audio converter. It can also grab from -a live audio/video source. - -The command line interface is designed to be intuitive, in the sense -that FFmpeg tries to figure out all parameters that can possibly be -derived automatically. You usually only have to specify the target -bitrate you want. - -FFmpeg can also convert from any sample rate to any other, and resize -video on the fly with a high quality polyphase filter. 
- -@chapter Quick Start - -@c man begin EXAMPLES -@section Video and Audio grabbing - -FFmpeg can grab video and audio from devices given that you specify the input -format and device. - -@example -ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg -@end example - -Note that you must activate the right video source and channel before -launching FFmpeg with any TV viewer such as xawtv -(@url{http://bytesex.org/xawtv/}) by Gerd Knorr. You also -have to set the audio recording levels correctly with a -standard mixer. - -@section X11 grabbing - -FFmpeg can grab the X11 display. - -@example -ffmpeg -f x11grab -s cif -i :0.0 /tmp/out.mpg -@end example - -0.0 is display.screen number of your X11 server, same as -the DISPLAY environment variable. - -@example -ffmpeg -f x11grab -s cif -i :0.0+10,20 /tmp/out.mpg -@end example - -0.0 is display.screen number of your X11 server, same as the DISPLAY environment -variable. 10 is the x-offset and 20 the y-offset for the grabbing. - -@section Video and Audio file format conversion - -* FFmpeg can use any supported file format and protocol as input: - -Examples: - -* You can use YUV files as input: - -@example -ffmpeg -i /tmp/test%d.Y /tmp/out.mpg -@end example - -It will use the files: -@example -/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V, -/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc... -@end example - -The Y files use twice the resolution of the U and V files. They are -raw files, without header. They can be generated by all decent video -decoders. You must specify the size of the image with the @option{-s} option -if FFmpeg cannot guess it. - -* You can input from a raw YUV420P file: - -@example -ffmpeg -i /tmp/test.yuv /tmp/out.avi -@end example - -test.yuv is a file containing raw YUV planar data. Each frame is composed -of the Y plane followed by the U and V planes at half vertical and -horizontal resolution. 
- -* You can output to a raw YUV420P file: - -@example -ffmpeg -i mydivx.avi hugefile.yuv -@end example - -* You can set several input files and output files: - -@example -ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg -@end example - -Converts the audio file a.wav and the raw YUV video file a.yuv -to MPEG file a.mpg. - -* You can also do audio and video conversions at the same time: - -@example -ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2 -@end example - -Converts a.wav to MPEG audio at 22050Hz sample rate. - -* You can encode to several formats at the same time and define a -mapping from input stream to output streams: - -@example -ffmpeg -i /tmp/a.wav -ab 64k /tmp/a.mp2 -ab 128k /tmp/b.mp2 -map 0:0 -map 0:0 -@end example - -Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. '-map -file:index' specifies which input stream is used for each output -stream, in the order of the definition of output streams. - -* You can transcode decrypted VOBs - -@example -ffmpeg -i snatch_1.vob -f avi -vcodec mpeg4 -b 800k -g 300 -bf 2 -acodec libmp3lame -ab 128k snatch.avi -@end example - -This is a typical DVD ripping example; the input is a VOB file, the -output an AVI file with MPEG-4 video and MP3 audio. Note that in this -command we use B-frames so the MPEG-4 stream is DivX5 compatible, and -GOP size is 300 which means one intra frame every 10 seconds for 29.97fps -input video. Furthermore, the audio stream is MP3-encoded so you need -to enable LAME support by passing @code{--enable-libmp3lame} to configure. -The mapping is particularly useful for DVD transcoding -to get the desired audio language. - -NOTE: To see the supported input formats, use @code{ffmpeg -formats}. -@c man end - -@chapter Invocation - -@section Syntax - -The generic syntax is: - -@example -@c man begin SYNOPSIS -ffmpeg [[infile options][@option{-i} @var{infile}]]... @{[outfile options] @var{outfile}@}... 
-@c man end -@end example -@c man begin DESCRIPTION -As a general rule, options are applied to the next specified -file. Therefore, order is important, and you can have the same -option on the command line multiple times. Each occurrence is -then applied to the next input or output file. - -* To set the video bitrate of the output file to 64kbit/s: -@example -ffmpeg -i input.avi -b 64k output.avi -@end example - -* To force the frame rate of the input and output file to 24 fps: -@example -ffmpeg -r 24 -i input.avi output.avi -@end example - -* To force the frame rate of the output file to 24 fps: -@example -ffmpeg -i input.avi -r 24 output.avi -@end example - -* To force the frame rate of input file to 1 fps and the output file to 24 fps: -@example -ffmpeg -r 1 -i input.avi -r 24 output.avi -@end example - -The format option may be needed for raw input files. - -By default, FFmpeg tries to convert as losslessly as possible: It -uses the same audio and video parameters for the outputs as the one -specified for the inputs. -@c man end - -@c man begin OPTIONS -@section Main options - -@table @option -@item -L -Show license. - -@item -h -Show help. - -@item -version -Show version. - -@item -formats -Show available formats, codecs, protocols, ... - -@item -f fmt -Force format. - -@item -i filename -input filename - -@item -y -Overwrite output files. - -@item -t duration -Restrict the transcoded/captured video sequence -to the duration specified in seconds. -@code{hh:mm:ss[.xxx]} syntax is also supported. - -@item -fs limit_size -Set the file size limit. - -@item -ss position -Seek to given time position in seconds. -@code{hh:mm:ss[.xxx]} syntax is also supported. - -@item -itsoffset offset -Set the input time offset in seconds. -@code{[-]hh:mm:ss[.xxx]} syntax is also supported. -This option affects all the input files that follow it. -The offset is added to the timestamps of the input files. 
-Specifying a positive offset means that the corresponding -streams are delayed by 'offset' seconds. - -@item -title string -Set the title. - -@item -timestamp time -Set the timestamp. - -@item -author string -Set the author. - -@item -copyright string -Set the copyright. - -@item -comment string -Set the comment. - -@item -album string -Set the album. - -@item -track number -Set the track. - -@item -year number -Set the year. - -@item -v number -Set the logging verbosity level. - -@item -target type -Specify target file type ("vcd", "svcd", "dvd", "dv", "dv50", "pal-vcd", -"ntsc-svcd", ... ). All the format options (bitrate, codecs, -buffer sizes) are then set automatically. You can just type: - -@example -ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg -@end example - -Nevertheless you can specify additional options as long as you know -they do not conflict with the standard, as in: - -@example -ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg -@end example - -@item -dframes number -Set the number of data frames to record. - -@item -scodec codec -Force subtitle codec ('copy' to copy stream). - -@item -newsubtitle -Add a new subtitle stream to the current output stream. - -@item -slang code -Set the ISO 639 language code (3 letters) of the current subtitle stream. - -@end table - -@section Video Options - -@table @option -@item -b bitrate -Set the video bitrate in bit/s (default = 200 kb/s). -@item -vframes number -Set the number of video frames to record. -@item -r fps -Set frame rate (Hz value, fraction or abbreviation), (default = 25). -@item -s size -Set frame size. The format is @samp{wxh} (ffserver default = 160x128, ffmpeg default = same as source). 
-The following abbreviations are recognized: -@table @samp -@item sqcif -128x96 -@item qcif -176x144 -@item cif -352x288 -@item 4cif -704x576 -@item qqvga -160x120 -@item qvga -320x240 -@item vga -640x480 -@item svga -800x600 -@item xga -1024x768 -@item uxga -1600x1200 -@item qxga -2048x1536 -@item sxga -1280x1024 -@item qsxga -2560x2048 -@item hsxga -5120x4096 -@item wvga -852x480 -@item wxga -1366x768 -@item wsxga -1600x1024 -@item wuxga -1920x1200 -@item woxga -2560x1600 -@item wqsxga -3200x2048 -@item wquxga -3840x2400 -@item whsxga -6400x4096 -@item whuxga -7680x4800 -@item cga -320x200 -@item ega -640x350 -@item hd480 -852x480 -@item hd720 -1280x720 -@item hd1080 -1920x1080 -@end table - -@item -aspect aspect -Set aspect ratio (4:3, 16:9 or 1.3333, 1.7777). -@item -croptop size -Set top crop band size (in pixels). -@item -cropbottom size -Set bottom crop band size (in pixels). -@item -cropleft size -Set left crop band size (in pixels). -@item -cropright size -Set right crop band size (in pixels). -@item -padtop size -Set top pad band size (in pixels). -@item -padbottom size -Set bottom pad band size (in pixels). -@item -padleft size -Set left pad band size (in pixels). -@item -padright size -Set right pad band size (in pixels). -@item -padcolor (hex color) -Set color of padded bands. The value for padcolor is expressed -as a six digit hexadecimal number where the first two digits -represent red, the middle two digits green and last two digits -blue (default = 000000 (black)). -@item -vn -Disable video recording. -@item -bt tolerance -Set video bitrate tolerance (in bit/s). -@item -maxrate bitrate -Set max video bitrate (in bit/s). -@item -minrate bitrate -Set min video bitrate (in bit/s). -@item -bufsize size -Set video buffer verifier buffer size (in bits). -@item -vcodec codec -Force video codec to @var{codec}. Use the @code{copy} special value to -tell that the raw codec data must be copied as is. 
-@item -sameq -Use same video quality as source (implies VBR). - -@item -pass n -Select the pass number (1 or 2). It is useful to do two pass -encoding. The statistics of the video are recorded in the first -pass and the video is generated at the exact requested bitrate -in the second pass. - -@item -passlogfile file -Set two pass logfile name to @var{file}. - -@item -newvideo -Add a new video stream to the current output stream. - -@end table - -@section Advanced Video Options - -@table @option -@item -pix_fmt format -Set pixel format. Use 'list' as parameter to show all the supported -pixel formats. -@item -sws_flags flags -Set SwScaler flags (only available when compiled with SwScaler support). -@item -g gop_size -Set the group of pictures size. -@item -intra -Use only intra frames. -@item -vdt n -Discard threshold. -@item -qscale q -Use fixed video quantizer scale (VBR). -@item -qmin q -minimum video quantizer scale (VBR) -@item -qmax q -maximum video quantizer scale (VBR) -@item -qdiff q -maximum difference between the quantizer scales (VBR) -@item -qblur blur -video quantizer scale blur (VBR) -@item -qcomp compression -video quantizer scale compression (VBR) - -@item -lmin lambda -minimum video lagrange factor (VBR) -@item -lmax lambda -max video lagrange factor (VBR) -@item -mblmin lambda -minimum macroblock quantizer scale (VBR) -@item -mblmax lambda -maximum macroblock quantizer scale (VBR) - -These four options (lmin, lmax, mblmin, mblmax) use 'lambda' units, -but you may use the QP2LAMBDA constant to easily convert from 'q' units: -@example -ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext -@end example - -@item -rc_init_cplx complexity -initial complexity for single pass encoding -@item -b_qfactor factor -qp factor between P- and B-frames -@item -i_qfactor factor -qp factor between P- and I-frames -@item -b_qoffset offset -qp offset between P- and B-frames -@item -i_qoffset offset -qp offset between P- and I-frames -@item -rc_eq equation -Set rate control 
equation (@pxref{FFmpeg formula -evaluator}) (default = @code{tex^qComp}). -@item -rc_override override -rate control override for specific intervals -@item -me_method method -Set motion estimation method to @var{method}. -Available methods are (from lowest to best quality): -@table @samp -@item zero -Try just the (0, 0) vector. -@item phods -@item log -@item x1 -@item hex -@item umh -@item epzs -(default method) -@item full -exhaustive search (slow and marginally better than epzs) -@end table - -@item -dct_algo algo -Set DCT algorithm to @var{algo}. Available values are: -@table @samp -@item 0 -FF_DCT_AUTO (default) -@item 1 -FF_DCT_FASTINT -@item 2 -FF_DCT_INT -@item 3 -FF_DCT_MMX -@item 4 -FF_DCT_MLIB -@item 5 -FF_DCT_ALTIVEC -@end table - -@item -idct_algo algo -Set IDCT algorithm to @var{algo}. Available values are: -@table @samp -@item 0 -FF_IDCT_AUTO (default) -@item 1 -FF_IDCT_INT -@item 2 -FF_IDCT_SIMPLE -@item 3 -FF_IDCT_SIMPLEMMX -@item 4 -FF_IDCT_LIBMPEG2MMX -@item 5 -FF_IDCT_PS2 -@item 6 -FF_IDCT_MLIB -@item 7 -FF_IDCT_ARM -@item 8 -FF_IDCT_ALTIVEC -@item 9 -FF_IDCT_SH4 -@item 10 -FF_IDCT_SIMPLEARM -@end table - -@item -er n -Set error resilience to @var{n}. -@table @samp -@item 1 -FF_ER_CAREFUL (default) -@item 2 -FF_ER_COMPLIANT -@item 3 -FF_ER_AGGRESSIVE -@item 4 -FF_ER_VERY_AGGRESSIVE -@end table - -@item -ec bit_mask -Set error concealment to @var{bit_mask}. @var{bit_mask} is a bit mask of -the following values: -@table @samp -@item 1 -FF_EC_GUESS_MVS (default = enabled) -@item 2 -FF_EC_DEBLOCK (default = enabled) -@end table - -@item -bf frames -Use 'frames' B-frames (supported for MPEG-1, MPEG-2 and MPEG-4). -@item -mbd mode -macroblock decision -@table @samp -@item 0 -FF_MB_DECISION_SIMPLE: Use mb_cmp (cannot change it yet in FFmpeg). -@item 1 -FF_MB_DECISION_BITS: Choose the one which needs the fewest bits. -@item 2 -FF_MB_DECISION_RD: rate distortion -@end table - -@item -4mv -Use four motion vector by macroblock (MPEG-4 only). 
-@item -part -Use data partitioning (MPEG-4 only). -@item -bug param -Work around encoder bugs that are not auto-detected. -@item -strict strictness -How strictly to follow the standards. -@item -aic -Enable Advanced intra coding (h263+). -@item -umv -Enable Unlimited Motion Vector (h263+) - -@item -deinterlace -Deinterlace pictures. -@item -ilme -Force interlacing support in encoder (MPEG-2 and MPEG-4 only). -Use this option if your input file is interlaced and you want -to keep the interlaced format for minimum losses. -The alternative is to deinterlace the input stream with -@option{-deinterlace}, but deinterlacing introduces losses. -@item -psnr -Calculate PSNR of compressed frames. -@item -vstats -Dump video coding statistics to @file{vstats_HHMMSS.log}. -@item -vstats_file file -Dump video coding statistics to @var{file}. -@item -vhook module -Insert video processing @var{module}. @var{module} contains the module -name and its parameters separated by spaces. -@item -top n -top=1/bottom=0/auto=-1 field first -@item -dc precision -Intra_dc_precision. -@item -vtag fourcc/tag -Force video tag/fourcc. -@item -qphist -Show QP histogram. -@item -vbsf bitstream filter -Bitstream filters available are "dump_extra", "remove_extra", "noise". -@end table - -@section Audio Options - -@table @option -@item -aframes number -Set the number of audio frames to record. -@item -ar freq -Set the audio sampling frequency (default = 44100 Hz). -@item -ab bitrate -Set the audio bitrate in bit/s (default = 64k). -@item -ac channels -Set the number of audio channels (default = 1). -@item -an -Disable audio recording. -@item -acodec codec -Force audio codec to @var{codec}. Use the @code{copy} special value to -specify that the raw codec data must be copied as is. -@item -newaudio -Add a new audio track to the output file. If you want to specify parameters, -do so before @code{-newaudio} (@code{-acodec}, @code{-ab}, etc..). 
- -Mapping will be done automatically, if the number of output streams is equal to -the number of input streams, else it will pick the first one that matches. You -can override the mapping using @code{-map} as usual. - -Example: -@example -ffmpeg -i file.mpg -vcodec copy -acodec ac3 -ab 384k test.mpg -acodec mp2 -ab 192k -newaudio -@end example -@item -alang code -Set the ISO 639 language code (3 letters) of the current audio stream. -@end table - -@section Advanced Audio options: - -@table @option -@item -atag fourcc/tag -Force audio tag/fourcc. -@item -absf bitstream filter -Bitstream filters available are "dump_extra", "remove_extra", "noise", "mp3comp", "mp3decomp". -@end table - -@section Subtitle options: - -@table @option -@item -scodec codec -Force subtitle codec ('copy' to copy stream). -@item -newsubtitle -Add a new subtitle stream to the current output stream. -@item -slang code -Set the ISO 639 language code (3 letters) of the current subtitle stream. -@end table - -@section Audio/Video grab options - -@table @option -@item -vc channel -Set video grab channel (DV1394 only). -@item -tvstd standard -Set television standard (NTSC, PAL (SECAM)). -@item -isync -Synchronize read on input. -@end table - -@section Advanced options - -@table @option -@item -map input stream id[:input stream id] -Set stream mapping from input streams to output streams. -Just enumerate the input streams in the order you want them in the output. -[input stream id] sets the (input) stream to sync against. -@item -map_meta_data outfile:infile -Set meta data information of outfile from infile. -@item -debug -Print specific debug info. -@item -benchmark -Add timings for benchmarking. -@item -dump -Dump each input packet. -@item -hex -When dumping packets, also dump the payload. -@item -bitexact -Only use bit exact algorithms (for codec testing). -@item -ps size -Set packet size in bits. -@item -re -Read input at native frame rate. Mainly used to simulate a grab device. 
-@item -loop_input -Loop over the input stream. Currently it works only for image -streams. This option is used for automatic FFserver testing. -@item -loop_output number_of_times -Repeatedly loop output for formats that support looping such as animated GIF -(0 will loop the output infinitely). -@item -threads count -Thread count. -@item -vsync parameter -Video sync method. Video will be stretched/squeezed to match the timestamps, -it is done by duplicating and dropping frames. With -map you can select from -which stream the timestamps should be taken. You can leave either video or -audio unchanged and sync the remaining stream(s) to the unchanged one. -@item -async samples_per_second -Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps, -the parameter is the maximum samples per second by which the audio is changed. --async 1 is a special case where only the start of the audio stream is corrected -without any later correction. -@item -copyts -Copy timestamps from input to output. -@item -shortest -Finish encoding when the shortest input stream ends. -@item -dts_delta_threshold -Timestamp discontinuity delta threshold. -@item -muxdelay seconds -Set the maximum demux-decode delay. -@item -muxpreload seconds -Set the initial demux-decode delay. -@end table - -@node FFmpeg formula evaluator -@section FFmpeg formula evaluator - -When evaluating a rate control string, FFmpeg uses an internal formula -evaluator. - -The following binary operators are available: @code{+}, @code{-}, -@code{*}, @code{/}, @code{^}. - -The following unary operators are available: @code{+}, @code{-}, -@code{(...)}. 
- -The following functions are available: -@table @var -@item sinh(x) -@item cosh(x) -@item tanh(x) -@item sin(x) -@item cos(x) -@item tan(x) -@item exp(x) -@item log(x) -@item squish(x) -@item gauss(x) -@item abs(x) -@item max(x, y) -@item min(x, y) -@item gt(x, y) -@item lt(x, y) -@item eq(x, y) -@item bits2qp(bits) -@item qp2bits(qp) -@end table - -The following constants are available: -@table @var -@item PI -@item E -@item iTex -@item pTex -@item tex -@item mv -@item fCode -@item iCount -@item mcVar -@item var -@item isI -@item isP -@item isB -@item avgQP -@item qComp -@item avgIITex -@item avgPITex -@item avgPPTex -@item avgBPTex -@item avgTex -@end table - -@c man end - -@ignore - -@setfilename ffmpeg -@settitle FFmpeg video converter - -@c man begin SEEALSO -ffserver(1), ffplay(1) and the HTML documentation of @file{ffmpeg}. -@c man end - -@c man begin AUTHOR -Fabrice Bellard -@c man end - -@end ignore - -@section Protocols - -The filename can be @file{-} to read from standard input or to write -to standard output. - -FFmpeg also handles many protocols specified with an URL syntax. - -Use 'ffmpeg -formats' to see a list of the supported protocols. - -The protocol @code{http:} is currently used only to communicate with -FFserver (see the FFserver documentation). When FFmpeg will be a -video player it will also be used for streaming :-) - -@chapter Tips - -@itemize -@item For streaming at very low bitrate application, use a low frame rate -and a small GOP size. This is especially true for RealVideo where -the Linux player does not seem to be very fast, so it can miss -frames. An example is: - -@example -ffmpeg -g 3 -r 3 -t 10 -b 50k -s qcif -f rv10 /tmp/b.rm -@end example - -@item The parameter 'q' which is displayed while encoding is the current -quantizer. The value 1 indicates that a very good quality could -be achieved. The value 31 indicates the worst quality. 
If q=31 appears -too often, it means that the encoder cannot compress enough to meet -your bitrate. You must either increase the bitrate, decrease the -frame rate or decrease the frame size. - -@item If your computer is not fast enough, you can speed up the -compression at the expense of the compression ratio. You can use -'-me zero' to speed up motion estimation, and '-intra' to disable -motion estimation completely (you have only I-frames, which means it -is about as good as JPEG compression). - -@item To have very low audio bitrates, reduce the sampling frequency -(down to 22050 kHz for MPEG audio, 22050 or 11025 for AC3). - -@item To have a constant quality (but a variable bitrate), use the option -'-qscale n' when 'n' is between 1 (excellent quality) and 31 (worst -quality). - -@item When converting video files, you can use the '-sameq' option which -uses the same quality factor in the encoder as in the decoder. -It allows almost lossless encoding. - -@end itemize - -@bye diff --git a/contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt b/contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt deleted file mode 100644 index 2eb4ee71a..000000000 --- a/contrib/ffmpeg/doc/ffmpeg_powerpc_performance_evaluation_howto.txt +++ /dev/null @@ -1,172 +0,0 @@ -FFmpeg & evaluating performance on the PowerPC Architecture HOWTO - -(c) 2003-2004 Romain Dolbeau <romain@dolbeau.org> - - - -I - Introduction - -The PowerPC architecture and its SIMD extension AltiVec offer some -interesting tools to evaluate performance and improve the code. -This document tries to explain how to use those tools with FFmpeg. - -The architecture itself offers two ways to evaluate the performance of -a given piece of code: - -1) The Time Base Registers (TBL) -2) The Performance Monitor Counter Registers (PMC) - -The first ones are always available, always active, but they're not very -accurate: the registers increment by one every four *bus* cycles. 
On -my 667 Mhz tiBook (ppc7450), this means once every twenty *processor* -cycles. So we won't use that. - -The PMC are much more useful: not only can they report cycle-accurate -timing, but they can also be used to monitor many other parameters, -such as the number of AltiVec stalls for every kind of instruction, -or instruction cache misses. The downside is that not all processors -support the PMC (all G3, all G4 and the 970 do support them), and -they're inactive by default - you need to activate them with a -dedicated tool. Also, the number of available PMC depends on the -procesor: the various 604 have 2, the various 75x (aka. G3) have 4, -and the various 74xx (aka G4) have 6. - -*WARNING*: The PowerPC 970 is not very well documented, and its PMC -registers are 64 bits wide. To properly notify the code, you *must* -tune for the 970 (using --tune=970), or the code will assume 32 bit -registers. - - -II - Enabling FFmpeg PowerPC performance support - -This needs to be done by hand. First, you need to configure FFmpeg as -usual, but add the "--powerpc-perf-enable" option. For instance: - -##### -./configure --prefix=/usr/local/ffmpeg-svn --cc=gcc-3.3 --tune=7450 --powerpc-perf-enable -##### - -This will configure FFmpeg to install inside /usr/local/ffmpeg-svn, -compiling with gcc-3.3 (you should try to use this one or a newer -gcc), and tuning for the PowerPC 7450 (i.e. the newer G4; as a rule of -thumb, those at 550Mhz and more). It will also enable the PMC. - -You may also edit the file "config.h" to enable the following line: - -##### -// #define ALTIVEC_USE_REFERENCE_C_CODE 1 -##### - -If you enable this line, then the code will not make use of AltiVec, -but will use the reference C code instead. This is useful to compare -performance between two versions of the code. - -Also, the number of enabled PMC is defined in "libavcodec/ppc/dsputil_ppc.h": - -##### -#define POWERPC_NUM_PMC_ENABLED 4 -##### - -If you have a G4 CPU, you can enable all 6 PMC. 
DO NOT enable more -PMC than available on your CPU! - -Then, simply compile FFmpeg as usual (make && make install). - - - -III - Using FFmpeg PowerPC performance support - -This FFmeg can be used exactly as usual. But before exiting, FFmpeg -will dump a per-function report that looks like this: - -##### -PowerPC performance report - Values are from the PMC registers, and represent whatever the - registers are set to record. - Function "gmc1_altivec" (pmc1): - min: 231 - max: 1339867 - avg: 558.25 (255302) - Function "gmc1_altivec" (pmc2): - min: 93 - max: 2164 - avg: 267.31 (255302) - Function "gmc1_altivec" (pmc3): - min: 72 - max: 1987 - avg: 276.20 (255302) -(...) -##### - -In this example, PMC1 was set to record CPU cycles, PMC2 was set to -record AltiVec Permute Stall Cycles, and PMC3 was set to record AltiVec -Issue Stalls. - -The function "gmc1_altivec" was monitored 255302 times, and the -minimum execution time was 231 processor cycles. The max and average -aren't much use, as it's very likely the OS interrupted execution for -reasons of its own :-( - -With the exact same settings and source file, but using the reference C -code we get: - -##### -PowerPC performance report - Values are from the PMC registers, and represent whatever the - registers are set to record. - Function "gmc1_altivec" (pmc1): - min: 592 - max: 2532235 - avg: 962.88 (255302) - Function "gmc1_altivec" (pmc2): - min: 0 - max: 33 - avg: 0.00 (255302) - Function "gmc1_altivec" (pmc3): - min: 0 - max: 350 - avg: 0.03 (255302) -(...) -##### - -592 cycles, so the fastest AltiVec execution is about 2.5x faster than -the fastest C execution in this example. It's not perfect but it's not -bad (well I wrote this function so I can't say otherwise :-). - -Once you have that kind of report, you can try to improve things by -finding what goes wrong and fixing it; in the example above, one -should try to diminish the number of AltiVec stalls, as this *may* -improve performance. 
- - - -IV) Enabling the PMC in Mac OS X - -This is easy. Use "Monster" and "monster". Those tools come from -Apple's CHUD package, and can be found hidden in the developer web -site & FTP site. "MONster" is the graphical application, use it to -generate a config file specifying what each register should -monitor. Then use the command-line application "monster" to use that -config file, and enjoy the results. - -Note that "MONster" can be used for many other things, but it's -documented by Apple, it's not my subject. - -If you are using CHUD 4.4.2 or later, you'll notice that MONster is -no longer available. It's been superseeded by Shark, where -configuration of PMCs is available as a plugin. - - - -V) Enabling the PMC on Linux - -On linux you may use oprofile from http://oprofile.sf.net, depending on the -version and the cpu you may need to apply a patch[1] to access a set of the -possibile counters from the userspace application. You can always define them -using the kernel interface /dev/oprofile/* . - -[1] http://dev.gentoo.org/~lu_zero/development/oprofile-g4-20060423.patch - --- -Romain Dolbeau <romain@dolbeau.org> -Luca Barbato <lu_zero@gentoo.org> diff --git a/contrib/ffmpeg/doc/ffplay-doc.texi b/contrib/ffmpeg/doc/ffplay-doc.texi deleted file mode 100644 index 1ac315662..000000000 --- a/contrib/ffmpeg/doc/ffplay-doc.texi +++ /dev/null @@ -1,135 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFplay Documentation -@titlepage -@sp 7 -@center @titlefont{FFplay Documentation} -@sp 3 -@end titlepage - - -@chapter Introduction - -@c man begin DESCRIPTION -FFplay is a very simple and portable media player using the FFmpeg -libraries and the SDL library. It is mostly used as a testbed for the -various FFmpeg APIs. 
-@c man end - -@chapter Invocation - -@section Syntax -@example -@c man begin SYNOPSIS -ffplay [options] @file{input_file} -@c man end -@end example - -@c man begin OPTIONS -@section Main options - -@table @option -@item -h -show help -@item -x width -force displayed width -@item -y height -force displayed height -@item -s size -Set frame size (WxH or abbreviation), needed for videos which don't -contain a header with the framesize like raw YUV. -@item -an -disable audio -@item -vn -disable video -@item -ss pos -seek to a given position in seconds -@item -bytes -seek by bytes -@item -nodisp -disable graphical display -@item -f fmt -force format -@end table - -@section Advanced options -@table @option -@item -pix_fmt format -set pixel format -@item -stats -Show the stream duration, the codec parameters, the current position in -the stream and the audio/video synchronisation drift. -@item -debug -print specific debug info -@item -bug -work around bugs -@item -vismv -visualize motion vectors -@item -fast -non-spec-compliant optimizations -@item -genpts -generate pts -@item -rtp_tcp -Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful -if you are streaming with the RTSP protocol. -@item -sync type -Set the master clock to audio (@code{type=audio}), video -(@code{type=video}) or external (@code{type=ext}). Default is audio. The -master clock is used to control audio-video synchronization. Most media -players use audio as master clock, but in some cases (streaming or high -quality broadcast) it is necessary to change that. This option is mainly -used for debugging purposes. 
-@item -threads count -thread count -@end table - -@section While playing - -@table @key -@item q, ESC -quit - -@item f -toggle full screen - -@item p, SPC -pause - -@item a -cycle audio channel - -@item v -cycle video channel - -@item w -show audio waves - -@item left/right -seek backward/forward 10 seconds - -@item down/up -seek backward/forward 1 minute - -@item mouse click -seek to percentage in file corresponding to fraction of width - -@end table - -@c man end - -@ignore - -@setfilename ffplay -@settitle FFplay media player - -@c man begin SEEALSO -ffmpeg(1), ffserver(1) and the html documentation of @file{ffmpeg}. -@c man end - -@c man begin AUTHOR -Fabrice Bellard -@c man end - -@end ignore - -@bye diff --git a/contrib/ffmpeg/doc/ffserver-doc.texi b/contrib/ffmpeg/doc/ffserver-doc.texi deleted file mode 100644 index 9b0373360..000000000 --- a/contrib/ffmpeg/doc/ffserver-doc.texi +++ /dev/null @@ -1,224 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle FFserver Documentation -@titlepage -@sp 7 -@center @titlefont{FFserver Documentation} -@sp 3 -@end titlepage - - -@chapter Introduction - -@c man begin DESCRIPTION -FFserver is a streaming server for both audio and video. It supports -several live feeds, streaming from files and time shifting on live feeds -(you can seek to positions in the past on each live feed, provided you -specify a big enough feed storage in ffserver.conf). - -This documentation covers only the streaming aspects of ffserver / -ffmpeg. All questions about parameters for ffmpeg, codec questions, -etc. are not covered here. Read @file{ffmpeg-doc.html} for more -information. -@c man end - -@chapter QuickStart - -[Contributed by Philip Gladstone, philip-ffserver at gladstonefamily dot net] - -@section What can this do? 
- -When properly configured and running, you can capture video and audio in real -time from a suitable capture card, and stream it out over the Internet to -either Windows Media Player or RealAudio player (with some restrictions). - -It can also stream from files, though that is currently broken. Very often, a -web server can be used to serve up the files just as well. - -It can stream prerecorded video from .ffm files, though it is somewhat tricky -to make it work correctly. - -@section What do I need? - -I use Linux on a 900MHz Duron with a cheapo Bt848 based TV capture card. I'm -using stock Linux 2.4.17 with the stock drivers. [Actually that isn't true, -I needed some special drivers for my motherboard-based sound card.] - -I understand that FreeBSD systems work just fine as well. - -@section How do I make it work? - -First, build the kit. It *really* helps to have installed LAME first. Then when -you run the ffserver ./configure, make sure that you have the -@code{--enable-libmp3lame} flag turned on. - -LAME is important as it allows for streaming audio to Windows Media Player. -Don't ask why the other audio types do not work. - -As a simple test, just run the following two command lines (assuming that you -have a V4L video capture card): - -@example -./ffserver -f doc/ffserver.conf & -./ffmpeg http://localhost:8090/feed1.ffm -@end example - -At this point you should be able to go to your Windows machine and fire up -Windows Media Player (WMP). Go to Open URL and enter - -@example - http://<linuxbox>:8090/test.asf -@end example - -You should (after a short delay) see video and hear audio. - -WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to -transfer the entire file before starting to play. -The same is true of AVI files. - -@section What happens next? - -You should edit the ffserver.conf file to suit your needs (in terms of -frame rates etc). Then install ffserver and ffmpeg, write a script to start -them up, and off you go. 
- -@section Troubleshooting - -@subsection I don't hear any audio, but video is fine. - -Maybe you didn't install LAME, or got your ./configure statement wrong. Check -the ffmpeg output to see if a line referring to MP3 is present. If not, then -your configuration was incorrect. If it is, then maybe your wiring is not -set up correctly. Maybe the sound card is not getting data from the right -input source. Maybe you have a really awful audio interface (like I do) -that only captures in stereo and also requires that one channel be flipped. -If you are one of these people, then export 'AUDIO_FLIP_LEFT=1' before -starting ffmpeg. - -@subsection The audio and video loose sync after a while. - -Yes, they do. - -@subsection After a long while, the video update rate goes way down in WMP. - -Yes, it does. Who knows why? - -@subsection WMP 6.4 behaves differently to WMP 7. - -Yes, it does. Any thoughts on this would be gratefully received. These -differences extend to embedding WMP into a web page. [There are two -object IDs that you can use: The old one, which does not play well, and -the new one, which does (both tested on the same system). However, -I suspect that the new one is not available unless you have installed WMP 7]. - -@section What else can it do? - -You can replay video from .ffm files that was recorded earlier. -However, there are a number of caveats, including the fact that the -ffserver parameters must match the original parameters used to record the -file. If they do not, then ffserver deletes the file before recording into it. -(Now that I write this, it seems broken). - -You can fiddle with many of the codec choices and encoding parameters, and -there are a bunch more parameters that you cannot control. Post a message -to the mailing list if there are some 'must have' parameters. Look in -ffserver.conf for a list of the currently available controls. - -It will automatically generate the ASX or RAM files that are often used -in browsers. 
These files are actually redirections to the underlying ASF -or RM file. The reason for this is that the browser often fetches the -entire file before starting up the external viewer. The redirection files -are very small and can be transferred quickly. [The stream itself is -often 'infinite' and thus the browser tries to download it and never -finishes.] - -@section Tips - -* When you connect to a live stream, most players (WMP, RA, etc) want to -buffer a certain number of seconds of material so that they can display the -signal continuously. However, ffserver (by default) starts sending data -in realtime. This means that there is a pause of a few seconds while the -buffering is being done by the player. The good news is that this can be -cured by adding a '?buffer=5' to the end of the URL. This means that the -stream should start 5 seconds in the past -- and so the first 5 seconds -of the stream are sent as fast as the network will allow. It will then -slow down to real time. This noticeably improves the startup experience. - -You can also add a 'Preroll 15' statement into the ffserver.conf that will -add the 15 second prebuffering on all requests that do not otherwise -specify a time. In addition, ffserver will skip frames until a key_frame -is found. This further reduces the startup delay by not transferring data -that will be discarded. - -* You may want to adjust the MaxBandwidth in the ffserver.conf to limit -the amount of bandwidth consumed by live streams. - -@section Why does the ?buffer / Preroll stop working after a time? - -It turns out that (on my machine at least) the number of frames successfully -grabbed is marginally less than the number that ought to be grabbed. This -means that the timestamp in the encoded data stream gets behind realtime. -This means that if you say 'Preroll 10', then when the stream gets 10 -or more seconds behind, there is no Preroll left. - -Fixing this requires a change in the internals of how timestamps are -handled. 
- -@section Does the @code{?date=} stuff work. - -Yes (subject to the limitation outlined above). Also note that whenever you -start ffserver, it deletes the ffm file (if any parameters have changed), -thus wiping out what you had recorded before. - -The format of the @code{?date=xxxxxx} is fairly flexible. You should use one -of the following formats (the 'T' is literal): - -@example -* YYYY-MM-DDTHH:MM:SS (localtime) -* YYYY-MM-DDTHH:MM:SSZ (UTC) -@end example - -You can omit the YYYY-MM-DD, and then it refers to the current day. However -note that @samp{?date=16:00:00} refers to 16:00 on the current day -- this -may be in the future and so is unlikely to be useful. - -You use this by adding the ?date= to the end of the URL for the stream. -For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}. - -@chapter Invocation -@section Syntax -@example -@c man begin SYNOPSIS -ffserver [options] -@c man end -@end example - -@section Options -@c man begin OPTIONS -@table @option -@item -L -Print the license. -@item -h -Print the help. -@item -f configfile -Use @file{configfile} instead of @file{/etc/ffserver.conf}. -@end table -@c man end - -@ignore - -@setfilename ffsserver -@settitle FFserver video server - -@c man begin SEEALSO -ffmpeg(1), ffplay(1), the @file{ffmpeg/doc/ffserver.conf} example and -the HTML documentation of @file{ffmpeg}. -@c man end - -@c man begin AUTHOR -Fabrice Bellard -@c man end - -@end ignore - -@bye diff --git a/contrib/ffmpeg/doc/ffserver.conf b/contrib/ffmpeg/doc/ffserver.conf deleted file mode 100644 index f7db66ed2..000000000 --- a/contrib/ffmpeg/doc/ffserver.conf +++ /dev/null @@ -1,352 +0,0 @@ -# Port on which the server is listening. You must select a different -# port from your standard HTTP web server if it is running on the same -# computer. -Port 8090 - -# Address on which the server is bound. Only useful if you have -# several network interfaces. 
-BindAddress 0.0.0.0 - -# Number of simultaneous requests that can be handled. Since FFServer -# is very fast, it is more likely that you will want to leave this high -# and use MaxBandwidth, below. -MaxClients 1000 - -# This is the maximum amount of kbit/sec that you are prepared to -# consume when streaming to clients. -MaxBandwidth 1000 - -# Access log file (uses standard Apache log file format) -# '-' is the standard output. -CustomLog - - -# Suppress that if you want to launch ffserver as a daemon. -NoDaemon - - -################################################################## -# Definition of the live feeds. Each live feed contains one video -# and/or audio sequence coming from an ffmpeg encoder or another -# ffserver. This sequence may be encoded simultaneously with several -# codecs at several resolutions. - -<Feed feed1.ffm> - -# You must use 'ffmpeg' to send a live feed to ffserver. In this -# example, you can type: -# -# ffmpeg http://localhost:8090/feed1.ffm - -# ffserver can also do time shifting. It means that it can stream any -# previously recorded live stream. The request should contain: -# "http://xxxx?date=[YYYY-MM-DDT][[HH:]MM:]SS[.m...]". You must specify -# a path where the feed is stored on disk. You also specify the -# maximum size of the feed, where zero means unlimited. Default: -# File=/tmp/feed_name.ffm FileMaxSize=5M -File /tmp/feed1.ffm -FileMaxSize 200K - -# You could specify -# ReadOnlyFile /saved/specialvideo.ffm -# This marks the file as readonly and it will not be deleted or updated. - -# Specify launch in order to start ffmpeg automatically. -# First ffmpeg must be defined with an appropriate path if needed, -# after that options can follow, but avoid adding the http:// field -#Launch ffmpeg - -# Only allow connections from localhost to the feed. 
-ACL allow 127.0.0.1 - -</Feed> - - -################################################################## -# Now you can define each stream which will be generated from the -# original audio and video stream. Each format has a filename (here -# 'test1.mpg'). FFServer will send this stream when answering a -# request containing this filename. - -<Stream test1.mpg> - -# coming from live feed 'feed1' -Feed feed1.ffm - -# Format of the stream : you can choose among: -# mpeg : MPEG-1 multiplexed video and audio -# mpegvideo : only MPEG-1 video -# mp2 : MPEG-2 audio (use AudioCodec to select layer 2 and 3 codec) -# ogg : Ogg format (Vorbis audio codec) -# rm : RealNetworks-compatible stream. Multiplexed audio and video. -# ra : RealNetworks-compatible stream. Audio only. -# mpjpeg : Multipart JPEG (works with Netscape without any plugin) -# jpeg : Generate a single JPEG image. -# asf : ASF compatible streaming (Windows Media Player format). -# swf : Macromedia Flash compatible stream -# avi : AVI format (MPEG-4 video, MPEG audio sound) -# master : special ffmpeg stream used to duplicate a server -Format mpeg - -# Bitrate for the audio stream. Codecs usually support only a few -# different bitrates. -AudioBitRate 32 - -# Number of audio channels: 1 = mono, 2 = stereo -AudioChannels 1 - -# Sampling frequency for audio. When using low bitrates, you should -# lower this frequency to 22050 or 11025. The supported frequencies -# depend on the selected audio codec. 
-AudioSampleRate 44100 - -# Bitrate for the video stream -VideoBitRate 64 - -# Ratecontrol buffer size -VideoBufferSize 40 - -# Number of frames per second -VideoFrameRate 3 - -# Size of the video frame: WxH (default: 160x128) -# The following abbreviations are defined: sqcif, qcif, cif, 4cif, qqvga, -# qvga, vga, svga, xga, uxga, qxga, sxga, qsxga, hsxga, wvga, wxga, wsxga, -# wuxga, woxga, wqsxga, wquxga, whsxga, whuxga, cga, ega, hd480, hd720, -# hd1080 -VideoSize 160x128 - -# Transmit only intra frames (useful for low bitrates, but kills frame rate). -#VideoIntraOnly - -# If non-intra only, an intra frame is transmitted every VideoGopSize -# frames. Video synchronization can only begin at an intra frame. -VideoGopSize 12 - -# More MPEG-4 parameters -# VideoHighQuality -# Video4MotionVector - -# Choose your codecs: -#AudioCodec mp2 -#VideoCodec mpeg1video - -# Suppress audio -#NoAudio - -# Suppress video -#NoVideo - -#VideoQMin 3 -#VideoQMax 31 - -# Set this to the number of seconds backwards in time to start. Note that -# most players will buffer 5-10 seconds of video, and also you need to allow -# for a keyframe to appear in the data stream. -#Preroll 15 - -# ACL: - -# You can allow ranges of addresses (or single addresses) -#ACL ALLOW <first address> <last address> - -# You can deny ranges of addresses (or single addresses) -#ACL DENY <first address> <last address> - -# You can repeat the ACL allow/deny as often as you like. It is on a per -# stream basis. The first match defines the action. If there are no matches, -# then the default is the inverse of the last ACL statement. -# -# Thus 'ACL allow localhost' only allows access from localhost. -# 'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and -# allow everybody else. 
- -</Stream> - - -################################################################## -# Example streams - - -# Multipart JPEG - -#<Stream test.mjpg> -#Feed feed1.ffm -#Format mpjpeg -#VideoFrameRate 2 -#VideoIntraOnly -#NoAudio -#Strict -1 -#</Stream> - - -# Single JPEG - -#<Stream test.jpg> -#Feed feed1.ffm -#Format jpeg -#VideoFrameRate 2 -#VideoIntraOnly -##VideoSize 352x240 -#NoAudio -#Strict -1 -#</Stream> - - -# Flash - -#<Stream test.swf> -#Feed feed1.ffm -#Format swf -#VideoFrameRate 2 -#VideoIntraOnly -#NoAudio -#</Stream> - - -# ASF compatible - -<Stream test.asf> -Feed feed1.ffm -Format asf -VideoFrameRate 15 -VideoSize 352x240 -VideoBitRate 256 -VideoBufferSize 40 -VideoGopSize 30 -AudioBitRate 64 -StartSendOnKey -</Stream> - - -# MP3 audio - -#<Stream test.mp3> -#Feed feed1.ffm -#Format mp2 -#AudioCodec mp3 -#AudioBitRate 64 -#AudioChannels 1 -#AudioSampleRate 44100 -#NoVideo -#</Stream> - - -# Ogg Vorbis audio - -#<Stream test.ogg> -#Feed feed1.ffm -#Title "Stream title" -#AudioBitRate 64 -#AudioChannels 2 -#AudioSampleRate 44100 -#NoVideo -#</Stream> - - -# Real with audio only at 32 kbits - -#<Stream test.ra> -#Feed feed1.ffm -#Format rm -#AudioBitRate 32 -#NoVideo -#NoAudio -#</Stream> - - -# Real with audio and video at 64 kbits - -#<Stream test.rm> -#Feed feed1.ffm -#Format rm -#AudioBitRate 32 -#VideoBitRate 128 -#VideoFrameRate 25 -#VideoGopSize 25 -#NoAudio -#</Stream> - - -################################################################## -# A stream coming from a file: you only need to set the input -# filename and optionally a new format. 
Supported conversions: -# AVI -> ASF - -#<Stream file.rm> -#File "/usr/local/httpd/htdocs/tlive.rm" -#NoAudio -#</Stream> - -#<Stream file.asf> -#File "/usr/local/httpd/htdocs/test.asf" -#NoAudio -#Author "Me" -#Copyright "Super MegaCorp" -#Title "Test stream from disk" -#Comment "Test comment" -#</Stream> - - -################################################################## -# RTSP examples -# -# You can access this stream with the RTSP URL: -# rtsp://localhost:5454/test1-rtsp.mpg -# -# A non-standard RTSP redirector is also created. Its URL is: -# http://localhost:8090/test1-rtsp.rtsp - -#<Stream test1-rtsp.mpg> -#Format rtp -#File "/usr/local/httpd/htdocs/test1.mpg" -#</Stream> - - -################################################################## -# SDP/multicast examples -# -# If you want to send your stream in multicast, you must set the -# multicast address with MulticastAddress. The port and the TTL can -# also be set. -# -# An SDP file is automatically generated by ffserver by adding the -# 'sdp' extension to the stream name (here -# http://localhost:8090/test1-sdp.sdp). You should usually give this -# file to your player to play the stream. -# -# The 'NoLoop' option can be used to avoid looping when the stream is -# terminated. 
- -#<Stream test1-sdp.mpg> -#Format rtp -#File "/usr/local/httpd/htdocs/test1.mpg" -#MulticastAddress 224.124.0.1 -#MulticastPort 5000 -#MulticastTTL 16 -#NoLoop -#</Stream> - - -################################################################## -# Special streams - -# Server status - -<Stream stat.html> -Format status - -# Only allow local people to get the status -ACL allow localhost -ACL allow 192.168.0.0 192.168.255.255 - -#FaviconURL http://pond1.gladstonefamily.net:8080/favicon.ico -</Stream> - - -# Redirect index.html to the appropriate site - -<Redirect index.html> -URL http://www.ffmpeg.org/ -</Redirect> - - diff --git a/contrib/ffmpeg/doc/general.texi b/contrib/ffmpeg/doc/general.texi deleted file mode 100644 index 8fc27d603..000000000 --- a/contrib/ffmpeg/doc/general.texi +++ /dev/null @@ -1,985 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle General Documentation -@titlepage -@sp 7 -@center @titlefont{General Documentation} -@sp 3 -@end titlepage - - -@chapter external libraries - -FFmpeg can be hooked up with a number of external libraries to add support -for more formats. None of them are used by default, their use has to be -explicitly requested by passing the appropriate flags to @file{./configure}. - -@section AMR - -AMR comes in two different flavors, wideband and narrowband. FFmpeg can make -use of the AMR wideband (floating-point mode) and the AMR narrowband -(floating-point mode) reference decoders and encoders. - -Go to @url{http://www.penguin.cz/~utx/amr} and follow the instructions for -installing the libraries. Then pass @code{--enable-libamr-nb} and/or -@code{--enable-libamr-wb} to configure to enable the libraries. - -Note that libamr is copyrighted without any sort of license grant. This means -that you can use it if you legally obtained it but you are not allowed to -redistribute it in any way. 
@strong{Any FFmpeg binaries with libamr support -you create are non-free and unredistributable!} - - -@chapter Supported File Formats and Codecs - -You can use the @code{-formats} option to have an exhaustive list. - -@section File Formats - -FFmpeg supports the following file formats through the @code{libavformat} -library: - -@multitable @columnfractions .4 .1 .1 .4 -@item Supported File Format @tab Encoding @tab Decoding @tab Comments -@item MPEG audio @tab X @tab X -@item MPEG-1 systems @tab X @tab X -@tab muxed audio and video -@item MPEG-2 PS @tab X @tab X -@tab also known as @code{VOB} file -@item MPEG-2 TS @tab @tab X -@tab also known as DVB Transport Stream -@item ASF@tab X @tab X -@item AVI@tab X @tab X -@item WAV@tab X @tab X -@item Macromedia Flash@tab X @tab X -@item AVM2 (Flash 9) @tab X @tab X -@tab Only embedded audio is decoded. -@item FLV @tab X @tab X -@tab Macromedia Flash video files -@item Real Audio and Video @tab X @tab X -@item Raw AC3 @tab X @tab X -@item Raw MJPEG @tab X @tab X -@item Raw MPEG video @tab X @tab X -@item Raw PCM8/16 bits, mulaw/Alaw@tab X @tab X -@item Raw CRI ADX audio @tab X @tab X -@item Raw Shorten audio @tab @tab X -@item SUN AU format @tab X @tab X -@item NUT @tab X @tab X @tab NUT Open Container Format -@item QuickTime @tab X @tab X -@item MPEG-4 @tab X @tab X -@tab MPEG-4 is a variant of QuickTime. -@item Raw MPEG4 video @tab X @tab X -@item DV @tab X @tab X -@item 4xm @tab @tab X -@tab 4X Technologies format, used in some games. -@item Playstation STR @tab @tab X -@item Id RoQ @tab X @tab X -@tab Used in Quake III, Jedi Knight 2, other computer games. -@item Interplay MVE @tab @tab X -@tab Format used in various Interplay computer games. -@item WC3 Movie @tab @tab X -@tab Multimedia format used in Origin's Wing Commander III computer game. -@item Sega FILM/CPK @tab @tab X -@tab Used in many Sega Saturn console games. 
-@item Westwood Studios VQA/AUD @tab @tab X -@tab Multimedia formats used in Westwood Studios games. -@item Id Cinematic (.cin) @tab @tab X -@tab Used in Quake II. -@item FLIC format @tab @tab X -@tab .fli/.flc files -@item Sierra VMD @tab @tab X -@tab Used in Sierra CD-ROM games. -@item Sierra Online @tab @tab X -@tab .sol files used in Sierra Online games. -@item Matroska @tab X @tab X -@item Electronic Arts Multimedia @tab @tab X -@tab Used in various EA games; files have extensions like WVE and UV2. -@item Nullsoft Video (NSV) format @tab @tab X -@item ADTS AAC audio @tab X @tab X -@item Creative VOC @tab X @tab X @tab Created for the Sound Blaster Pro. -@item American Laser Games MM @tab @tab X -@tab Multimedia format used in games like Mad Dog McCree -@item AVS @tab @tab X -@tab Multimedia format used by the Creature Shock game. -@item Smacker @tab @tab X -@tab Multimedia format used by many games. -@item GXF @tab X @tab X -@tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley playout servers. -@item CIN @tab @tab X -@tab Multimedia format used by Delphine Software games. -@item MXF @tab @tab X -@tab Material eXchange Format SMPTE 377M, used by D-Cinema, broadcast industry. -@item SEQ @tab @tab X -@tab Tiertex .seq files used in the DOS CDROM version of the game Flashback. -@item DXA @tab @tab X -@tab This format is used in non-Windows version of Feeble Files game and -different game cutscenes repacked for use with ScummVM. -@item THP @tab @tab X -@tab Used on the Nintendo GameCube. -@item C93 @tab @tab X -@tab Used in the game Cyberia from Interplay. -@item Bethsoft VID @tab @tab X -@tab Used in some games from Bethesda Softworks. -@item CRYO APC @tab @tab X -@tab Audio format used in some games by CRYO Interactive Entertainment. -@item Monkey's Audio @tab @tab X -@item SIFF @tab @tab X -@tab Audio and video format used in some games by Beam Software -@end multitable - -@code{X} means that encoding (resp. decoding) is supported. 
- -@section Image Formats - -FFmpeg can read and write images for each frame of a video sequence. The -following image formats are supported: - -@multitable @columnfractions .4 .1 .1 .4 -@item Supported Image Format @tab Encoding @tab Decoding @tab Comments -@item PGM, PPM @tab X @tab X -@item PAM @tab X @tab X @tab PAM is a PNM extension with alpha support. -@item PGMYUV @tab X @tab X @tab PGM with U and V components in YUV 4:2:0 -@item JPEG @tab X @tab X @tab Progressive JPEG is not supported. -@item .Y.U.V @tab X @tab X @tab one raw file per component -@item animated GIF @tab X @tab X @tab Only uncompressed GIFs are generated. -@item PNG @tab X @tab X @tab 2 bit and 4 bit/pixel not supported yet. -@item Targa @tab @tab X @tab Targa (.TGA) image format. -@item TIFF @tab X @tab X @tab YUV, JPEG and some extensions are not supported yet. -@item SGI @tab X @tab X @tab SGI RGB image format -@item PTX @tab @tab X @tab V.Flash PTX format -@end multitable - -@code{X} means that encoding (resp. decoding) is supported. 
- -@section Video Codecs - -@multitable @columnfractions .4 .1 .1 .4 -@item Supported Codec @tab Encoding @tab Decoding @tab Comments -@item MPEG-1 video @tab X @tab X -@item MPEG-2 video @tab X @tab X -@item MPEG-4 @tab X @tab X -@item MSMPEG4 V1 @tab X @tab X -@item MSMPEG4 V2 @tab X @tab X -@item MSMPEG4 V3 @tab X @tab X -@item WMV7 @tab X @tab X -@item WMV8 @tab X @tab X @tab not completely working -@item WMV9 @tab @tab X @tab not completely working -@item VC1 @tab @tab X -@item H.261 @tab X @tab X -@item H.263(+) @tab X @tab X @tab also known as RealVideo 1.0 -@item H.264 @tab @tab X -@item RealVideo 1.0 @tab X @tab X -@item RealVideo 2.0 @tab X @tab X -@item MJPEG @tab X @tab X -@item lossless MJPEG @tab X @tab X -@item JPEG-LS @tab X @tab X @tab fourcc: MJLS, lossless and near-lossless is supported -@item Apple MJPEG-B @tab @tab X -@item Sunplus MJPEG @tab @tab X @tab fourcc: SP5X -@item DV @tab X @tab X -@item HuffYUV @tab X @tab X -@item FFmpeg Video 1 @tab X @tab X @tab experimental lossless codec (fourcc: FFV1) -@item FFmpeg Snow @tab X @tab X @tab experimental wavelet codec (fourcc: SNOW) -@item Asus v1 @tab X @tab X @tab fourcc: ASV1 -@item Asus v2 @tab X @tab X @tab fourcc: ASV2 -@item Creative YUV @tab @tab X @tab fourcc: CYUV -@item Sorenson Video 1 @tab X @tab X @tab fourcc: SVQ1 -@item Sorenson Video 3 @tab @tab X @tab fourcc: SVQ3 -@item On2 VP3 @tab @tab X @tab still experimental -@item On2 VP5 @tab @tab X @tab fourcc: VP50 -@item On2 VP6 @tab @tab X @tab fourcc: VP60,VP61,VP62 -@item Theora @tab X @tab X @tab still experimental -@item Intel Indeo 3 @tab @tab X -@item FLV @tab X @tab X @tab Sorenson H.263 used in Flash -@item Flash Screen Video @tab X @tab X @tab fourcc: FSV1 -@item ATI VCR1 @tab @tab X @tab fourcc: VCR1 -@item ATI VCR2 @tab @tab X @tab fourcc: VCR2 -@item Cirrus Logic AccuPak @tab @tab X @tab fourcc: CLJR -@item 4X Video @tab @tab X @tab Used in certain computer games. 
-@item Sony Playstation MDEC @tab @tab X -@item Id RoQ @tab X @tab X @tab Used in Quake III, Jedi Knight 2, other computer games. -@item Xan/WC3 @tab @tab X @tab Used in Wing Commander III .MVE files. -@item Interplay Video @tab @tab X @tab Used in Interplay .MVE files. -@item Apple Animation @tab X @tab X @tab fourcc: 'rle ' -@item Apple Graphics @tab @tab X @tab fourcc: 'smc ' -@item Apple Video @tab @tab X @tab fourcc: rpza -@item Apple QuickDraw @tab @tab X @tab fourcc: qdrw -@item Cinepak @tab @tab X -@item Microsoft RLE @tab @tab X -@item Microsoft Video-1 @tab @tab X -@item Westwood VQA @tab @tab X -@item Id Cinematic Video @tab @tab X @tab Used in Quake II. -@item Planar RGB @tab @tab X @tab fourcc: 8BPS -@item FLIC video @tab @tab X -@item Duck TrueMotion v1 @tab @tab X @tab fourcc: DUCK -@item Duck TrueMotion v2 @tab @tab X @tab fourcc: TM20 -@item VMD Video @tab @tab X @tab Used in Sierra VMD files. -@item MSZH @tab @tab X @tab Part of LCL -@item ZLIB @tab X @tab X @tab Part of LCL, encoder experimental -@item TechSmith Camtasia @tab @tab X @tab fourcc: TSCC -@item IBM Ultimotion @tab @tab X @tab fourcc: ULTI -@item Miro VideoXL @tab @tab X @tab fourcc: VIXL -@item QPEG @tab @tab X @tab fourccs: QPEG, Q1.0, Q1.1 -@item LOCO @tab @tab X @tab -@item Winnov WNV1 @tab @tab X @tab -@item Autodesk Animator Studio Codec @tab @tab X @tab fourcc: AASC -@item Fraps FPS1 @tab @tab X @tab -@item CamStudio @tab @tab X @tab fourcc: CSCD -@item American Laser Games Video @tab @tab X @tab Used in games like Mad Dog McCree -@item ZMBV @tab X @tab X @tab Encoder works only on PAL8 -@item AVS Video @tab @tab X @tab Video encoding used by the Creature Shock game. -@item Smacker Video @tab @tab X @tab Video encoding used in Smacker. -@item RTjpeg @tab @tab X @tab Video encoding used in NuppelVideo files. -@item KMVC @tab @tab X @tab Codec used in Worms games. -@item VMware Video @tab @tab X @tab Codec used in videos captured by VMware. 
-@item Cin Video @tab @tab X @tab Codec used in Delphine Software games. -@item Tiertex Seq Video @tab @tab X @tab Codec used in DOS CDROM FlashBack game. -@item DXA Video @tab @tab X @tab Codec originally used in Feeble Files game. -@item AVID DNxHD @tab X @tab X @tab aka SMPTE VC3 -@item C93 Video @tab @tab X @tab Codec used in Cyberia game. -@item THP @tab @tab X @tab Used on the Nintendo GameCube. -@item Bethsoft VID @tab @tab X @tab Used in some games from Bethesda Softworks. -@item Renderware TXD @tab @tab X @tab Texture dictionaries used by the Renderware Engine. -@item AMV @tab @tab X @tab Used in Chinese MP3 players. -@end multitable - -@code{X} means that encoding (resp. decoding) is supported. - -@section Audio Codecs - -@multitable @columnfractions .4 .1 .1 .1 .7 -@item Supported Codec @tab Encoding @tab Decoding @tab Comments -@item MPEG audio layer 2 @tab IX @tab IX -@item MPEG audio layer 1/3 @tab X @tab IX -@tab MP3 encoding is supported through the external library LAME. -@item AC3 @tab IX @tab IX -@tab liba52 is used internally for decoding. -@item Vorbis @tab X @tab X -@item WMA V1/V2 @tab X @tab X -@item AAC @tab X @tab X -@tab Supported through the external library libfaac/libfaad. -@item Microsoft ADPCM @tab X @tab X -@item AMV IMA ADPCM @tab @tab X -@tab Used in AMV files -@item MS IMA ADPCM @tab X @tab X -@item QT IMA ADPCM @tab @tab X -@item 4X IMA ADPCM @tab @tab X -@item G.726 ADPCM @tab X @tab X -@item Duck DK3 IMA ADPCM @tab @tab X -@tab Used in some Sega Saturn console games. -@item Duck DK4 IMA ADPCM @tab @tab X -@tab Used in some Sega Saturn console games. -@item Westwood Studios IMA ADPCM @tab @tab X -@tab Used in Westwood Studios games like Command and Conquer. -@item SMJPEG IMA ADPCM @tab @tab X -@tab Used in certain Loki game ports. -@item CD-ROM XA ADPCM @tab @tab X -@item CRI ADX ADPCM @tab X @tab X -@tab Used in Sega Dreamcast games. -@item Electronic Arts ADPCM @tab @tab X -@tab Used in various EA titles. 
-@item Creative ADPCM @tab @tab X -@tab 16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2 -@item THP ADPCM @tab @tab X -@tab Used on the Nintendo GameCube. -@item RA144 @tab @tab X -@tab Real 14400 bit/s codec -@item RA288 @tab @tab X -@tab Real 28800 bit/s codec -@item RADnet @tab X @tab IX -@tab Real low bitrate AC3 codec, liba52 is used for decoding. -@item AMR-NB @tab X @tab X -@tab Supported through an external library. -@item AMR-WB @tab X @tab X -@tab Supported through an external library. -@item DV audio @tab @tab X -@item Id RoQ DPCM @tab X @tab X -@tab Used in Quake III, Jedi Knight 2, other computer games. -@item Interplay MVE DPCM @tab @tab X -@tab Used in various Interplay computer games. -@item Xan DPCM @tab @tab X -@tab Used in Origin's Wing Commander IV AVI files. -@item Sierra Online DPCM @tab @tab X -@tab Used in Sierra Online game audio files. -@item Apple MACE 3 @tab @tab X -@item Apple MACE 6 @tab @tab X -@item FLAC lossless audio @tab X @tab X -@item Shorten lossless audio @tab @tab X -@item Apple lossless audio @tab @tab X -@tab QuickTime fourcc 'alac' -@item FFmpeg Sonic @tab X @tab X -@tab experimental lossy/lossless codec -@item Qdesign QDM2 @tab @tab X -@tab there are still some distortions -@item Real COOK @tab @tab X -@tab All versions except 5.1 are supported -@item DSP Group TrueSpeech @tab @tab X -@item True Audio (TTA) @tab @tab X -@item Smacker Audio @tab @tab X -@item WavPack Audio @tab @tab X -@item Cin Audio @tab @tab X -@tab Codec used in Delphine Software games. -@item Intel Music Coder @tab @tab X -@item Musepack @tab @tab X -@tab SV7 and SV8 are supported -@item DTS Coherent Audio @tab @tab X -@item ATRAC 3 @tab @tab X -@item Monkey's Audio @tab @tab X @tab Only versions 3.97-3.99 are supported -@item Nellymoser ASAO @tab @tab X -@end multitable - -@code{X} means that encoding (resp. decoding) is supported. 
- -@code{I} means that an integer-only version is available, too (ensures high -performance on systems without hardware floating point support). - -@chapter Platform Specific information - -@section BSD - -BSD make will not build FFmpeg, you need to install and use GNU Make -(@file{gmake}). - -@section Windows - -To get help and instructions for building FFmpeg under Windows, check out -the FFmpeg Windows Help Forum at -@url{http://arrozcru.no-ip.org/ffmpeg/}. - -@subsection Native Windows compilation - -FFmpeg can be built to run natively on Windows using the MinGW tools. Install -the current versions of MSYS and MinGW from @url{http://www.mingw.org/}. Also -install the coreutils package. You can find detailed installation -instructions in the download section and the FAQ. - -Within the MSYS shell, configure and make with: - -@example -./configure --enable-memalign-hack -make -make install -@end example - -This will install @file{ffmpeg.exe} along with many other development files -to @file{/usr/local}. You may specify another install path using the -@code{--prefix} option in @file{configure}. - -Notes: - -@itemize - -@item Use at least bash 3.1. Older versions are known to fail on the -configure script. - -@item In order to compile vhooks, you must have a POSIX-compliant libdl in -your MinGW system. Get dlfcn-win32 from -@url{http://code.google.com/p/dlfcn-win32}. - -@item In order to compile FFplay, you must have the MinGW development library -of SDL. Get it from @url{http://www.libsdl.org}. -Edit the @file{bin/sdl-config} script so that it points to the correct prefix -where SDL was installed. Verify that @file{sdl-config} can be launched from -the MSYS command line. - -@item The target @code{make wininstaller} can be used to create a -Nullsoft-based Windows installer for FFmpeg and FFplay. @file{SDL.dll} -must be copied to the FFmpeg directory in order to build the -installer. 
- -@item By using @code{./configure --enable-shared} when configuring FFmpeg, -you can build libavutil, libavcodec and libavformat as DLLs. - -@end itemize - -@subsection Microsoft Visual C++ compatibility - -As stated in the FAQ, FFmpeg will not compile under MSVC++. However, if you -want to use the libav* libraries in your own applications, you can still -compile those applications using MSVC++. But the libav* libraries you link -to @emph{must} be built with MinGW. However, you will not be able to debug -inside the libav* libraries, since MSVC++ does not recognize the debug -symbols generated by GCC. -We strongly recommend you to move over from MSVC++ to MinGW tools. - -This description of how to use the FFmpeg libraries with MSVC++ is based on -Microsoft Visual C++ 2005 Express Edition. If you have a different version, -you might have to modify the procedures slightly. - -@subsubsection Using static libraries - -Assuming you have just built and installed FFmpeg in @file{/usr/local}. - -@enumerate - -@item Create a new console application ("File / New / Project") and then -select "Win32 Console Application". On the appropriate page of the -Application Wizard, uncheck the "Precompiled headers" option. - -@item Write the source code for your application, or, for testing, just -copy the code from an existing sample application into the source file -that MSVC++ has already created for you. For example, you can copy -@file{output_example.c} from the FFmpeg distribution. - -@item Open the "Project / Properties" dialog box. In the "Configuration" -combo box, select "All Configurations" so that the changes you make will -affect both debug and release builds. In the tree view on the left hand -side, select "C/C++ / General", then edit the "Additional Include -Directories" setting to contain the path where the FFmpeg includes were -installed (i.e. @file{c:\msys\1.0\local\include}). 
- -@item Still in the "Project / Properties" dialog box, select -"Linker / General" from the tree view and edit the -"Additional Library Directories" setting to contain the @file{lib} -directory where FFmpeg was installed (i.e. @file{c:\msys\1.0\local\lib}), -the directory where MinGW libs are installed (i.e. @file{c:\mingw\lib}), -and the directory where MinGW's GCC libs are installed -(i.e. @file{C:\mingw\lib\gcc\mingw32\4.2.1-sjlj}). Then select -"Linker / Input" from the tree view, and add the files @file{libavformat.a}, -@file{libavcodec.a}, @file{libavutil.a}, @file{libmingwex.a}, -@file{libgcc.a}, and any other libraries you used (i.e. @file{libz.a}) -to the end of "Additional Dependencies". - -@item Now, select "C/C++ / Code Generation" from the tree view. Select -"Debug" in the "Configuration" combo box. Make sure that "Runtime -Library" is set to "Multi-threaded Debug DLL". Then, select "Release" in -the "Configuration" combo box and make sure that "Runtime Library" is -set to "Multi-threaded DLL". - -@item Click "OK" to close the "Project / Properties" dialog box. - -@item MSVC++ lacks some C99 header files that are fundamental for FFmpeg. -Get msinttypes from @url{http://code.google.com/p/msinttypes/downloads/list} -and install it in MSVC++'s include directory -(i.e. @file{C:\Program Files\Microsoft Visual Studio 8\VC\include}). - -@item MSVC++ also does not understand the @code{inline} keyword used by -FFmpeg, so you must add this line before @code{#include}ing libav*: -@example -#define inline _inline -@end example - -@item If you used @file{output_example.c} as your sample application, -you will have to edit the @code{#include}s to point to the files which -are under the @file{ffmpeg} directory (i.e. @code{<ffmpeg/avformat.h>}). - -@item Build your application, everything should work. 
- -@end enumerate - -@subsubsection Using shared libraries - -This is how to create DLL and LIB files that are compatible with MSVC++: - -@enumerate - -@item Add a call to @file{vcvars32.bat} (which sets up the environment -variables for the Visual C++ tools) as the first line of @file{msys.bat}. -The standard location for @file{vcvars32.bat} is -@file{C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat}, -and the standard location for @file{msys.bat} is @file{C:\msys\1.0\msys.bat}. -If this corresponds to your setup, add the following line as the first line -of @file{msys.bat}: - -@example -call "C:\Program Files\Microsoft Visual Studio 8\VC\bin\vcvars32.bat" -@end example - -Alternatively, you may start the @file{Visual Studio 2005 Command Prompt}, -and run @file{c:\msys\1.0\msys.bat} from there. - -@item Within the MSYS shell, run @code{lib.exe}. If you get a help message -from @file{Microsoft (R) Library Manager}, this means your environment -variables are set up correctly, the @file{Microsoft (R) Library Manager} -is on the path and will be used by FFmpeg to create -MSVC++-compatible import libraries. - -@item Build FFmpeg with - -@example -./configure --enable-shared --enable-memalign-hack -make -make install -@end example - -Your install path (@file{/usr/local/} by default) should now have the -necessary DLL and LIB files under the @file{bin} directory. - -@end enumerate - -To use those files with MSVC++, do the same as you would do with -the static libraries, as described above. But in Step 4, -you should only need to add the directory where the LIB files are installed -(i.e. @file{c:\msys\usr\local\bin}). This is not a typo, the LIB files are -installed in the @file{bin} directory. And instead of adding @file{libxx.a} -files, you should add @file{avcodec.lib}, @file{avformat.lib}, and -@file{avutil.lib}. 
There should be no need for @file{libmingwex.a}, -@file{libgcc.a}, and @file{wsock32.lib}, nor any other external library -statically linked into the DLLs. The @file{bin} directory contains a bunch -of DLL files, but the ones that are actually used to run your application -are the ones with a major version number in their filenames -(i.e. @file{avcodec-51.dll}). - -@subsection Cross compilation for Windows with Linux - -You must use the MinGW cross compilation tools available at -@url{http://www.mingw.org/}. - -Then configure FFmpeg with the following options: -@example -./configure --target-os=mingw32 --cross-prefix=i386-mingw32msvc- -@end example -(you can change the cross-prefix according to the prefix chosen for the -MinGW tools). - -Then you can easily test FFmpeg with Wine -(@url{http://www.winehq.com/}). - -@subsection Compilation under Cygwin - -The main issue with Cygwin is that newlib, its C library, does not -contain llrint(). However, it is possible to leverage the -implementation in MinGW. - -Just install your Cygwin with all the "Base" packages, plus the -following "Devel" ones: -@example -binutils, gcc-core, make, subversion, mingw-runtime -@end example - -Do not install binutils-20060709-1 (they are buggy on shared builds); -use binutils-20050610-1 instead. - -Then create a small library that just contains llrint(): - -@example -ar x /usr/lib/mingw/libmingwex.a llrint.o -ar cq /usr/local/lib/libllrint.a llrint.o -@end example - -Then run - -@example -./configure --enable-static --disable-shared --extra-ldflags='-L /usr/local/lib' --extra-libs='-l llrint' -@end example - -to make a static build or - -@example -./configure --enable-shared --disable-static --extra-ldflags='-L /usr/local/lib' --extra-libs='-l llrint' -@end example - -to build shared libraries. 
- -If you want to build FFmpeg with additional libraries, download Cygwin -"Devel" packages for Ogg and Vorbis from any Cygwin packages repository -and/or SDL, xvid, faac, faad2 packages from Cygwin Ports, -(@url{http://cygwinports.dotsrc.org/}). - -@subsection Crosscompilation for Windows under Cygwin - -With Cygwin you can create Windows binaries that do not need the cygwin1.dll. - -Just install your Cygwin as explained before, plus these additional -"Devel" packages: -@example -gcc-mingw-core, mingw-runtime, mingw-zlib -@end example - -and add some special flags to your configure invocation. - -For a static build run -@example -./configure --target-os=mingw32 --enable-memalign-hack --enable-static --disable-shared --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin -@end example - -and for a build with shared libraries -@example -./configure --target-os=mingw32 --enable-memalign-hack --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin -@end example - -@section BeOS - -BeOS support is broken in mysterious ways. - -@section OS/2 - -For information about compiling FFmpeg on OS/2 see -@url{http://www.edm2.com/index.php/FFmpeg}. - -@chapter Developers Guide - -@section API -@itemize @bullet -@item libavcodec is the library containing the codecs (both encoding and -decoding). Look at @file{libavcodec/apiexample.c} to see how to use it. - -@item libavformat is the library containing the file format handling (mux and -demux code for several formats). Look at @file{ffplay.c} to use it in a -player. See @file{output_example.c} to use it to generate audio or video -streams. - -@end itemize - -@section Integrating libavcodec or libavformat in your program - -You can integrate all the source code of the libraries to link them -statically to avoid any version problem. All you need is to provide a -'config.mak' and a 'config.h' in the parent directory. See the defines -generated by ./configure to understand what is needed. 
- -You can use libavcodec or libavformat in your commercial program, but -@emph{any patch you make must be published}. The best way to proceed is -to send your patches to the FFmpeg mailing list. - -@node Coding Rules -@section Coding Rules - -FFmpeg is programmed in the ISO C90 language with a few additional -features from ISO C99, namely: -@itemize @bullet -@item -the @samp{inline} keyword; -@item -@samp{//} comments; -@item -designated struct initializers (@samp{struct s x = @{ .i = 17 @};}) -@item -compound literals (@samp{x = (struct s) @{ 17, 23 @};}) -@end itemize - -These features are supported by all compilers we care about, so we will not -accept patches to remove their use unless they absolutely do not impair -clarity and performance. - -All code must compile with GCC 2.95 and GCC 3.3. Currently, FFmpeg also -compiles with several other compilers, such as the Compaq ccc compiler -or Sun Studio 9, and we would like to keep it that way unless it would -be exceedingly involved. To ensure compatibility, please do not use any -additional C99 features or GCC extensions. Especially watch out for: -@itemize @bullet -@item -mixing statements and declarations; -@item -@samp{long long} (use @samp{int64_t} instead); -@item -@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar; -@item -GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}). -@end itemize - -Indent size is 4. -The presentation is the one specified by 'indent -i4 -kr -nut'. -The TAB character is forbidden outside of Makefiles as is any -form of trailing whitespace. Commits containing either will be -rejected by the Subversion repository. - -The main priority in FFmpeg is simplicity and small code size in order to -minimize the bug count. - -Comments: Use the JavaDoc/Doxygen -format (see examples below) so that code documentation -can be generated automatically. 
All nontrivial functions should have a comment -above them explaining what the function does, even if it is just one sentence. -All structures and their member variables should be documented, too. -@example -/** - * @@file mpeg.c - * MPEG codec. - * @@author ... - */ - -/** - * Summary sentence. - * more text ... - * ... - */ -typedef struct Foobar@{ - int var1; /**< var1 description */ - int var2; ///< var2 description - /** var3 description */ - int var3; -@} Foobar; - -/** - * Summary sentence. - * more text ... - * ... - * @@param my_parameter description of my_parameter - * @@return return value description - */ -int myfunc(int my_parameter) -... -@end example - -fprintf and printf are forbidden in libavformat and libavcodec, -please use av_log() instead. - -Casts should be used only when necessary. Unneeded parentheses -should also be avoided if they don't make the code easier to understand. - -@section Development Policy - -@enumerate -@item - Contributions should be licensed under the LGPL 2.1, including an - "or any later version" clause, or the MIT license. GPL 2 including - an "or any later version" clause is also acceptable, but LGPL is - preferred. -@item - You must not commit code which breaks FFmpeg! (Meaning unfinished but - enabled code which breaks compilation or compiles but does not work or - breaks the regression tests) - You can commit unfinished stuff (for testing etc), but it must be disabled - (#ifdef etc) by default so it does not interfere with other developers' - work. -@item - You do not have to over-test things. If it works for you, and you think it - should work for others, then commit. If your code has problems - (portability, triggers compiler bugs, unusual environment etc) they will be - reported and eventually fixed. -@item - Do not commit unrelated changes together, split them into self-contained - pieces. 
Also do not forget that if part B depends on part A, but A does not - depend on B, then A can and should be committed first and separate from B. - Keeping changes well split into self-contained parts makes reviewing and - understanding them on the commit log mailing list easier. This also helps - in case of debugging later on. - Also if you have doubts about splitting or not splitting, do not hesitate to - ask/discuss it on the developer mailing list. -@item - Do not change behavior of the program (renaming options etc) without - first discussing it on the ffmpeg-devel mailing list. Do not remove - functionality from the code. Just improve! - - Note: Redundant code can be removed. -@item - Do not commit changes to the build system (Makefiles, configure script) - which change behavior, defaults etc, without asking first. The same - applies to compiler warning fixes, trivial looking fixes and to code - maintained by other developers. We usually have a reason for doing things - the way we do. Send your changes as patches to the ffmpeg-devel mailing - list, and if the code maintainers say OK, you may commit. This does not - apply to files you wrote and/or maintain. -@item - We refuse source indentation and other cosmetic changes if they are mixed - with functional changes, such commits will be rejected and removed. Every - developer has his own indentation style, you should not change it. Of course - if you (re)write something, you can use your own style, even though we would - prefer if the indentation throughout FFmpeg was consistent (Many projects - force a given indentation style - we do not.). If you really need to make - indentation changes (try to avoid this), separate them strictly from real - changes. - - NOTE: If you had to put if()@{ .. @} over a large (> 5 lines) chunk of code, - then either do NOT change the indentation of the inner part within (do not - move it to the right)! or do so in a separate commit -@item - Always fill out the commit log message. 
Describe in a few lines what you - changed and why. You can refer to mailing list postings if you fix a - particular bug. Comments such as "fixed!" or "Changed it." are unacceptable. -@item - If you apply a patch by someone else, include the name and email address in - the log message. Since the ffmpeg-cvslog mailing list is publicly - archived you should add some SPAM protection to the email address. Send an - answer to ffmpeg-devel (or wherever you got the patch from) saying that - you applied the patch. -@item - When applying patches that have been discussed (at length) on the mailing - list, reference the thread in the log message. -@item - Do NOT commit to code actively maintained by others without permission. - Send a patch to ffmpeg-devel instead. If no one answers within a reasonable - timeframe (12h for build failures and security fixes, 3 days for small changes, - 1 week for big patches) then commit your patch if you think it is OK. - Also note, the maintainer can simply ask for more time to review! -@item - Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits - are sent there and reviewed by all the other developers. Bugs and possible - improvements or general questions regarding commits are discussed there. We - expect you to react if problems with your code are uncovered. -@item - Update the documentation if you change behavior or add features. If you are - unsure how best to do this, send a patch to ffmpeg-devel, the documentation - maintainer(s) will review and commit your stuff. -@item - Try to keep important discussions and requests (also) on the public - developer mailing list, so that all developers can benefit from them. -@item - Never write to unallocated memory, never write over the end of arrays, - always check values read from some untrusted source before using them - as array index or other risky things. 
-@item - Remember to check if you need to bump versions for the specific libav - parts (libavutil, libavcodec, libavformat) you are changing. You need - to change the version integer and the version string. - Incrementing the first component means no backward compatibility to - previous versions (e.g. removal of a function from the public API). - Incrementing the second component means backward compatible change - (e.g. addition of a function to the public API). - Incrementing the third component means a noteworthy binary compatible - change (e.g. encoder bug fix that matters for the decoder). -@item - If you add a new codec, remember to update the changelog, add it to - the supported codecs table in the documentation and bump the second - component of the @file{libavcodec} version number appropriately. If - it has a fourcc, add it to @file{libavformat/avienc.c}, even if it - is only a decoder. -@item - Compiler warnings indicate potential bugs or code with bad style. If a type of - warning always points to correct and clean code, that warning should - be disabled, not the code changed. - Thus the remaining warnings can either be bugs or correct code. - If it is a bug, the bug has to be fixed. If it is not, the code should - be changed to not generate a warning unless that causes a slowdown - or obfuscates the code. -@item - If you add a new file, give it a proper license header. Do not copy and - paste it from a random place, use an existing file as template. -@end enumerate - -We think our rules are not too hard. If you have comments, contact us. - -Note, these rules are mostly borrowed from the MPlayer project. - -@section Submitting patches - -First, (@pxref{Coding Rules}) above if you did not yet. - -When you submit your patch, try to send a unified diff (diff '-up' -option). We cannot read other diffs :-) - -Also please do not submit a patch which contains several unrelated changes. -Split it into separate, self-contained pieces. 
This does not mean splitting -file by file. Instead, make the patch as small as possible while still -keeping it as a logical unit that contains an individual change, even -if it spans multiple files. This makes reviewing your patches much easier -for us and greatly increases your chances of getting your patch applied. - -Run the regression tests before submitting a patch so that you can -verify that there are no big problems. - -Patches should be posted as base64 encoded attachments (or any other -encoding which ensures that the patch will not be trashed during -transmission) to the ffmpeg-devel mailing list, see -@url{http://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-devel} - -It also helps quite a bit if you tell us what the patch does (for example -'replaces lrint by lrintf'), and why (for example '*BSD isn't C99 compliant -and has no lrint()') - -Also please if you send several patches, send each patch as a separate mail, -do not attach several unrelated patches to the same mail. - -@section patch submission checklist - -@enumerate -@item - Do the regression tests pass with the patch applied? -@item - Is the patch a unified diff? -@item - Is the patch against latest FFmpeg SVN? -@item - Are you subscribed to ffmpeg-dev? - (the list is subscribers only due to spam) -@item - Have you checked that the changes are minimal, so that the same cannot be - achieved with a smaller patch and/or simpler final code? -@item - If the change is to speed critical code, did you benchmark it? -@item - If you did any benchmarks, did you provide them in the mail? -@item - Have you checked that the patch does not introduce buffer overflows or - other security issues? -@item - Is the patch created from the root of the source tree, so it can be - applied with @code{patch -p0}? -@item - Does the patch not mix functional and cosmetic changes? -@item - Did you add tabs or trailing whitespace to the code? Both are forbidden. -@item - Is the patch attached to the email you send? 
-@item - Is the mime type of the patch correct? It should be text/x-diff or - text/x-patch or at least text/plain and not application/octet-stream. -@item - If the patch fixes a bug, did you provide a verbose analysis of the bug? -@item - If the patch fixes a bug, did you provide enough information, including - a sample, so the bug can be reproduced and the fix can be verified? - Note please do not attach samples >100k to mails but rather provide a - URL, you can upload to ftp://upload.mplayerhq.hu -@item - Did you provide a verbose summary about what the patch does change? -@item - Did you provide a verbose explanation why it changes things like it does? -@item - Did you provide a verbose summary of the user visible advantages and - disadvantages if the patch is applied? -@item - Did you provide an example so we can verify the new feature added by the - patch easily? -@item - If you added a new file, did you insert a license header? It should be - taken from FFmpeg, not randomly copied and pasted from somewhere else. -@item - You should maintain alphabetical order in alphabetically ordered lists as - long as doing so does not break API/ABI compatibility. -@item - Lines with similar content should be aligned vertically when doing so - improves readability. -@item - Did you provide a suggestion for a clear commit log message? -@item - Did you test your decoder or demuxer against damaged data? If no, see - tools/trasher and the noise bitstream filter. Your decoder or demuxer - should not crash or end in a (near) infinite loop when fed damaged data. -@end enumerate - -@section Patch review process - -All patches posted to ffmpeg-devel will be reviewed, unless they contain a -clear note that the patch is not for SVN. -Reviews and comments will be posted as replies to the patch on the -mailing list. The patch submitter then has to take care of every comment, -that can be by resubmitting a changed patch or by discussion. 
Resubmitted -patches will themselves be reviewed like any other patch. If at some point -a patch passes review with no comments then it is approved, that can for -simple and small patches happen immediately while large patches will generally -have to be changed and reviewed many times before they are approved. -After a patch is approved it will be committed to the repository. - -We will review all submitted patches, but sometimes we are quite busy so -especially for large patches this can take several weeks. - -When resubmitting patches, please do not make any significant changes -not related to the comments received during review. Such patches will -be rejected. Instead, submit significant changes or new features as -separate patches. - -@section Regression tests - -Before submitting a patch (or committing to the repository), you should at least -test that you did not break anything. - -The regression tests build a synthetic video stream and a synthetic -audio stream. These are then encoded and decoded with all codecs or -formats. The CRC (or MD5) of each generated file is recorded in a -result file. A 'diff' is launched to compare the reference results and -the result file. - -The regression tests then go on to test the FFserver code with a -limited set of streams. It is important that this step runs correctly -as well. - -Run 'make test' to test all the codecs and formats. - -Run 'make fulltest' to test all the codecs, formats and FFserver. - -[Of course, some patches may change the results of the regression tests. In -this case, the reference results of the regression tests shall be modified -accordingly]. 
- -@bye diff --git a/contrib/ffmpeg/doc/hooks.texi b/contrib/ffmpeg/doc/hooks.texi deleted file mode 100644 index c410f1cb0..000000000 --- a/contrib/ffmpeg/doc/hooks.texi +++ /dev/null @@ -1,299 +0,0 @@ -\input texinfo @c -*- texinfo -*- - -@settitle Video Hook Documentation -@titlepage -@sp 7 -@center @titlefont{Video Hook Documentation} -@sp 3 -@end titlepage - - -@chapter Introduction - -@var{Please be aware that vhook is deprecated, and hence its development is -frozen (bug fixes are still accepted). -The substitute will be 'libavfilter', the result of our 'Video Filter API' -Google Summer of Code project. You may monitor its progress by subscribing to -the ffmpeg-soc mailing list at -@url{http://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-soc}.} - -The video hook functionality is designed (mostly) for live video. It allows -the video to be modified or examined between the decoder and the encoder. - -Any number of hook modules can be placed inline, and they are run in the -order that they were specified on the ffmpeg command line. - -The video hook modules are provided for use as a base for your own modules, -and are described below. - -Modules are loaded using the -vhook option to ffmpeg. The value of this parameter -is a space separated list of arguments. The first is the module name, and the rest -are passed as arguments to the Configure function of the module. - -The modules are dynamic libraries: They have different suffixes (.so, .dll, .dylib) -depending on your platform. And your platform dictates if they need to be -somewhere in your PATH, or in your LD_LIBRARY_PATH. Otherwise you will need to -specify the full path of the vhook file that you are using. - -@section null.c - -This does nothing. Actually it converts the input image to RGB24 and then converts -it back again. This is meant as a sample that you can use to test your setup. - -@section fish.c - -This implements a 'fish detector'. 
Essentially it converts the image into HSV -space and tests whether more than a certain percentage of the pixels fall into -a specific HSV cuboid. If so, then the image is saved into a file for processing -by other bits of code. - -Why use HSV? It turns out that HSV cuboids represent a more compact range of -colors than would an RGB cuboid. - -@section imlib2.c - -This module implements a text overlay for a video image. Currently it -supports a fixed overlay or reading the text from a file. The string -is passed through strftime() so that it is easy to imprint the date and -time onto the image. - -This module depends on the external library imlib2, available on -Sourceforge, among other places, if it is not already installed on -your system. - -You may also overlay an image (even semi-transparent) like TV stations do. -You may move either the text or the image around your video to create -scrolling credits, for example. - -The font file used is looked for in a FONTPATH environment variable, and -prepended to the point size as a command line option and can be specified -with the full path to the font file, as in: -@example --F /usr/X11R6/lib/X11/fonts/TTF/VeraBd.ttf/20 -@end example -where 20 is the point size. - -You can specify the filename to read RGB color names from. 
If it is not -specified, these defaults are used: @file{/usr/share/X11/rgb.txt} and -@file{/usr/lib/X11/rgb.txt} - -Options: -@multitable @columnfractions .2 .8 -@item @option{-C <rgb.txt>} @tab The filename to read RGB color names from -@item @option{-c <color>} @tab The color of the text -@item @option{-F <fontname>} @tab The font face and size -@item @option{-t <text>} @tab The text -@item @option{-f <filename>} @tab The filename to read text from -@item @option{-x <expression>}@tab x coordinate of text or image -@item @option{-y <expression>}@tab y coordinate of text or image -@item @option{-i <filename>} @tab The filename to read an image from -@item @option{-R <expression>}@tab Value for R color -@item @option{-G <expression>}@tab Value for G color -@item @option{-B <expression>}@tab Value for B color -@item @option{-A <expression>}@tab Value for Alpha channel -@end multitable - -Expressions are functions of these variables: -@multitable @columnfractions .2 .8 -@item @var{N} @tab frame number (starting at zero) -@item @var{H} @tab frame height -@item @var{W} @tab frame width -@item @var{h} @tab image height -@item @var{w} @tab image width -@item @var{X} @tab previous x coordinate of text or image -@item @var{Y} @tab previous y coordinate of text or image -@end multitable - -You may also use the constants @var{PI}, @var{E}, and the math functions available at the -FFmpeg formula evaluator at (@url{ffmpeg-doc.html#SEC13}), except @var{bits2qp(bits)} -and @var{qp2bits(qp)}. 
- -Usage examples: - -@example - # Remember to set the path to your fonts - FONTPATH="/cygdrive/c/WINDOWS/Fonts/" - FONTPATH="$FONTPATH:/usr/share/imlib2/data/fonts/" - FONTPATH="$FONTPATH:/usr/X11R6/lib/X11/fonts/TTF/" - export FONTPATH - - # Bulb dancing in a Lissajous pattern - ffmpeg -i input.avi -vhook \ - 'vhook/imlib2.dll -x W*(0.5+0.25*sin(N/47*PI))-w/2 -y H*(0.5+0.50*cos(N/97*PI))-h/2 -i /usr/share/imlib2/data/images/bulb.png' \ - -acodec copy -sameq output.avi - - # Text scrolling - ffmpeg -i input.avi -vhook \ - 'vhook/imlib2.dll -c red -F Vera.ttf/20 -x 150+0.5*N -y 70+0.25*N -t Hello' \ - -acodec copy -sameq output.avi - - # Date and time stamp, security-camera style: - ffmpeg -r 29.97 -s 320x256 -f video4linux -i /dev/video0 \ - -vhook 'vhook/imlib2.so -x 0 -y 0 -i black-260x20.png' \ - -vhook 'vhook/imlib2.so -c white -F VeraBd.ttf/12 -x 0 -y 0 -t %A-%D-%T' \ - output.avi - - In this example the video is captured from the first video capture card as a - 320x256 AVI, and a black 260 by 20 pixel PNG image is placed in the upper - left corner, with the day, date and time overlaid on it in Vera Bold 12 - point font. A simple black PNG file 260 pixels wide and 20 pixels tall - was created in the GIMP for this purpose. - - # Scrolling credits from a text file - ffmpeg -i input.avi -vhook \ - 'vhook/imlib2.so -c white -F VeraBd.ttf/16 -x 100 -y -1.0*N -f credits.txt' \ - -sameq output.avi - - In this example, the text is stored in a file, and is positioned 100 - pixels from the left hand edge of the video. The text is scrolled from the - bottom up. Making the y factor positive will scroll from the top down. - Increasing the magnitude of the y factor makes the text scroll faster, - decreasing it makes it scroll slower. Hint: Blank lines containing only - a newline are treated as end-of-file. To create blank lines, use lines - that consist of space characters only. 
- - # Scrolling credits with custom color from a text file - ffmpeg -i input.avi -vhook \ - 'vhook/imlib2.so -C rgb.txt -c CustomColor1 -F VeraBd.ttf/16 -x 100 -y -1.0*N -f credits.txt' \ - -sameq output.avi - - This example does the same as the one above, but specifies an rgb.txt file - to be used, which has a custom-made color in it. - - # Variable colors - ffmpeg -i input.avi -vhook \ - 'vhook/imlib2.so -t Hello -R abs(255*sin(N/47*PI)) -G abs(255*sin(N/47*PI)) -B abs(255*sin(N/47*PI))' \ - -sameq output.avi - - In this example, the color for the text goes up and down from black to - white. - - # Text fade-out - ffmpeg -i input.avi -vhook \ - 'vhook/imlib2.so -t Hello -A max(0,255-exp(N/47))' \ - -sameq output.avi - - In this example, the text fades out in about 10 seconds for a 25 fps input - video file. - - # scrolling credits from a graphics file - ffmpeg -sameq -i input.avi \ - -vhook 'vhook/imlib2.so -x 0 -y -1.0*N -i credits.png' output.avi - - In this example, a transparent PNG file the same width as the video - (e.g. 320 pixels), but very long, (e.g. 3000 pixels), was created, and - text, graphics, brushstrokes, etc, were added to the image. The image - is then scrolled up, from the bottom of the frame. - -@end example - -@section ppm.c - -It's basically a launch point for a PPM pipe, so you can use any -executable (or script) which consumes a PPM on stdin and produces a PPM -on stdout (and flushes each frame). The Netpbm utilities are a series of -such programs. - -A list of them is here: - -@url{http://netpbm.sourceforge.net/doc/directory.html} - -Usage example: - -@example -ffmpeg -i input -vhook "/path/to/ppm.so some-ppm-filter args" output -@end example - -@section drawtext.c - -This module implements a text overlay for a video image. Currently it -supports a fixed overlay or reading the text from a file. The string -is passed through strftime() so that it is easy to imprint the date and -time onto the image. 
- -Features: -@itemize @minus -@item TrueType, Type1 and others via the FreeType2 library -@item Font kerning (better output) -@item Line Wrap (put the text that doesn't fit one line on the next line) -@item Background box (currently in development) -@item Outline -@end itemize - -Options: -@multitable @columnfractions .2 .8 -@item @option{-c <color>} @tab Foreground color of the text ('internet' way) <#RRGGBB> [default #FFFFFF] -@item @option{-C <color>} @tab Background color of the text ('internet' way) <#RRGGBB> [default #000000] -@item @option{-f <font-filename>} @tab font file to use -@item @option{-t <text>} @tab text to display -@item @option{-T <filename>} @tab file to read text from -@item @option{-x <pos>} @tab x coordinate of the start of the text -@item @option{-y <pos>} @tab y coordinate of the start of the text -@end multitable - -Text fonts are being looked for in a FONTPATH environment variable. -If the FONTPATH environment variable is not available, or is not checked by -your target (i.e. Cygwin), then specify the full path to the font file as in: -@example --f /usr/X11R6/lib/X11/fonts/TTF/VeraBd.ttf -@end example - -Usage Example: -@example - # Remember to set the path to your fonts - FONTPATH="/cygdrive/c/WINDOWS/Fonts/" - FONTPATH="$FONTPATH:/usr/share/imlib2/data/fonts/" - FONTPATH="$FONTPATH:/usr/X11R6/lib/X11/fonts/TTF/" - export FONTPATH - - # Time and date display - ffmpeg -f video4linux2 -i /dev/video0 \ - -vhook 'vhook/drawtext.so -f VeraBd.ttf -t %A-%D-%T' movie.mpg - - This example grabs video from the first capture card and outputs it to an - MPEG video, and places "Weekday-dd/mm/yy-hh:mm:ss" at the top left of the - frame, updated every second, using the Vera Bold TrueType Font, which - should exist in: /usr/X11R6/lib/X11/fonts/TTF/ -@end example - -Check the man page for strftime() for all the various ways you can format -the date and time. 
- -@section watermark.c - -Command Line options: -@multitable @columnfractions .2 .8 -@item @option{-m [0|1]} @tab Mode (default: 0, see below) -@item @option{-t 000000 - FFFFFF} @tab Threshold, six digit hex number -@item @option{-f <filename>} @tab Watermark image filename, must be specified! -@end multitable - -MODE 0: - The watermark picture works like this (assuming color intensities 0..0xFF): - Per color do this: - If mask color is 0x80, no change to the original frame. - If mask color is < 0x80 the absolute difference is subtracted from the - frame. If result < 0, result = 0. - If mask color is > 0x80 the absolute difference is added to the - frame. If result > 0xFF, result = 0xFF. - - You can override the 0x80 level with the -t flag. E.g. if threshold is - 000000 the color value of watermark is added to the destination. - - This way a mask that is visible both in light and dark pictures can be made - (e.g. by using a picture generated by the Gimp and the bump map tool). - - An example watermark file is at: - @url{http://engene.se/ffmpeg_watermark.gif} - -MODE 1: - Per color do this: - If mask color > threshold color then the watermark pixel is used. - -Example usage: -@example - ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif' -an out.mov - ffmpeg -i infile -vhook '/path/watermark.so -f wm.gif -m 1 -t 222222' -an out.mov -@end example - -@bye diff --git a/contrib/ffmpeg/doc/issue_tracker.txt b/contrib/ffmpeg/doc/issue_tracker.txt deleted file mode 100644 index 4b6a8a134..000000000 --- a/contrib/ffmpeg/doc/issue_tracker.txt +++ /dev/null @@ -1,222 +0,0 @@ -FFmpeg's bug/patch/feature request tracker manual -================================================= - -NOTE: This is a draft. - -Overview: ---------- -FFmpeg uses Roundup for tracking issues, new issues and changes to -existing issues can be done through a web interface and through email. 
-It is possible to subscribe to individual issues by adding yourself to the -nosy list or to subscribe to the ffmpeg-issues mailing list which receives -a mail for every change to every issue. Replies to such mails will also -be properly added to the respective issue. -(the above does all work already after light testing) -The subscription URL for the ffmpeg-issues list is: -http://live.polito/mailman/listinfo/ffmpeg-issues -The URL of the webinterface of the tracker is: -http(s)://roundup.mplayerhq/roundup/ffmpeg/ -Note the URLs in this document are obfuscated, you must append the top level -domain of Hungary to the tracker, and of Italy to the mailing list. - -Email Interface: ----------------- -There is a mailing list to which all new issues and changes to existing issues -are sent. You can subscribe through -http://live.polito/mailman/listinfo/ffmpeg-issues -Replies to messages there will have their text added to the specific issues. -Attachments will be added as if they had been uploaded via the web interface. -You can change the status, substatus, topic, ... by changing the subject in -your reply like: -Re: [issue94] register_avcodec and allcodecs.h [type=patch;status=open;substatus=approved] -Roundup will then change things as you requested and remove the [...] from -the subject before forwarding the mail to the mailing list. - - -NOTE: issue = (bug report || patch || feature request) - -Type: ------ -bug - An error, flaw, mistake, failure, or fault in FFmpeg or libav* that - prevents it from behaving as intended. - -feature request - Request of support for encoding or decoding of a new codec, container - or variant. - Request of support for more, less or plain different output or behavior - where the current implementation cannot be considered wrong. - -patch - A patch as generated by diff which conforms to the patch submission and - development policy. - - -Priority: ---------- -critical - Bugs and patches which deal with data loss and security issues. 
- No feature request can be critical. - -important - Bugs which make FFmpeg unusable for a significant number of users, and - patches fixing them. - Examples here might be completely broken MPEG-4 decoding or a build issue - on Linux. - While broken 4xm decoding or a broken OS/2 build would not be important, - the separation to normal is somewhat fuzzy. - For feature requests this priority would be used for things many people - want. - -normal - - -minor - Bugs and patches about things like spelling errors, "mp2" instead of - "mp3" being shown and such. - Feature requests about things few people want or which do not make a big - difference. - -wish - Something that is desirable to have but that there is no urgency at - all to implement, e.g. something completely cosmetic like a website - restyle or a personalized doxy template or the FFmpeg logo. - This priority is not valid for bugs. - - -Status: -------- -new - initial state - -open - intermediate states - -closed - final state - - -Type/Status/Substatus: ----------- -*/new/new - Initial state of new bugs, patches and feature requests submitted by - users. - -*/open/open - Issues which have been briefly looked at and which did not look outright - invalid. - This implicates that no real more detailed state applies yet. Conversely, - the more detailed states below implicate that the issue has been briefly - looked at. - -*/closed/duplicate - Bugs, patches or feature requests which are duplicates. - Note that patches dealing with the same thing in a different way are not - duplicates. - Note, if you mark something as duplicate, do not forget setting the - superseder so bug reports are properly linked. - -*/closed/invalid - Bugs caused by user errors, random ineligible or otherwise nonsense stuff. - -*/closed/needs_more_info - Issues for which some information has been requested by the developers, - but which has not been provided by anyone within reasonable time. 
- -bug/open/reproduced - Bugs which have been reproduced. - -bug/open/analyzed - Bugs which have been analyzed and where it is understood what causes them - and which exact chain of events triggers them. This analysis should be - available as a message in the bug report. - Note, do not change the status to analyzed without also providing a clear - and understandable analysis. - This state implicates that the bug either has been reproduced or that - reproduction is not needed as the bug is already understood. - -bug/open/needs_more_info - Bug reports which are incomplete and/or where more information is needed - from the submitter or another person who can provide it. - This state implicates that the bug has not been analyzed or reproduced. - Note, the idea behind needs_more_info is to offload work from the - developers to the users whenever possible. - -bug/closed/fixed - Bugs which have to the best of our knowledge been fixed. - -bug/closed/wont_fix - Bugs which we will not fix, the reasons here could be legal, philosophical - or others. - -bug/closed/works_for_me - Bugs for which sufficient information was provided to reproduce but - reproduction failed - that is the code seems to work correctly to the - best of our knowledge. - -patch/open/approved - Patches which have been reviewed and approved by a developer. - Such patches can be applied anytime by any other developer after some - reasonable testing (compile + regression tests + does the patch do - what the author claimed). - -patch/open/needs_changes - Patches which have been reviewed and need changes to be accepted. - -patch/closed/applied - Patches which have been applied. - -patch/closed/rejected - Patches which have been rejected. - -feature_request/open/needs_more_info - Feature requests where it is not clear what exactly is wanted - (these also could be closed as invalid ...). - -feature_request/closed/implemented - Feature requests which have been implemented. 
- -feature_request/closed/wont_implement - Feature requests which will not be implemented. The reasons here could - be legal, philosophical or others. - -Note, please do not use type-status-substatus combinations other than the -above without asking on ffmpeg-dev first! - -Note2, if you provide the requested info do not forget to remove the -needs_more_info substate. - -Topic: ------- -A topic is a tag you should add to your issue in order to make grouping them -easier. - -avcodec - issues in libavcodec/* - -avformat - issues in libavformat/* - -avutil - issues in libavutil/* - -regression test - issues in tests/* - -ffmpeg - issues in or related to ffmpeg.c - -ffplay - issues in or related to ffplay.c - -ffserver - issues in or related to ffserver.c - -build system - issues in or related to configure/Makefile - -regression - bugs which were working in a past revision - -roundup - issues related to our issue tracker diff --git a/contrib/ffmpeg/doc/optimization.txt b/contrib/ffmpeg/doc/optimization.txt deleted file mode 100644 index 4c0934b4b..000000000 --- a/contrib/ffmpeg/doc/optimization.txt +++ /dev/null @@ -1,233 +0,0 @@ -optimization Tips (for libavcodec): -=================================== - -What to optimize: ------------------ -If you plan to do non-x86 architecture specific optimizations (SIMD normally), -then take a look in the i386/ directory, as most important functions are -already optimized for MMX. - -If you want to do x86 optimizations then you can either try to finetune the -stuff in the i386 directory or find some other functions in the C source to -optimize, but there aren't many left. - - -Understanding these overoptimized functions: --------------------------------------------- -As many functions tend to be a bit difficult to understand because -of optimizations, it can be hard to optimize them further, or write -architecture-specific versions. 
It is recommended to look at older -revisions of the interesting files (for a web frontend try ViewVC at -http://svn.mplayerhq.hu/ffmpeg/trunk/). -Alternatively, look into the other architecture-specific versions in -the i386/, ppc/, alpha/ subdirectories. Even if you don't exactly -comprehend the instructions, it could help understanding the functions -and how they can be optimized. - -NOTE: If you still don't understand some function, ask at our mailing list!!! -(http://lists.mplayerhq.hu/mailman/listinfo/ffmpeg-devel) - - -When is an optimization justified? ----------------------------------- -Normally, clean and simple optimizations for widely used codecs are -justified even if they only achieve an overall speedup of 0.1%. These -speedups accumulate and can make a big difference after awhile. Also, if -none of the following factors get worse due to an optimization -- speed, -binary code size, source size, source readability -- and at least one -factor improves, then an optimization is always a good idea even if the -overall gain is less than 0.1%. For obscure codecs that are not often -used, the goal is more toward keeping the code clean, small, and -readable instead of making it 1% faster. - - -WTF is that function good for ....: ------------------------------------ -The primary purpose of this list is to avoid wasting time optimizing functions -which are rarely used. - -put(_no_rnd)_pixels{,_x2,_y2,_xy2} - Used in motion compensation (en/decoding). - -avg_pixels{,_x2,_y2,_xy2} - Used in motion compensation of B-frames. - These are less important than the put*pixels functions. - -avg_no_rnd_pixels* - unused - -pix_abs16x16{,_x2,_y2,_xy2} - Used in motion estimation (encoding) with SAD. - -pix_abs8x8{,_x2,_y2,_xy2} - Used in motion estimation (encoding) with SAD of MPEG-4 4MV only. - These are less important than the pix_abs16x16* functions. - -put_mspel8_mc* / wmv2_mspel8* - Used only in WMV2. 
- it is not recommended that you waste your time with these, as WMV2 - is an ugly and relatively useless codec. - -mpeg4_qpel* / *qpel_mc* - Used in MPEG-4 qpel motion compensation (encoding & decoding). - The qpel8 functions are used only for 4mv, - the avg_* functions are used only for B-frames. - Optimizing them should have a significant impact on qpel - encoding & decoding. - -qpel{8,16}_mc??_old_c / *pixels{8,16}_l4 - Just used to work around a bug in an old libavcodec encoder version. - Don't optimize them. - -tpel_mc_func {put,avg}_tpel_pixels_tab - Used only for SVQ3, so only optimize them if you need fast SVQ3 decoding. - -add_bytes/diff_bytes - For huffyuv only, optimize if you want a faster ffhuffyuv codec. - -get_pixels / diff_pixels - Used for encoding, easy. - -clear_blocks - easiest to optimize - -gmc - Used for MPEG-4 gmc. - Optimizing this should have a significant effect on the gmc decoding - speed. - -gmc1 - Used for chroma blocks in MPEG-4 gmc with 1 warp point - (there are 4 luma & 2 chroma blocks per macroblock, so - only 1/3 of the gmc blocks use this, the other 2/3 - use the normal put_pixel* code, but only if there is - just 1 warp point). - Note: DivX5 gmc always uses just 1 warp point. - -pix_sum - Used for encoding. - -hadamard8_diff / sse / sad == pix_norm1 / dct_sad / quant_psnr / rd / bit - Specific compare functions used in encoding, it depends upon the - command line switches which of these are used. - Don't waste your time with dct_sad & quant_psnr, they aren't - really useful. - -put_pixels_clamped / add_pixels_clamped - Used for en/decoding in the IDCT, easy. - Note, some optimized IDCTs have the add/put clamped code included and - then put_pixels_clamped / add_pixels_clamped will be unused. - -idct/fdct - idct (encoding & decoding) - fdct (encoding) - difficult to optimize - -dct_quantize_trellis - Used for encoding with trellis quantization. - difficult to optimize - -dct_quantize - Used for encoding. 
- -dct_unquantize_mpeg1 - Used in MPEG-1 en/decoding. - -dct_unquantize_mpeg2 - Used in MPEG-2 en/decoding. - -dct_unquantize_h263 - Used in MPEG-4/H.263 en/decoding. - -FIXME remaining functions? -BTW, most of these functions are in dsputil.c/.h, some are in mpegvideo.c/.h. - - - -Alignment: -Some instructions on some architectures have strict alignment restrictions, -for example most SSE/SSE2 instructions on x86. -The minimum guaranteed alignment is written in the .h files, for example: - void (*put_pixels_clamped)(const DCTELEM *block/*align 16*/, UINT8 *pixels/*align 8*/, int line_size); - - -General Tips: -------------- -Use asm loops like: -asm( - "1: .... - ... - "jump_instruciton .... -Do not use C loops: -do{ - asm( - ... -}while() - -Use asm() instead of intrinsics. The latter requires a good optimizing compiler -which gcc is not. - - -Links: -====== -http://www.aggregate.org/MAGIC/ - -x86-specific: -------------- -http://developer.intel.com/design/pentium4/manuals/248966.htm - -The IA-32 Intel Architecture Software Developer's Manual, Volume 2: -Instruction Set Reference -http://developer.intel.com/design/pentium4/manuals/245471.htm - -http://www.agner.org/assem/ - -AMD Athlon Processor x86 Code Optimization Guide: -http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/22007.pdf - - -ARM-specific: -------------- -ARM Architecture Reference Manual (up to ARMv5TE): -http://www.arm.com/community/university/eulaarmarm.html - -Procedure Call Standard for the ARM Architecture: -http://www.arm.com/pdfs/aapcs.pdf - -Optimization guide for ARM9E (used in Nokia 770 Internet Tablet): -http://infocenter.arm.com/help/topic/com.arm.doc.ddi0240b/DDI0240A.pdf -Optimization guide for ARM11 (used in Nokia N800 Internet Tablet): -http://infocenter.arm.com/help/topic/com.arm.doc.ddi0211j/DDI0211J_arm1136_r1p5_trm.pdf -Optimization guide for Intel XScale (used in Sharp Zaurus PDA): -http://download.intel.com/design/intelxscale/27347302.pdf - 
-PowerPC-specific: ------------------ -PowerPC32/AltiVec PIM: -www.freescale.com/files/32bit/doc/ref_manual/ALTIVECPEM.pdf - -PowerPC32/AltiVec PEM: -www.freescale.com/files/32bit/doc/ref_manual/ALTIVECPIM.pdf - -CELL/SPU: -http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/30B3520C93F437AB87257060006FFE5E/$file/Language_Extensions_for_CBEA_2.4.pdf -http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/9F820A5FFA3ECE8C8725716A0062585F/$file/CBE_Handbook_v1.1_24APR2007_pub.pdf - -SPARC-specific: ---------------- -SPARC Joint Programming Specification (JPS1): Commonality -http://www.fujitsu.com/downloads/PRMPWR/JPS1-R1.0.4-Common-pub.pdf - -UltraSPARC III Processor User's Manual (contains instruction timings) -http://www.sun.com/processors/manuals/USIIIv2.pdf - -VIS Whitepaper (contains optimization guidelines) -http://www.sun.com/processors/vis/download/vis/vis_whitepaper.pdf - -GCC asm links: --------------- -official doc but quite ugly -http://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html - -a bit old (note "+" is valid for input-output, even though the next disagrees) -http://www.cs.virginia.edu/~clc5q/gcc-inline-asm.pdf diff --git a/contrib/ffmpeg/doc/snow.txt b/contrib/ffmpeg/doc/snow.txt deleted file mode 100644 index f99133971..000000000 --- a/contrib/ffmpeg/doc/snow.txt +++ /dev/null @@ -1,630 +0,0 @@ -============================================= -Snow Video Codec Specification Draft 20080110 -============================================= - -Introduction: -============= -This specification describes the Snow bitstream syntax and semantics as -well as the formal Snow decoding process. - -The decoding process is described precisely and any compliant decoder -MUST produce the exact same output for a spec-conformant Snow stream. -For encoding, though, any process which generates a stream compliant to -the syntactical and semantic requirements and which is decodable by -the process described in this spec shall be considered a conformant -Snow encoder. 
- -Definitions: -============ - -MUST the specific part must be done to conform to this standard -SHOULD it is recommended to be done that way, but not strictly required - -ilog2(x) is the rounded down logarithm of x with basis 2 -ilog2(0) = 0 - -Type definitions: -================= - -b 1-bit range coded -u unsigned scalar value range coded -s signed scalar value range coded - - -Bitstream syntax: -================= - -frame: - header - prediction - residual - -header: - keyframe b MID_STATE - if(keyframe || always_reset) - reset_contexts - if(keyframe){ - version u header_state - always_reset b header_state - temporal_decomposition_type u header_state - temporal_decomposition_count u header_state - spatial_decomposition_count u header_state - colorspace_type u header_state - chroma_h_shift u header_state - chroma_v_shift u header_state - spatial_scalability b header_state - max_ref_frames-1 u header_state - qlogs - } - if(!keyframe){ - update_mc b header_state - if(update_mc){ - for(plane=0; plane<2; plane++){ - diag_mc b header_state - htaps/2-1 u header_state - for(i= p->htaps/2; i; i--) - |hcoeff[i]| u header_state - } - } - update_qlogs b header_state - if(update_qlogs){ - spatial_decomposition_count u header_state - qlogs - } - } - - spatial_decomposition_type s header_state - qlog s header_state - mv_scale s header_state - qbias s header_state - block_max_depth s header_state - -qlogs: - for(plane=0; plane<2; plane++){ - quant_table[plane][0][0] s header_state - for(level=0; level < spatial_decomposition_count; level++){ - quant_table[plane][level][1]s header_state - quant_table[plane][level][3]s header_state - } - } - -reset_contexts - *_state[*]= MID_STATE - -prediction: - for(y=0; y<block_count_vertical; y++) - for(x=0; x<block_count_horizontal; x++) - block(0) - -block(level): - mvx_diff=mvy_diff=y_diff=cb_diff=cr_diff=0 - if(keyframe){ - intra=1 - }else{ - if(level!=max_block_depth){ - s_context= 2*left->level + 2*top->level + topleft->level + 
topright->level - leaf b block_state[4 + s_context] - } - if(level==max_block_depth || leaf){ - intra b block_state[1 + left->intra + top->intra] - if(intra){ - y_diff s block_state[32] - cb_diff s block_state[64] - cr_diff s block_state[96] - }else{ - ref_context= ilog2(2*left->ref) + ilog2(2*top->ref) - if(ref_frames > 1) - ref u block_state[128 + 1024 + 32*ref_context] - mx_context= ilog2(2*abs(left->mx - top->mx)) - my_context= ilog2(2*abs(left->my - top->my)) - mvx_diff s block_state[128 + 32*(mx_context + 16*!!ref)] - mvy_diff s block_state[128 + 32*(my_context + 16*!!ref)] - } - }else{ - block(level+1) - block(level+1) - block(level+1) - block(level+1) - } - } - - -residual: - residual2(luma) - residual2(chroma_cr) - residual2(chroma_cb) - -residual2: - for(level=0; level<spatial_decomposition_count; level++){ - if(level==0) - subband(LL, 0) - subband(HL, level) - subband(LH, level) - subband(HH, level) - } - -subband: - FIXME - - - -Tag description: ----------------- - -version - 0 - this MUST NOT change within a bitstream - -always_reset - if 1 then the range coder contexts will be reset after each frame - -temporal_decomposition_type - 0 - -temporal_decomposition_count - 0 - -spatial_decomposition_count - FIXME - -colorspace_type - 0 - this MUST NOT change within a bitstream - -chroma_h_shift - log2(luma.width / chroma.width) - this MUST NOT change within a bitstream - -chroma_v_shift - log2(luma.height / chroma.height) - this MUST NOT change within a bitstream - -spatial_scalability - 0 - -max_ref_frames - maximum number of reference frames - this MUST NOT change within a bitstream - -update_mc - indicates that motion compensation filter parameters are stored in the - header - -diag_mc - flag to enable faster diagonal interpolation - this SHOULD be 1 unless it turns out to be covered by a valid patent - -htaps - number of half pel interpolation filter taps, MUST be even, >0 and <10 - -hcoeff - half pel interpolation filter coefficients, hcoeff[0] are the 
2 middle - coefficients [1] are the next outer ones and so on, resulting in a filter - like: ...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... - the sign of the coefficients is not explicitly stored but alternates - after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... - hcoeff[0] is not explicitly stored but found by subtracting the sum - of all stored coefficients with signs from 32 - hcoeff[0]= 32 - hcoeff[1] - hcoeff[2] - ... - a good choice for hcoeff and htaps is - htaps= 6 - hcoeff={40,-10,2} - an alternative which requires more computations at both encoder and - decoder side and may or may not be better is - htaps= 8 - hcoeff={42,-14,6,-2} - - -ref_frames - minimum of the number of available reference frames and max_ref_frames - for example the first frame after a key frame always has ref_frames=1 - -spatial_decomposition_type - wavelet type - 0 is a 9/7 symmetric compact integer wavelet - 1 is a 5/3 symmetric compact integer wavelet - others are reserved - stored as delta from last, last is reset to 0 if always_reset || keyframe - -qlog - quality (logarthmic quantizer scale) - stored as delta from last, last is reset to 0 if always_reset || keyframe - -mv_scale - stored as delta from last, last is reset to 0 if always_reset || keyframe - FIXME check that everything works fine if this changes between frames - -qbias - dequantization bias - stored as delta from last, last is reset to 0 if always_reset || keyframe - -block_max_depth - maximum depth of the block tree - stored as delta from last, last is reset to 0 if always_reset || keyframe - -quant_table - quantiztation table - - -Highlevel bitstream structure: -============================= - -------------------------------------------- -| Header | - -------------------------------------------- -| ------------------------------------ | -| | Block0 | | -| | split? | | -| | yes no | | -| | ......... intra? | | -| | : Block01 : yes no | | -| | : Block02 : ....... 
.......... | | -| | : Block03 : : y DC : : ref index: | | -| | : Block04 : : cb DC : : motion x : | | -| | ......... : cr DC : : motion y : | | -| | ....... .......... | | -| ------------------------------------ | -| ------------------------------------ | -| | Block1 | | -| ... | - -------------------------------------------- -| ------------ ------------ ------------ | -|| Y subbands | | Cb subbands| | Cr subbands|| -|| --- --- | | --- --- | | --- --- || -|| |LL0||HL0| | | |LL0||HL0| | | |LL0||HL0| || -|| --- --- | | --- --- | | --- --- || -|| --- --- | | --- --- | | --- --- || -|| |LH0||HH0| | | |LH0||HH0| | | |LH0||HH0| || -|| --- --- | | --- --- | | --- --- || -|| --- --- | | --- --- | | --- --- || -|| |HL1||LH1| | | |HL1||LH1| | | |HL1||LH1| || -|| --- --- | | --- --- | | --- --- || -|| --- --- | | --- --- | | --- --- || -|| |HH1||HL2| | | |HH1||HL2| | | |HH1||HL2| || -|| ... | | ... | | ... || -| ------------ ------------ ------------ | - -------------------------------------------- - -Decoding process: -================= - - ------------ - | | - | Subbands | - ------------ | | - | | ------------ - | Intra DC | | - | | LL0 subband prediction - ------------ | - \ Dequantizaton - ------------------- \ | -| Reference frames | \ IDWT -| ------- ------- | Motion \ | -||Frame 0| |Frame 1|| Compensation . OBMC v ------- -| ------- ------- | --------------. \------> + --->|Frame n|-->output -| ------- ------- | ------- -||Frame 2| |Frame 3||<----------------------------------/ -| ... | - ------------------- - - -Range Coder: -============ - -Binary Range Coder: -------------------- -The implemented range coder is an adapted version based upon "Range encoding: -an algorithm for removing redundancy from a digitised message." by G. N. N. -Martin. -The symbols encoded by the Snow range coder are bits (0|1). The -associated probabilities are not fix but change depending on the symbol mix -seen so far. 
- - -bit seen | new state ----------+----------------------------------------------- - 0 | 256 - state_transition_table[256 - old_state]; - 1 | state_transition_table[ old_state]; - -state_transition_table = { - 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, - 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, - 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, - 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, -104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, -119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, -134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, -150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, -165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, -180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, -195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, -210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, -226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, -241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0}; - -FIXME - - -Range Coding of integers: -------------------------- -FIXME - - -Neighboring Blocks: -=================== -left and top are set to the respective blocks unless they are outside of -the image in which case they are set to the Null block - -top-left is set to the top left block unless it is outside of the image in -which case it is set to the left block - -if this block has no larger parent block or it is at the left side of its -parent block and the top right block is not outside of the image then the -top right block is used for top-right else the top-left block is used - -Null 
block -y,cb,cr are 128 -level, ref, mx and my are 0 - - -Motion Vector Prediction: -========================= -1. the motion vectors of all the neighboring blocks are scaled to -compensate for the difference of reference frames - -scaled_mv= (mv * (256 * (current_reference+1) / (mv.reference+1)) + 128)>>8 - -2. the median of the scaled left, top and top-right vectors is used as -motion vector prediction - -3. the used motion vector is the sum of the predictor and - (mvx_diff, mvy_diff)*mv_scale - - -Intra DC Predicton: -====================== -the luma and chroma values of the left block are used as predictors - -the used luma and chroma is the sum of the predictor and y_diff, cb_diff, cr_diff -to reverse this in the decoder apply the following: -block[y][x].dc[0] = block[y][x-1].dc[0] + y_diff; -block[y][x].dc[1] = block[y][x-1].dc[1] + cb_diff; -block[y][x].dc[2] = block[y][x-1].dc[2] + cr_diff; -block[*][-1].dc[*]= 128; - - -Motion Compensation: -==================== - -Halfpel interpolation: ----------------------- -halfpel interpolation is done by convolution with the halfpel filter stored -in the header: - -horizontal halfpel samples are found by -H1[y][x] = hcoeff[0]*(F[y][x ] + F[y][x+1]) - + hcoeff[1]*(F[y][x-1] + F[y][x+2]) - + hcoeff[2]*(F[y][x-2] + F[y][x+3]) - + ... -h1[y][x] = (H1[y][x] + 32)>>6; - -vertical halfpel samples are found by -H2[y][x] = hcoeff[0]*(F[y ][x] + F[y+1][x]) - + hcoeff[1]*(F[y-1][x] + F[y+2][x]) - + ... -h2[y][x] = (H2[y][x] + 32)>>6; - -vertical+horizontal halfpel samples are found by -H3[y][x] = hcoeff[0]*(H2[y][x ] + H2[y][x+1]) - + hcoeff[1]*(H2[y][x-1] + H2[y][x+2]) - + ... -H3[y][x] = hcoeff[0]*(H1[y ][x] + H1[y+1][x]) - + hcoeff[1]*(H1[y+1][x] + H1[y+2][x]) - + ... 
-h3[y][x] = (H3[y][x] + 2048)>>12; - - - F H1 F - | | | - | | | - | | | - F H1 F - | | | - | | | - | | | - F-------F-------F-> H1<-F-------F-------F - v v v - H2 H3 H2 - ^ ^ ^ - F-------F-------F-> H1<-F-------F-------F - | | | - | | | - | | | - F H1 F - | | | - | | | - | | | - F H1 F - - -unavailable fullpel samples (outside the picture for example) shall be equal -to the closest available fullpel sample - - -Smaller pel interpolation: --------------------------- -if diag_mc is set then points which lie on a line between 2 vertically, -horiziontally or diagonally adjacent halfpel points shall be interpolated -linearls with rounding to nearest and halfway values rounded up. -points which lie on 2 diagonals at the same time should only use the one -diagonal not containing the fullpel point - - - - F-->O---q---O<--h1->O---q---O<--F - v \ / v \ / v - O O O O O O O - | / | \ | - q q q q q - | / | \ | - O O O O O O O - ^ / \ ^ / \ ^ - h2-->O---q---O<--h3->O---q---O<--h2 - v \ / v \ / v - O O O O O O O - | \ | / | - q q q q q - | \ | / | - O O O O O O O - ^ / \ ^ / \ ^ - F-->O---q---O<--h1->O---q---O<--F - - - -the remaining points shall be bilinearly interpolated from the -up to 4 surrounding halfpel and fullpel points, again rounding should be to -nearest and halfway values rounded up - -compliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chroma -interpolation at least - - -Overlapped block motion compensation: -------------------------------------- -FIXME - -LL band prediction: -=================== -Each sample in the LL0 subband is predicted by the median of the left, top and -left+top-topleft samples, samples outside the subband shall be considered to -be 0. To reverse this prediction in the decoder apply the following. 
-for(y=0; y<height; y++){ - for(x=0; x<width; x++){ - sample[y][x] += median(sample[y-1][x], - sample[y][x-1], - sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]); - } -} -sample[-1][*]=sample[*][-1]= 0; -width,height here are the width and height of the LL0 subband not of the final -video - - -Dequantizaton: -============== -FIXME - -Wavelet Transform: -================== - -Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integer -transform and a integer approximation of the symmetric biorthogonal 9/7 -daubechies wavelet. - -2D IDWT (inverse discrete wavelet transform) --------------------------------------------- -The 2D IDWT applies a 2D filter recursively, each time combining the -4 lowest frequency subbands into a single subband until only 1 subband -remains. -The 2D filter is done by first applying a 1D filter in the vertical direction -and then applying it in the horizontal one. - --------------- --------------- --------------- --------------- -|LL0|HL0| | | | | | | | | | | | -|---+---| HL1 | | L0|H0 | HL1 | | LL1 | HL1 | | | | -|LH0|HH0| | | | | | | | | | | | -|-------+-------|->|-------+-------|->|-------+-------|->| L1 | H1 |->... -| | | | | | | | | | | | -| LH1 | HH1 | | LH1 | HH1 | | LH1 | HH1 | | | | -| | | | | | | | | | | | - --------------- --------------- --------------- --------------- - - -1D Filter: ----------- -1. interleave the samples of the low and high frequency subbands like -s={L0, H0, L1, H1, L2, H2, L3, H3, ... } -note, this can end with a L or a H, the number of elements shall be w -s[-1] shall be considered equivalent to s[1 ] -s[w ] shall be considered equivalent to s[w-2] - -2. perform the lifting steps in order as described below - -5/3 Integer filter: -1. s[i] -= (s[i-1] + s[i+1] + 2)>>2; for all even i < w -2. 
s[i] += (s[i-1] + s[i+1] )>>1; for all odd i < w - -\ | /|\ | /|\ | /|\ | /|\ - \|/ | \|/ | \|/ | \|/ | - + | + | + | + | -1/4 - /|\ | /|\ | /|\ | /|\ | -/ | \|/ | \|/ | \|/ | \|/ - | + | + | + | + +1/2 - - -Snow's 9/7 Integer filter: -1. s[i] -= (3*(s[i-1] + s[i+1]) + 4)>>3; for all even i < w -2. s[i] -= s[i-1] + s[i+1] ; for all odd i < w -3. s[i] += ( s[i-1] + s[i+1] + 4*s[i] + 8)>>4; for all even i < w -4. s[i] += (3*(s[i-1] + s[i+1]) )>>1; for all odd i < w - -\ | /|\ | /|\ | /|\ | /|\ - \|/ | \|/ | \|/ | \|/ | - + | + | + | + | -3/8 - /|\ | /|\ | /|\ | /|\ | -/ | \|/ | \|/ | \|/ | \|/ - (| + (| + (| + (| + -1 -\ + /|\ + /|\ + /|\ + /|\ +1/4 - \|/ | \|/ | \|/ | \|/ | - + | + | + | + | +1/16 - /|\ | /|\ | /|\ | /|\ | -/ | \|/ | \|/ | \|/ | \|/ - | + | + | + | + +3/2 - -optimization tips: -following are exactly identical -(3a)>>1 == a + (a>>1) -(a + 4b + 8)>>4 == ((a>>2) + b + 2)>>2 - -16bit implementation note: -The IDWT can be implemented with 16bits, but this requires some care to -prevent overflows, the following list, lists the minimum number of bits needed -for some terms -1. lifting step -A= s[i-1] + s[i+1] 16bit -3*A + 4 18bit -A + (A>>1) + 2 17bit - -3. lifting step -s[i-1] + s[i+1] 17bit - -4. lifiting step -3*(s[i-1] + s[i+1]) 17bit - - -TODO: -===== -Important: -finetune initial contexts -flip wavelet? -try to use the wavelet transformed predicted image (motion compensated image) as context for coding the residual coefficients -try the MV length as context for coding the residual coefficients -use extradata for stuff which is in the keyframes now? 
-the MV median predictor is patented IIRC -implement per picture halfpel interpolation -try different range coder state transition tables for different contexts - -Not Important: -compare the 6 tap and 8 tap hpel filters (psnr/bitrate and subjective quality) -spatial_scalability b vs u (!= 0 breaks syntax anyway so we can add a u later) - - -Credits: -======== -Michael Niedermayer -Loren Merritt - - -Copyright: -========== -GPL + GFDL + whatever is needed to make this a RFC diff --git a/contrib/ffmpeg/doc/soc.txt b/contrib/ffmpeg/doc/soc.txt deleted file mode 100644 index 8b4a86db8..000000000 --- a/contrib/ffmpeg/doc/soc.txt +++ /dev/null @@ -1,24 +0,0 @@ -Google Summer of Code and similar project guidelines - -Summer of Code is a project by Google in which students are paid to implement -some nice new features for various participating open source projects ... - -This text is a collection of things to take care of for the next soc as -it's a little late for this year's soc (2006). - -The Goal: -Our goal in respect to soc is and must be of course exactly one thing and -that is to improve FFmpeg, to reach this goal, code must -* conform to the svn policy and patch submission guidelines -* must improve FFmpeg somehow (faster, smaller, "better", - more codecs supported, fewer bugs, cleaner, ...) - -for mentors and other developers to help students to reach that goal it is -essential that changes to their codebase are publicly visible, clean and -easy reviewable that again leads us to: -* use of a revision control system like svn -* separation of cosmetic from non-cosmetic changes (this is almost entirely - ignored by mentors and students in soc 2006 which might lead to a suprise - when the code will be reviewed at the end before a possible inclusion in - FFmpeg, individual changes were generally not reviewable due to cosmetics). 
-* frequent commits, so that comments can be provided early diff --git a/contrib/ffmpeg/doc/texi2pod.pl b/contrib/ffmpeg/doc/texi2pod.pl deleted file mode 100755 index c414ffcc6..000000000 --- a/contrib/ffmpeg/doc/texi2pod.pl +++ /dev/null @@ -1,427 +0,0 @@ -#! /usr/bin/perl -w - -# Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc. - -# This file is part of GNU CC. - -# GNU CC is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. - -# GNU CC is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. - -# You should have received a copy of the GNU General Public License -# along with GNU CC; see the file COPYING. If not, write to -# the Free Software Foundation, 51 Franklin Street, Fifth Floor, -# Boston, MA 02110-1301 USA - -# This does trivial (and I mean _trivial_) conversion of Texinfo -# markup to Perl POD format. It's intended to be used to extract -# something suitable for a manpage from a Texinfo document. 
- -$output = 0; -$skipping = 0; -%sects = (); -$section = ""; -@icstack = (); -@endwstack = (); -@skstack = (); -@instack = (); -$shift = ""; -%defs = (); -$fnno = 1; -$inf = ""; -$ibase = ""; - -while ($_ = shift) { - if (/^-D(.*)$/) { - if ($1 ne "") { - $flag = $1; - } else { - $flag = shift; - } - $value = ""; - ($flag, $value) = ($flag =~ /^([^=]+)(?:=(.+))?/); - die "no flag specified for -D\n" - unless $flag ne ""; - die "flags may only contain letters, digits, hyphens, dashes and underscores\n" - unless $flag =~ /^[a-zA-Z0-9_-]+$/; - $defs{$flag} = $value; - } elsif (/^-/) { - usage(); - } else { - $in = $_, next unless defined $in; - $out = $_, next unless defined $out; - usage(); - } -} - -if (defined $in) { - $inf = gensym(); - open($inf, "<$in") or die "opening \"$in\": $!\n"; - $ibase = $1 if $in =~ m|^(.+)/[^/]+$|; -} else { - $inf = \*STDIN; -} - -if (defined $out) { - open(STDOUT, ">$out") or die "opening \"$out\": $!\n"; -} - -while(defined $inf) { -while(<$inf>) { - # Certain commands are discarded without further processing. - /^\@(?: - [a-z]+index # @*index: useful only in complete manual - |need # @need: useful only in printed manual - |(?:end\s+)?group # @group .. @end group: ditto - |page # @page: ditto - |node # @node: useful only in .info file - |(?:end\s+)?ifnottex # @ifnottex .. @end ifnottex: use contents - )\b/x and next; - - chomp; - - # Look for filename and title markers. - /^\@setfilename\s+([^.]+)/ and $fn = $1, next; - /^\@settitle\s+([^.]+)/ and $tl = postprocess($1), next; - - # Identify a man title but keep only the one we are interested in. - /^\@c\s+man\s+title\s+([A-Za-z0-9-]+)\s+(.+)/ and do { - if (exists $defs{$1}) { - $fn = $1; - $tl = postprocess($2); - } - next; - }; - - # Look for blocks surrounded by @c man begin SECTION ... @c man end. - # This really oughta be @ifman ... @end ifman and the like, but such - # would require rev'ing all other Texinfo translators. 
- /^\@c\s+man\s+begin\s+([A-Z]+)\s+([A-Za-z0-9-]+)/ and do { - $output = 1 if exists $defs{$2}; - $sect = $1; - next; - }; - /^\@c\s+man\s+begin\s+([A-Z]+)/ and $sect = $1, $output = 1, next; - /^\@c\s+man\s+end/ and do { - $sects{$sect} = "" unless exists $sects{$sect}; - $sects{$sect} .= postprocess($section); - $section = ""; - $output = 0; - next; - }; - - # handle variables - /^\@set\s+([a-zA-Z0-9_-]+)\s*(.*)$/ and do { - $defs{$1} = $2; - next; - }; - /^\@clear\s+([a-zA-Z0-9_-]+)/ and do { - delete $defs{$1}; - next; - }; - - next unless $output; - - # Discard comments. (Can't do it above, because then we'd never see - # @c man lines.) - /^\@c\b/ and next; - - # End-block handler goes up here because it needs to operate even - # if we are skipping. - /^\@end\s+([a-z]+)/ and do { - # Ignore @end foo, where foo is not an operation which may - # cause us to skip, if we are presently skipping. - my $ended = $1; - next if $skipping && $ended !~ /^(?:ifset|ifclear|ignore|menu|iftex)$/; - - die "\@end $ended without \@$ended at line $.\n" unless defined $endw; - die "\@$endw ended by \@end $ended at line $.\n" unless $ended eq $endw; - - $endw = pop @endwstack; - - if ($ended =~ /^(?:ifset|ifclear|ignore|menu|iftex)$/) { - $skipping = pop @skstack; - next; - } elsif ($ended =~ /^(?:example|smallexample|display)$/) { - $shift = ""; - $_ = ""; # need a paragraph break - } elsif ($ended =~ /^(?:itemize|enumerate|[fv]?table)$/) { - $_ = "\n=back\n"; - $ic = pop @icstack; - } else { - die "unknown command \@end $ended at line $.\n"; - } - }; - - # We must handle commands which can cause skipping even while we - # are skipping, otherwise we will not process nested conditionals - # correctly. 
- /^\@ifset\s+([a-zA-Z0-9_-]+)/ and do { - push @endwstack, $endw; - push @skstack, $skipping; - $endw = "ifset"; - $skipping = 1 unless exists $defs{$1}; - next; - }; - - /^\@ifclear\s+([a-zA-Z0-9_-]+)/ and do { - push @endwstack, $endw; - push @skstack, $skipping; - $endw = "ifclear"; - $skipping = 1 if exists $defs{$1}; - next; - }; - - /^\@(ignore|menu|iftex)\b/ and do { - push @endwstack, $endw; - push @skstack, $skipping; - $endw = $1; - $skipping = 1; - next; - }; - - next if $skipping; - - # Character entities. First the ones that can be replaced by raw text - # or discarded outright: - s/\@copyright\{\}/(c)/g; - s/\@dots\{\}/.../g; - s/\@enddots\{\}/..../g; - s/\@([.!? ])/$1/g; - s/\@[:-]//g; - s/\@bullet(?:\{\})?/*/g; - s/\@TeX\{\}/TeX/g; - s/\@pounds\{\}/\#/g; - s/\@minus(?:\{\})?/-/g; - s/\\,/,/g; - - # Now the ones that have to be replaced by special escapes - # (which will be turned back into text by unmunge()) - s/&/&/g; - s/\@\{/{/g; - s/\@\}/}/g; - s/\@\@/&at;/g; - - # Inside a verbatim block, handle @var specially. - if ($shift ne "") { - s/\@var\{([^\}]*)\}/<$1>/g; - } - - # POD doesn't interpret E<> inside a verbatim block. - if ($shift eq "") { - s/</</g; - s/>/>/g; - } else { - s/</</g; - s/>/>/g; - } - - # Single line command handlers. - - /^\@include\s+(.+)$/ and do { - push @instack, $inf; - $inf = gensym(); - - # Try cwd and $ibase. - open($inf, "<" . $1) - or open($inf, "<" . $ibase . "/" . $1) - or die "cannot open $1 or $ibase/$1: $!\n"; - next; - }; - - /^\@(?:section|unnumbered|unnumberedsec|center)\s+(.+)$/ - and $_ = "\n=head2 $1\n"; - /^\@subsection\s+(.+)$/ - and $_ = "\n=head3 $1\n"; - - # Block command handlers: - /^\@itemize\s+(\@[a-z]+|\*|-)/ and do { - push @endwstack, $endw; - push @icstack, $ic; - $ic = $1; - $_ = "\n=over 4\n"; - $endw = "itemize"; - }; - - /^\@enumerate(?:\s+([a-zA-Z0-9]+))?/ and do { - push @endwstack, $endw; - push @icstack, $ic; - if (defined $1) { - $ic = $1 . 
"."; - } else { - $ic = "1."; - } - $_ = "\n=over 4\n"; - $endw = "enumerate"; - }; - - /^\@([fv]?table)\s+(\@[a-z]+)/ and do { - push @endwstack, $endw; - push @icstack, $ic; - $endw = $1; - $ic = $2; - $ic =~ s/\@(?:samp|strong|key|gcctabopt|option|env)/B/; - $ic =~ s/\@(?:code|kbd)/C/; - $ic =~ s/\@(?:dfn|var|emph|cite|i)/I/; - $ic =~ s/\@(?:file)/F/; - $_ = "\n=over 4\n"; - }; - - /^\@((?:small)?example|display)/ and do { - push @endwstack, $endw; - $endw = $1; - $shift = "\t"; - $_ = ""; # need a paragraph break - }; - - /^\@itemx?\s*(.+)?$/ and do { - if (defined $1) { - # Entity escapes prevent munging by the <> processing below. - $_ = "\n=item $ic\<$1\>\n"; - } else { - $_ = "\n=item $ic\n"; - $ic =~ y/A-Ya-y/B-Zb-z/; - $ic =~ s/(\d+)/$1 + 1/eg; - } - }; - - $section .= $shift.$_."\n"; -} -# End of current file. -close($inf); -$inf = pop @instack; -} - -die "No filename or title\n" unless defined $fn && defined $tl; - -$sects{NAME} = "$fn \- $tl\n"; -$sects{FOOTNOTES} .= "=back\n" if exists $sects{FOOTNOTES}; - -for $sect (qw(NAME SYNOPSIS DESCRIPTION OPTIONS EXAMPLES ENVIRONMENT FILES - BUGS NOTES FOOTNOTES SEEALSO AUTHOR COPYRIGHT)) { - if(exists $sects{$sect}) { - $head = $sect; - $head =~ s/SEEALSO/SEE ALSO/; - print "=head1 $head\n\n"; - print scalar unmunge ($sects{$sect}); - print "\n"; - } -} - -sub usage -{ - die "usage: $0 [-D toggle...] [infile [outfile]]\n"; -} - -sub postprocess -{ - local $_ = $_[0]; - - # @value{foo} is replaced by whatever 'foo' is defined as. - while (m/(\@value\{([a-zA-Z0-9_-]+)\})/g) { - if (! exists $defs{$2}) { - print STDERR "Option $2 not defined\n"; - s/\Q$1\E//; - } else { - $value = $defs{$2}; - s/\Q$1\E/$value/; - } - } - - # Formatting commands. - # Temporary escape for @r. 
- s/\@r\{([^\}]*)\}/R<$1>/g; - s/\@(?:dfn|var|emph|cite|i)\{([^\}]*)\}/I<$1>/g; - s/\@(?:code|kbd)\{([^\}]*)\}/C<$1>/g; - s/\@(?:gccoptlist|samp|strong|key|option|env|command|b)\{([^\}]*)\}/B<$1>/g; - s/\@sc\{([^\}]*)\}/\U$1/g; - s/\@file\{([^\}]*)\}/F<$1>/g; - s/\@w\{([^\}]*)\}/S<$1>/g; - s/\@(?:dmn|math)\{([^\}]*)\}/$1/g; - - # Cross references are thrown away, as are @noindent and @refill. - # (@noindent is impossible in .pod, and @refill is unnecessary.) - # @* is also impossible in .pod; we discard it and any newline that - # follows it. Similarly, our macro @gol must be discarded. - - s/\(?\@xref\{(?:[^\}]*)\}(?:[^.<]|(?:<[^<>]*>))*\.\)?//g; - s/\s+\(\@pxref\{(?:[^\}]*)\}\)//g; - s/;\s+\@pxref\{(?:[^\}]*)\}//g; - s/\@noindent\s*//g; - s/\@refill//g; - s/\@gol//g; - s/\@\*\s*\n?//g; - - # @uref can take one, two, or three arguments, with different - # semantics each time. @url and @email are just like @uref with - # one argument, for our purposes. - s/\@(?:uref|url|email)\{([^\},]*)\}/<B<$1>>/g; - s/\@uref\{([^\},]*),([^\},]*)\}/$2 (C<$1>)/g; - s/\@uref\{([^\},]*),([^\},]*),([^\},]*)\}/$3/g; - - # Turn B<blah I<blah> blah> into B<blah> I<blah> B<blah> to - # match Texinfo semantics of @emph inside @samp. Also handle @r - # inside bold. - s/</</g; - s/>/>/g; - 1 while s/B<((?:[^<>]|I<[^<>]*>)*)R<([^>]*)>/B<$1>${2}B</g; - 1 while (s/B<([^<>]*)I<([^>]+)>/B<$1>I<$2>B</g); - 1 while (s/I<([^<>]*)B<([^>]+)>/I<$1>B<$2>I</g); - s/[BI]<>//g; - s/([BI])<(\s+)([^>]+)>/$2$1<$3>/g; - s/([BI])<([^>]+?)(\s+)>/$1<$2>$3/g; - - # Extract footnotes. This has to be done after all other - # processing because otherwise the regexp will choke on formatting - # inside @footnote. - while (/\@footnote/g) { - s/\@footnote\{([^\}]+)\}/[$fnno]/; - add_footnote($1, $fnno); - $fnno++; - } - - return $_; -} - -sub unmunge -{ - # Replace escaped symbols with their equivalents. 
- local $_ = $_[0]; - - s/</E<lt>/g; - s/>/E<gt>/g; - s/{/\{/g; - s/}/\}/g; - s/&at;/\@/g; - s/&/&/g; - return $_; -} - -sub add_footnote -{ - unless (exists $sects{FOOTNOTES}) { - $sects{FOOTNOTES} = "\n=over 4\n\n"; - } - - $sects{FOOTNOTES} .= "=item $fnno.\n\n"; $fnno++; - $sects{FOOTNOTES} .= $_[0]; - $sects{FOOTNOTES} .= "\n\n"; -} - -# stolen from Symbol.pm -{ - my $genseq = 0; - sub gensym - { - my $name = "GEN" . $genseq++; - my $ref = \*{$name}; - delete $::{$name}; - return $ref; - } -} |